# Notebook to plot EEL sections
This notebook loads gridded CTD and LADCP data from the EEL (Extended Ellett Line) hydrographic section and plots mean and standard-deviation sections of potential temperature, salinity, potential density anomaly, and cross-section velocity.
### Import libraries
```
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib.pyplot as plt
#plt.style.use('dark_background')
%matplotlib inline
```
#### Variables
```
pathdir = '../data/raw/csv_ctdgrid'
colsec = 'royalblue'
colvec = 'salmon'
colellipse = 'limegreen'
```
## 1. Load and prepare the data
### 1.1 Load data into DataFrame
```
file4 = pathdir+'/'+'EELCTDandLADCP_3Dfield.csv'
dflad = pd.read_csv(file4,sep=',', index_col=None,
header=0).drop(columns=['Vladcpalong'])
```
#### 1.1.1 Tidy the data format and variable names
```
dflad = dflad.round({'PTMP': 4, 'PSAL': 3, 'Sigma0': 3, 'Vrel': 3, 'Vladcp': 3, 'Vabs': 3})
dflad['Refdist']=dflad['Refdist'].apply(int)
dflad
df3D = (dflad.rename(columns={"Refdist": "refdist", "Year": "year", "Depth": "depth"})
             .set_index(['refdist', 'year', 'depth'])
             .round(3)
             .sort_values(['refdist', 'year', 'depth'])
             .drop(columns=['CruiseID', 'Staname']))
df3D=df3D.rename(columns={"PTMP": "ptmp", "PSAL": "psal", "Sigma0": "sigma0"})
df3D
```
#### 1.1.2 Import additional metadata
```
# Location of EEL stations
file3 = pathdir+'/'+'EELCTDandLADCP_refpos.csv'
dfloc = pd.read_csv(file3,sep=',', index_col=None,
header=0)
dfloc['Refdist'] = dfloc['Refdist'].round(decimals=1)
# Sorted according to distance:
dfs = dfloc.sort_values('Refdist', ascending=True)
print(dfs)
```
### 1.2 Convert the 3D DataFrame into an xarray Dataset
```
da0 = df3D.to_xarray()
print(da0)
```
#### 1.2.1 Add variables from the metadata files (lat, lon, station name, cruise ID)
```
# Get position metadata
dfloc = pd.read_csv(pathdir+'/'+'EELCTDandLADCP_refpos.csv',sep=',', index_col=None,
header=0)
# Make sure the station names are sorted by their distance along the section
sdfloc = dfloc.rename(columns={"Refdist": "refdist"}).sort_values('refdist', ascending=True)
sdfloc['refdist']=sdfloc['refdist'].apply(int)
sdfloc=sdfloc.set_index('refdist')
daloc = sdfloc.to_xarray()
print(daloc)
# Get cruise (time) metadata
dfdate = pd.read_csv(pathdir+'/'+'EELCTDandLADCP_refdate.csv',sep=',', index_col=None,
header=0).rename(columns={"Year": "year"}).set_index('year')
dadate = dfdate.to_xarray()
print(dadate)
```
#### 1.2.2 Merge metadata into main Dataset
```
da=xr.merge([da0, daloc,dadate])
print(da)
```
#### 1.2.3 Move variables into coordinates
```
da.coords['lon'] = ('refdist', da.LonSta)
da.coords['lat'] = ('refdist', da.LatSta)
da.coords['Staname'] = ('refdist', da.Staname)
da = da.drop(['LonSta','LatSta'])
print(da)
```
#### 1.2.4 Visual inspection of the data
```
da.Vladcp[2,-2,:].plot(y='depth', yincrease=False)
da.Vladcp.sel(year=2017).plot(y='depth', yincrease=False)
da.ptmp.sel(year=2017).plot(y='depth', yincrease=False)
```
Plot the evolution of the velocity profile at station E:
```
# Extract the Refdist values associated with stations E and F
staE = da.refdist.values[da.Staname.values=='E'][0]
staF = da.refdist.values[da.Staname.values=='F'][0]
# Plot
da.Vladcp.sel(refdist=staE,year=slice('2004','2017')).plot(y='depth', yincrease=False)
Vladcpmean = da.Vladcp.mean('year')
Vladcpstd = da.Vladcp.std(dim='year')
```
Plot the standard deviation of the LADCP velocity, first with fixed vmin/vmax limits, then with discrete levels:
```
Vladcpstd.plot(y='depth', yincrease=False, vmin=0, vmax=0.18, cmap=plt.cm.Reds)
Vladcpstd.plot(y='depth', yincrease=False, levels=np.arange(0, 0.18, 0.04), cmap=plt.cm.Reds)
```
## 2. Contour plot
Create a list of namedtuples describing the variables to plot on a figure with several subpanels.
The rows of the figure correspond to the variables (temperature, salinity, density, velocity), while the columns correspond to the mean and the standard deviation.
```
import collections
fields = [
'xarray_var',
'short_name',
'var_type',
'contourf_lvls',
'contour_lvls',
'cmap',
'long_name',
'cbar_title',
]
Datatoplot = collections.namedtuple('Datatoplot',fields,defaults=(None,) * len(fields))
Datatoplot?
```
##### Define the list of variables to plot, with their plotting options
```
ptmp_mean = Datatoplot(xarray_var='ptmp', short_name='Mean Pot. Temp.', var_type='mean',
contourf_lvls= np.arange(3, 11, 1),cmap='plasma',
long_name = 'Mean Potential Temperature', cbar_title ='$^\circ$C')
ptmp_std = Datatoplot(xarray_var='ptmp', short_name='Std Pot. Temp.', var_type='std',
contourf_lvls= np.arange(0, 1.2, 0.2),cmap='OrRd',
long_name = 'Std Potential Temperature', cbar_title ='$^\circ$C')
psal_mean = Datatoplot(xarray_var='psal', short_name='Mean Sal.', var_type='mean',
contourf_lvls= np.arange(34.9, 35.45, 0.05), cmap='viridis',
long_name = 'Mean Salinity', cbar_title =' ')
psal_std = Datatoplot(xarray_var='psal', short_name='Std Sal.', var_type='std',
contourf_lvls= np.arange(0, 0.12, 0.02),cmap='OrRd',
long_name = 'Std Salinity', cbar_title =' ')
sigma0_mean = Datatoplot(xarray_var='sigma0', short_name='Mean Pot. Dens. Ano.', var_type='mean',
contourf_lvls= np.arange(27.2, 27.9, 0.1), cmap='cividis_r',
long_name = 'Mean Potential Density Anomaly', cbar_title ='kg.m$^{-3}$')
sigma0_std = Datatoplot(xarray_var='sigma0', short_name='Std Pot. Dens. Ano.', var_type='std',
contourf_lvls= np.arange(0, 0.12, 0.02),cmap='OrRd',
long_name = 'Std Potential Density Anomaly', cbar_title ='kg.m$^{-3}$')
ladcp_mean = Datatoplot(xarray_var='Vladcp', short_name='Mean LADCP Vel.', var_type='mean',
contourf_lvls= np.arange(-16, 20, 4)/100, cmap='coolwarm',
long_name = 'Mean cross-section velocity from LADCP', cbar_title ='m.s$^{-1}$')
ladcp_std = Datatoplot(xarray_var='Vladcp', short_name='Std LADCP Vel.', var_type='std',
contourf_lvls= np.arange(0, 18, 4)/100, cmap='OrRd',
long_name = 'Std cross-section velocity from LADCP', cbar_title ='m.s$^{-1}$')
# Create a list of namedtuples
varstoplot=((ptmp_mean,ptmp_std),
(psal_mean,psal_std),
(sigma0_mean,sigma0_std),
(ladcp_mean,ladcp_std),
)
```
Definition of a class to display a "pretty" formatting of the namedtuples (from [stackoverflow post](https://stackoverflow.com/a/43823671/13890678)):
```
from io import StringIO
import pprint
class MyPrettyPrinter(pprint.PrettyPrinter):
def format_namedtuple(self, object, stream, indent, allowance, context, level):
# Code almost equal to _format_dict, see pprint code
write = stream.write
write(object.__class__.__name__ + '(')
object_dict = object._asdict()
length = len(object_dict)
if length:
# We first try to print inline, and if it is too large then we print it on multiple lines
inline_stream = StringIO()
self.format_namedtuple_items(object_dict.items(), inline_stream, indent, allowance + 1, context, level, inline=True)
max_width = self._width - indent - allowance
if len(inline_stream.getvalue()) > max_width:
self.format_namedtuple_items(object_dict.items(), stream, indent, allowance + 1, context, level, inline=False)
else:
stream.write(inline_stream.getvalue())
write(')')
def format_namedtuple_items(self, items, stream, indent, allowance, context, level, inline=False):
# Code almost equal to _format_dict_items, see pprint code
indent += self._indent_per_level
write = stream.write
last_index = len(items) - 1
if inline:
delimnl = ', '
else:
delimnl = ',\n' + ' ' * indent
write('\n' + ' ' * indent)
for i, (key, ent) in enumerate(items):
last = i == last_index
write(key + '=')
self._format(ent, stream, indent + len(key) + 2,
allowance if last else 1,
context, level)
if not last:
write(delimnl)
def _format(self, object, stream, indent, allowance, context, level):
# We dynamically add the types of our namedtuple and namedtuple like
# classes to the _dispatch object of pprint that maps classes to
# formatting methods
# We use a simple criteria (_asdict method) that allows us to use the
# same formatting on other classes but a more precise one is possible
if hasattr(object, '_asdict') and type(object).__repr__ not in self._dispatch:
self._dispatch[type(object).__repr__] = MyPrettyPrinter.format_namedtuple
super()._format(object, stream, indent, allowance, context, level)
pp = MyPrettyPrinter(indent=2, depth=4)
pp.pprint(varstoplot)
```
### 2.1 with matplotlib
```
#plt.rcParams.update({'font.size': 10})
# Create the figure
## fig, axs = plt.subplots(4, 2, figsize=(12, 12), constrained_layout=True)
fig, axs = plt.subplots(4, 2, sharex=True, figsize=(16, 16))
for row in [0,1,2,3]:
for col in [0,1]:
data = varstoplot[row][col]
if data.var_type == 'mean':
var = da[data.xarray_var].mean('year')
elif data.var_type == 'std':
var = da[data.xarray_var].std('year')
else:
raise ValueError(f"wrong var_type in {data}")
cf_levels = data.contourf_lvls
colmap = getattr(plt.cm, data.cmap)
# if VarDir['cmap'] == 'cividis':
# colmap = colmap[:-1,:]
pltitle = data.long_name
plunits = data.cbar_title
X=da.refdist.values
Y=da.depth.values
Z=var.values
Xi,Yi = np.meshgrid(X, Y, indexing='ij')
ax = axs[row, col]
# ax = axs[col]
CS = ax.contourf(Xi, -Yi, Z, cf_levels, alpha=0.9, cmap=colmap, extend='both');
if data.contour_lvls is not None:
CS2 = ax.contour(CS, levels=data.contour_lvls, colors='k')
# Plot specific potential density contour indicating of different water masses
# adapt the line colors according to background colormap
c_WM = [27.20,27.50,27.70,27.85]
if data.var_type == 'mean':
CS_WM = ax.contour(Xi, -Yi, da['sigma0'].mean('year'), levels=c_WM, colors='w' )
CS_WM.collections[0].set_color('k')
if data.xarray_var == 'psal':
CS_WM.collections[0].set_color('w')
elif data.xarray_var == 'Vladcp':
for c in CS_WM.collections:
c.set_color('k')
elif data.var_type == 'std':
CS_WM = ax.contour(Xi, -Yi, da['sigma0'].mean('year'), levels=c_WM, colors='k' )
CS_WM.collections[0].set_color('w')
ax.set_ylabel('Depth (m)')
ax.set_xlim([0,1250])
ax.set_ylim([-3100,0])
# ax.set_title(pltitle)
ax.text(550, -2700, data.short_name, fontsize=12, color ='white' )
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = fig.colorbar(CS, ax=ax)
cbar.ax.set_ylabel(plunits)
#cbar.ax.set_ylabel('m/s')
# Add the contour line levels to the colorbar
if data.contour_lvls is not None:
cbar.add_lines(CS2)
# Add bathymetry (from the nominal depth of the reference EEL stations)
ax.fill_between(dfs['Refdist'], -dfs['DepthSta'], -2900, color="#331a00", alpha=0.8)
ax.vlines(dfs['Refdist'], -3050, -2950, colors='k')
plt.tight_layout(pad=0.5,w_pad=3)
plt.savefig('figures/02_' + 'figure4x2.pdf')
```
---
```
from torchvision.models import *
import wandb
from sklearn.model_selection import train_test_split
import os,cv2
import numpy as np
import matplotlib.pyplot as plt
from torch.nn import *
import torch,torchvision
from tqdm import tqdm
device = 'cuda'
PROJECT_NAME = 'Gender-Clf'
transformations = torchvision.transforms.Compose(
[torchvision.transforms.RandomVerticalFlip(),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor()
]
)
def load_data():
labels = {}
labels_r = {}
idx = 0
data = []
for folder in tqdm(os.listdir('./data/')):
idx += 1
        labels[folder] = idx
        labels_r[idx] = folder
for folder in tqdm(os.listdir('./data/')):
for file in os.listdir(f'./data/{folder}/'):
img = cv2.imread(f'./data/{folder}/{file}')
img = cv2.resize(img,(56,56))
img = img / 255.0
            data.append([img, np.eye(len(labels))[labels[folder] - 1]])  # image with its one-hot label
np.random.shuffle(data)
X = []
y = []
for d in data:
X.append(d[0])
y.append(d[1])
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.25,shuffle=False)
X_train = torch.from_numpy(np.array(X_train)).to(device).view(-1,3,56,56).float()
y_train = torch.from_numpy(np.array(y_train)).to(device).float()
X_test = torch.from_numpy(np.array(X_test)).to(device).view(-1,3,56,56).float()
y_test = torch.from_numpy(np.array(y_test)).to(device).float()
return X,y,X_train,X_test,y_train,y_test,labels,labels_r,idx,data
X,y,X_train,X_test,y_train,y_test,labels,labels_r,idx,data = load_data()
torch.save(X_train,'X_train.pt')
torch.save(y_train,'y_train.pt')
torch.save(X_test,'X_test.pt')
torch.save(y_test,'y_test.pt')
torch.save(labels_r,'labels_r.pt')
torch.save(labels,'labels.pt')
torch.save(X_train,'X_train.pth')
torch.save(y_train,'y_train.pth')
torch.save(X_test,'X_test.pth')
torch.save(y_test,'y_test.pth')
torch.save(labels_r,'labels_r.pth')
torch.save(labels,'labels.pth')
def get_loss(model,X,y,criterion):
preds = model(X)
loss = criterion(preds,y)
return loss.item()
def get_accuracy(model,X,y):
correct = 0
total = 0
preds = model(X)
for pred,yb in zip(preds,y):
pred = int(torch.argmax(pred))
yb = int(torch.argmax(yb))
if pred == yb:
correct += 1
total += 1
acc = round(correct/total,3)*100
return acc
model = resnet18().to(device)
model.fc = Linear(512,2)
criterion = MSELoss()
optimizer = torch.optim.Adam(model.parameters(),lr=0.001)
batch_size = 32
epochs = 100
wandb.init(project=PROJECT_NAME,name='baseline')
for _ in tqdm(range(epochs)):
for i in range(0,len(X_train),batch_size):
X_batch = X_train[i:i+batch_size]
y_batch = y_train[i:i+batch_size]
model.to(device)
preds = model(X_batch)
loss = criterion(preds.view(-1,1),y_batch.view(-1,1))
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.eval()
torch.cuda.empty_cache()
wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion))/2})
torch.cuda.empty_cache()
wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
torch.cuda.empty_cache()
wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})
torch.cuda.empty_cache()
wandb.log({'Val ACC':get_accuracy(model,X_test,y_test)})
torch.cuda.empty_cache()
model.train()
wandb.finish()
torch.save(model,'model-resnet18.pt')
torch.save(model,'model-resnet18.pth')
torch.save(model.state_dict(),'model-sd-resnet18.pt')
torch.save(model.state_dict(),'model-sd-resnet18.pth')
class Model(Module):
    def __init__(self):
        super().__init__()
self.max_pool2d = MaxPool2d((2,2),(2,2))
self.activation = ReLU()
self.conv1 = Conv2d(3,7,(5,5))
self.conv2 = Conv2d(7,14,(5,5))
self.conv2bn = BatchNorm2d(14)
self.conv3 = Conv2d(14,21,(5,5))
self.linear1 = Linear(21*3*3,256)
self.linear2 = Linear(256,512)
self.linear2bn = BatchNorm1d(512)
self.linear3 = Linear(512,256)
self.output = Linear(256,len(labels))
def forward(self,X):
preds = self.max_pool2d(self.activation(self.conv1(X)))
preds = self.max_pool2d(self.activation(self.conv2bn(self.conv2(preds))))
preds = self.max_pool2d(self.activation(self.conv3(preds)))
print(preds.shape)
preds = preds.view(-1,21*3*3)
preds = self.activation(self.linear1(preds))
preds = self.activation(self.linear2bn(self.linear2(preds)))
preds = self.activation(self.linear3(preds))
preds = self.output(preds)
return preds
model = Model().to(device)
criterion = MSELoss()
optimizer = torch.optim.Adam(model.parameters(),lr=0.001)
wandb.init(project=PROJECT_NAME,name='baseline')
for _ in tqdm(range(epochs)):
for i in range(0,len(X_train),batch_size):
X_batch = X_train[i:i+batch_size]
y_batch = y_train[i:i+batch_size]
model.to(device)
preds = model(X_batch)
loss = criterion(preds.view(-1,1),y_batch.view(-1,1))
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.eval()
torch.cuda.empty_cache()
wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion))/2})
torch.cuda.empty_cache()
wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
torch.cuda.empty_cache()
wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})
torch.cuda.empty_cache()
wandb.log({'Val ACC':get_accuracy(model,X_test,y_test)})
torch.cuda.empty_cache()
model.train()
wandb.finish()
torch.save(model,'model-CNN.pt')
torch.save(model,'model-CNN.pth')
torch.save(model.state_dict(),'model-sd-CNN.pt')
torch.save(model.state_dict(),'model-sd-CNN.pth')
```
---
Adapted from [Fall 2019 Data 100 HW 4: Trump, Twitter, and Text](http://www.ds100.org/fa19/syllabus/)
```
import numpy as np
from datascience import *
# Table.interactive()
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
```
# Project 2: Trump's Tweets
## Table of Contents
<a href='#section 0'>Background Knowledge: Twitter & the President </a>
1. <a href='#section 1'> The Data Science Life Cycle</a>
a. <a href='#subsection 1a'>Formulating a question or problem</a>
b. <a href='#subsection 1b'>Acquiring and cleaning data</a>
c. <a href='#subsection 1c'>Conducting exploratory data analysis</a>
d. <a href='#subsection 1d'>Using prediction and inference to draw conclusions</a>
<br><br>
### Background Knowledge: Twitter & the President <a id='section 0'></a>
<img src="twitter_trump.png" width = 1000/>
[Source](https://www.politico.com/magazine/story/2018/01/26/donald-trump-twitter-addiction-216530)
President Donald Trump's Twitter activity grew over time, starting well before he was elected to the presidency. The image above even shows part of how he used Twitter while he was running in the election.
<div class="alert alert-warning">
<b>Question:</b> What are some key points you notice from the graph above? What are other points in President Trump's time in office that are notable (Think about his election, the Wall, Impeachment, etc.)?
</div>
*Answer here*
# The Data Science Life Cycle <a id='section 1'></a>
## Formulating a question or problem <a id='subsection 1a'></a>
It is important to ask questions that will be informative and that will avoid misleading results. There are many different questions we could ask about Trump's tweets; for example, many people are interested in how he uses Twitter to connect with his supporters.
<div class="alert alert-warning">
<b>Question:</b> Recall the questions you developed with your group on Tuesday. Write down that question below, and try to add on to it with the context from the articles from Wednesday. Think about what data you would need to answer your question. You can review the articles on the bCourses page under Module 4.3.
</div>
Original Question(s): *here*
Updated Question(s): *here*
Data you would need: *here*
## Acquiring and cleaning data <a id='subsection 1b'></a>
The following table, `trump`, contains tweets from President Donald Trump's personal Twitter account from January 2016 until February 2019. Here is information about the columns of the dataset.
**Codebook**

| Column | Description |
| --- | --- |
| time | Coordinated Universal Time (UTC) at which the tweet was published |
| source | Source of the tweet (Android, iPhone, web browser, etc.) |
| text | Original text of the tweet (includes all punctuation) |
| retweet_count | Number of times the original tweet was shared |
| year | Year the tweet was published |
| est_time | Eastern Standard Time at which the tweet was published |
| hour | Hour of the day the tweet was published |
| no_punc | Text of the tweet without any punctuation |
| polarity | Score measuring the sentiment of the tweet |
```
trump = Table().read_table('trump_tweets.csv')
trump
```
<div class="alert alert-warning">
<b>Question:</b> It's important to evalute our data source. What do you know about the source (Trump's Twitter Account)? What motivations might he have for posting? What data might be missing? How might deleted tweets be dealt with?
</div>
*Insert answer*
<div class="alert alert-warning">
<b>Question:</b> We want to learn more about the dataset. First, how many total rows are in this table? What does each row represent?
</div>
```
total_rows = ...
```
*Description of a row here*
## Conducting exploratory data analysis <a id='subsection 1c'></a>
We will explore how Trump's tweets vary by sentiment and extend that analysis to how many retweets he gets and to patterns over time. In the end, we will try to answer **"How do Trump's tweets influence the interpretation of big events, based on the sentiment of his tweets, their reception (retweets), and how his tweets about an event progress over time?"**
### Part 1: Polarity & Sentiment
It turns out that we can use the words in Trump's tweets to calculate a measure of the sentiment of the tweet. For example, the sentence "I love America!" has positive sentiment, whereas the sentence "I hate taxes!" has a negative sentiment. In addition, some words have stronger positive / negative sentiment than others: "I love America." is more positive than "I like America."
We will use the [VADER (Valence Aware Dictionary and sEntiment Reasoner)](https://github.com/cjhutto/vaderSentiment) lexicon to analyze the sentiment of Trump's tweets. VADER is a lexicon and rule-based sentiment analysis tool that is specifically attuned to sentiments expressed in social media which is great for our usage.
The VADER lexicon gives the sentiment of individual words. Run the following cell to show a few rows of the lexicon:
```
print(''.join(open("vader_lexicon.txt").readlines()[300:310]))
```
We used the VADER Lexicon to calculate the polarity for each tweet. This is in the "polarity" column of the `trump` table. We can use this to find the most positive and negative tweets.
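For reference, one way a score like this could be computed is with the `vaderSentiment` package, which implements the VADER lexicon. The sketch below is illustrative only and is not necessarily the exact pipeline used to build the dataset (it assumes `vaderSentiment` is installed).
```
# Sketch only: the dataset's polarity column was precomputed, so you do not need to run this.
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

analyzer = SentimentIntensityAnalyzer()

def tweet_polarity(text):
    # 'compound' is VADER's overall sentiment score, between -1 and 1
    return analyzer.polarity_scores(text)['compound']

# Table.apply maps the function over the 'text' column of the datascience Table
trump.apply(tweet_polarity, 'text')[:5]
```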
<div class="alert alert-warning">
<b>Question:</b> Find the 5 most negative tweets in the dataset. (Hint: first, sort the data.)
</div>
```
most_negative = trump.sort('polarity').take(np.arange(5))
most_negative
# most_negative = trump...(...)...(np.arange(5))
# most_negative
## Just Run this cell to view the whole text of the tweets in a nicer format
print('Most negative tweets:')
for t in most_negative.column('text'):
print('\n ', t)
```
<div class="alert alert-warning">
<b>Question:</b> What patterns do you notice in the most negative tweets?
</div>
*Answer here*
<div class="alert alert-warning">
<b>Question:</b> Find the 5 most positive tweets in the dataset. (Hint: first, sort the data.)
</div>
```
most_positive = trump.sort('polarity', descending=True).take(np.arange(5))
most_positive
# most_positive = trump.sort('polarity', descending=True).take(np.arange(5))
# most_positive
## Just Run this cell to view the whole text of the tweets in a nicer format
print('Most positive tweets:')
for t in most_positive.column('text'):
print('\n ', t)
```
<div class="alert alert-warning">
<b>Question:</b> What patterns do you notice in the most positive tweets?
</div>
*Answer here*
**Specific Words:** These more extreme tweets already reveal some trends. Drawing on what we know from these tweets and from the news, let's investigate specific words that Trump uses in his tweets. In what context does he use these words?
<div class="alert alert-warning">
<b>Question:</b> Choose 6 different keywords. Then, calculate the average polarity for tweets that contain those keywords. Use the `avg_pol` function. Make sure to run the cell that defines the function. We have provided the word "immigr" as an example for format, feel free to change this.
</div>
Note: Some words are used more often than others, but there is usually a stem or root of a word that appears more frequently. For example, if you are interested in immigration, consider using "immigr" so that you match tweets containing immigration, immigrant, etc.
```
## RUN THIS CELL!!
def avg_pol(keyword_array):
pol_arr = make_array()
for i in keyword_array:
tbl = trump.where("no_punc", are.containing(i))
avg = np.average(tbl.column("polarity"))
pol_arr = np.append(pol_arr,avg)
return pol_arr
words = make_array("immigr", "great", "china", "hillary", "crazy", "wall")
polarity_score = avg_pol(words)
polarity_score
# words = make_array("immigr", ..., ..., ..., ..., ...)
# polarity_score = avg_pol(words)
# polarity_score
```
We have compiled the keywords we are interested in and their average polarities. In order to compare the numbers in the array, it would be easier if they were in a table, so let's create one.
<div class="alert alert-warning">
<b>Question:</b> Create a table called `words_polarity` that has two columns: the first called `Words`, and the second called `Average Polarity`, which contains the `polarity_score` array we made above. Then, sort the "Average Polarity" column in ascending order.
</div>
```
words_polarity = Table().with_columns("Words", words, "Average Polarity", polarity_score).sort("Average Polarity")
words_polarity
# words_polarity = Table().with_columns("...", words, "Average Polarity", ...).sort("Polarity")
# words_polarity
```
<div class="alert alert-warning">
<b>Question:</b> Using the words_polarity table, we can make a bar chart. Fill in the code below.
</div>
```
words_polarity.barh("Words")
```
<div class="alert alert-warning">
<b>Question:</b> What are some possible reasons for the disparities between the bars?
</div>
*Insert answer here.*
### Part 2: Polarity in relation to Retweets & Time
In Part 1, we learned about polarity and the sentiment of some of Trump's tweets, but how does this relate to other parts of the data? Two other interesting components are the number of retweets certain posts get relative to others and differences in polarity over time. How are these variables related to sentiment? Let's start with retweets.
**Retweets:** Similar to other social media platforms, retweeting allows people to share content others post. The higher the number of retweets, the more popular the post.
<div class="alert alert-warning">
<b>Question:</b> Find the 5 most retweeted posts in the dataset. (Hint: first, sort the data.)
</div>
```
most_retweeted = trump.sort("retweet_count", descending= True).take(np.arange(5))
most_retweeted
## Just Run this cell to view the whole text of the tweets in a nicer format
print('Most retweeted posts:')
for t in most_retweeted.column('text'):
print('\n ', t)
```
<div class="alert alert-warning">
<b>Question:</b> What patterns do you notice in the most retweeted tweets?
</div>
*Answer here*
<div class="alert alert-warning">
<b>Question:</b> How do retweets relate to polarity? Make a scatterplot that compares retweets and polarity
</div>
```
trump.scatter("polarity", "retweet_count")
```
**Polarity Over Time:** We have learned a little about retweeting patterns, but how do these patterns vary over time? Let's focus on years, so we can see the broad pattern.
<div class="alert alert-warning">
<b>Question:</b> Group the data by year, so that each row represents a unique year. Take the average of every other column. If a column contains strings, make sure to drop it. Call this table `year_group`.
</div>
```
year_group = trump.group("year", np.average).drop(1,2,3,5,7)
year_group
```
<div class="alert alert-warning">
<b>Question:</b> Using the grouped table, create a plot comparing `year` by `retweet_count average`.
</div>
```
year_group.plot("year", "retweet_count average")
```
<div class="alert alert-warning">
<b>Question:</b> What do you notice from the plot? What trend exists over time (if any)?
</div>
*Answer here*
<div class="alert alert-warning">
<b>Question:</b> Let's do the same for polarity over time. Using the grouped table, create a plot comparing `year` by `polarity average`.
</div>
```
year_group.plot("year", "polarity average")
```
<div class="alert alert-warning">
<b>Question:</b> What do you notice from the plot? What trend exists over time (if any)?
</div>
*Answer here*
<div class="alert alert-warning">
<b>Question:</b> Given the changes in polarity and retweet counts over time, what might we expect to see from Trump's 2020 twitter data?
</div>
*Answer here.*
## Using prediction and inference to draw conclusions <a id='subsection 1a'></a>
Now that we have some context for the data, let's think back to major events that have happened during Trump's time as president. Consider his fight against Hillary and Bernie, his inauguration, the witch-hunt period, fake news, the Russia scandal, and Charlottesville, to name a few. These are all major events that happened in the past few years. **How do these events appear in Trump's tweets?**
From the previous sections, we have looked at the polarity of certain words, and we can do something similar to explore these events. As a group, choose an event you would like to explore more in depth.
<div class="alert alert-warning">
<b>Question:</b> What event are you interested in exploring? Determine a keyword you can use to find all related tweets to the event.
</div>
*Answer here*
<div class="alert alert-warning">
<b>Question:</b> Use your keyword to find all the tweets where your keyword in contained in the post.
</div>
```
event = trump.where("no_punc", are.containing("charlottesville"))
event
```
<div class="alert alert-warning">
<b>Question:</b> What is the time range of tweets related to your event? How does this compare to what you know of the event? I recommend searching a bit about the event you are exploring.
</div>
*Answer here*
#### The tweets are primarily from August 2017, but he wrote a remembrance tweet in August 2018. https://en.wikipedia.org/wiki/Unite_the_Right_rally
<div class="alert alert-warning">
<b>Question:</b> Plot the change over time in retweets for your event. Comment on what patterns you noticed.
</div>
```
event.plot("time", "retweet_count")
```
*Comment here*
<div class="alert alert-warning">
<b>Question:</b> Plot the change over time in polarity for your event. Comment on what patterns you noticed.
</div>
```
event.plot("time", "polarity")
```
*Comment here*
<div class="alert alert-warning">
<b>Question:</b> Based on these two measures, how do Trump's tweets frame the event? How does that differ from your interpretation of the event? How is it similar?
</div>
*Answer here*
<div class="alert alert-warning">
<b>Question:</b> What impact might President Trump's tweets have on these major events? How does his use of Twitter influence individuals who agree and disagree with his beliefs?
</div>
*Answer here*
<div class="alert alert-warning">
<b>Question:</b> What is something interesting you learned from the project?
</div>
*Answer here*
Source: Adapted from [Fall 2019 Data 100 HW 4: Trump, Twitter, and Text](http://www.ds100.org/fa19/syllabus/)
Notebook Authors: Alleanna Clark, Ashley Quiterio, Karla Palos Castellanos
---
```
%load_ext autoreload
%autoreload 2
%matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
```
## Does nn.Conv2d init work well?
```
#export
from exp.nb_02 import *
def get_data():
path = datasets.download_data(MNIST_URL, ext='.gz')
with gzip.open(path, 'rb') as f:
((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
return map(tensor, (x_train,y_train,x_valid,y_valid))
def normalize(x, m, s): return (x-m)/s
torch.nn.modules.conv._ConvNd.reset_parameters??
x_train,y_train,x_valid,y_valid = get_data()
train_mean,train_std = x_train.mean(),x_train.std()
x_train = normalize(x_train, train_mean, train_std)
x_valid = normalize(x_valid, train_mean, train_std)
x_train = x_train.view(-1,1,28,28)
x_valid = x_valid.view(-1,1,28,28)
x_train.shape,x_valid.shape
n,*_ = x_train.shape
c = y_train.max()+1
nh = 32
n,c
nn.Conv2d?
l1 = nn.Conv2d(1, nh, 5)
x = x_valid[:100]
x.shape
def stats(x):
return x.mean(),x.std()
l1.weight.shape
stats(l1.weight),stats(l1.bias)
t = l1(x)
stats(t)
init.kaiming_normal_(l1.weight, a=1.)
stats(l1(x))
import torch.nn.functional as F
def f1(x, a=0):
return F.leaky_relu(l1(x), a)
init.kaiming_normal_(l1.weight, a=0)
stats(f1(x))
l1 = nn.Conv2d(1, nh, 5)
stats(f1(x))
l1.weight.shape
# receptive field size
rec_fs = l1.weight[0,0].numel()
rec_fs
nf,ni,*_ = l1.weight.shape
nf,ni
fan_in = ni*rec_fs
fan_out = nf*rec_fs
fan_in,fan_out
def gain(a):
return math.sqrt(2.0 / (1 + a**2))
gain(1),gain(0),gain(0.01),gain(0.1),gain(math.sqrt(5.))
torch.zeros(10000).uniform_(-1,1).std()
1/math.sqrt(3.)
def kaiming2(x,a, use_fan_out=False):
nf,ni,*_ = x.shape
rec_fs = x[0,0].shape.numel()
fan = nf*rec_fs if use_fan_out else ni*rec_fs
std = gain(a) / math.sqrt(fan)
bound = math.sqrt(3.) * std
x.data.uniform_(-bound,bound)
kaiming2(l1.weight, a=0);
stats(f1(x))
kaiming2(l1.weight, a=math.sqrt(5.))
stats(f1(x))
class Flatten(nn.Module):
def forward(self,x):
return x.view(-1)
# -1 is to infer the size of the dimension
a = torch.arange(6).view(3, -1)
a
a.view(-1)
m = nn.Sequential(
nn.Conv2d(1,8, 5,stride=2,padding=2), nn.ReLU(),
nn.Conv2d(8,16,3,stride=2,padding=1), nn.ReLU(),
nn.Conv2d(16,32,3,stride=2,padding=1), nn.ReLU(),
nn.Conv2d(32,1,3,stride=2,padding=1),
nn.AdaptiveAvgPool2d(1),
Flatten(),
)
y = y_valid[:100].float()
t = m(x)
stats(t)
l = mse(t,y)
l.backward()
stats(m[0].weight.grad)
init.kaiming_uniform_??
for l in m:
if isinstance(l, nn.Conv2d):
init.kaiming_uniform_(l.weight)
l.bias.data.zero_()
t = m(x)
stats(t)
l = mse(t,y)
l.backward()
stats(m[0].weight.grad)
```
## Export
```
!./notebook2script.py 02a_why_sqrt5.ipynb
```
---
<img src="../images/dask_horizontal.svg" align="right" width="30%">
# Dask Array
<img src="../images/Dask Array (Light).png" width="50%" align="right">
Dask array provides a parallel, larger-than-memory, n-dimensional array using blocked algorithms. Simply put: a distributed NumPy.
* **Parallel**: Uses all of the cores on your computer
* **Larger-than-memory**: Lets you work on datasets that are larger than your available memory by breaking up your array into many small pieces, operating on those pieces in an order that minimizes the memory footprint of your computation, and effectively streaming data from disk.
* **Blocked Algorithms**: Perform large computations by performing many smaller computations
## Learning Objectives
- Understand key features of dask arrays
- Work with Dask Array's in much the same way you would work with a NumPy array
## Prerequisites
| Concepts | Importance | Notes |
| --- | --- | --- |
| Familiarity with NumPy | Necessary | |
- **Time to learn**: *20-25 minutes*
## Setup
```
from dask.distributed import Client, LocalCluster
cluster = LocalCluster()
client = Client(cluster)
client
```
## Blocked Algorithms
A *blocked algorithm* executes on a large dataset by breaking it up into many small blocks.
For example, consider taking the sum of a billion numbers. We might instead break up the array into 1,000 chunks, each of size 1,000,000, take the sum of each chunk, and then take the sum of the intermediate sums.
We achieve the intended result (one sum on one billion numbers) by performing many smaller results (one thousand sums on one million numbers each, followed by another sum of a thousand numbers.)
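As a rough sketch in plain NumPy (Dask automates this bookkeeping and runs the chunk sums in parallel), the idea looks like this:
```
import numpy as np

# Illustrative blocked sum: many small sums, then a sum of the intermediate sums
x = np.random.random(1_000_000)      # stand-in for a much larger array
chunk = 100_000

chunk_sums = [x[i:i + chunk].sum() for i in range(0, x.size, chunk)]
total = sum(chunk_sums)

np.allclose(total, x.sum())          # matches the direct sum
```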
## `dask.array` contains these algorithms
`dask.array` implements a subset of the NumPy ndarray interface using blocked algorithms, cutting up the large array into many small arrays. This lets us compute on arrays larger than memory using multiple cores. We coordinate these blocked algorithms using Dask graphs. Dask arrays are also lazy, meaning that they do not evaluate until you explicitly ask for a result using the `compute` method.
### Create `dask.array` object
If we want to create a 3D NumPy array of random values, we do it like this:
```
import dask
import dask.array as da
import numpy as np
from distributed.utils import format_bytes
shape = (600, 200, 200)
arr = np.random.random(shape)
arr
format_bytes(arr.nbytes)
```
This array contains `~183 MB` of data
Now let's create the same array using Dask's array interface.
```
darr = da.random.random(shape, chunks=(300, 100, 200))
```
We also pass a chunk size, like `(300, 100, 200)`, to tell Dask how to block up our array.
<div class="admonition alert alert-info">
<p class="admonition-title" style="font-weight:bold">Specifying Chunks</p>
There are <a href="https://docs.dask.org/en/latest/array-chunks.html">several ways to specify chunks</a>. In this tutorial, we will use a block shape.
</div>
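For instance, the following are all valid ways to describe the chunking of the same array (a sketch mirroring the linked documentation; each call is lazy and only records the block structure):
```
da.random.random(shape, chunks=100)              # 100 along every axis
da.random.random(shape, chunks=(300, 100, 200))  # explicit block shape (used below)
da.random.random(shape, chunks='auto')           # let Dask choose a reasonable size
```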
```
darr
```
Notice that we just see a symbolic representation of the array, including its `shape`, `dtype`, and `chunksize`. No data has been generated yet. Let's visualize the constructed task graph.
```
darr.visualize()
```
Our array has four chunks. To generate it, Dask calls `np.random.random` four times and then concatenates this together into one array.
### Manipulate `dask.array` object as you would a numpy array
Now that we have an `Array`, we can perform standard NumPy-style computations like arithmetic, mathematics, slicing, reductions, and so on.
The interface is familiar, but the actual work is different. `dask_array.sum()` does not do the same thing as `numpy_array.sum()`.
#### What's the difference?
`dask_array.sum()` builds an expression of the computation. It does not do the computation yet. `numpy_array.sum()` computes the sum immediately.
#### Why the difference?
Dask arrays are split into chunks. Each chunk must have computations run on that chunk explicitly. If the desired answer comes from a small slice of the entire dataset, running the computation over all data would be wasteful of CPU and memory.
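For example, if we only need a small slice, the lazy expression only ever touches the chunk that slice lives in (a quick sketch using the `darr` defined above):
```
# Only the single chunk containing this slice appears in the task graph
small = darr[:10, 0, 0].sum()
small.visualize()
```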
```
total = darr.sum()
total
total.visualize()
```
#### Compute result
Dask.array objects are lazily evaluated. Operations like `.sum` build up a graph of blocked tasks to execute.
We ask for the final result with a call to `.compute()`. This triggers the actual computation.
```
%%time
total.compute()
```
### Exercise: Modify the chunk size (or shape) in the random dask array, call `.sum()` on the new array, and visualize how the task graph changes.
```
da.random.random(shape, chunks=(50, 200, 400)).sum().visualize()
```
Here we see Dask's strategy for finding the sum. This simple example illustrates the beauty of Dask: it automatically designs an algorithm appropriate for custom operations with big data.
If we make our operation more complex, the graph gets more complex:
```
z = darr.dot(darr.T).mean(axis=0)[::2, :].std(axis=1)
z
z.visualize()
```
### A Bigger Calculation
The examples above were toy examples; the data (180 MB) is probably not big enough to warrant the use of Dask.
We can make it a lot bigger! Let's create a new, big array
```
darr = da.random.random((8000, 100, 8000), chunks=(1000, 100, 500)).astype('float32')
darr
```
This dataset is `~23 GB`, rather than ~180 MB! This is probably close to or greater than the amount of RAM available in your computer. Nevertheless, Dask has no problem working on it.
```
z = (darr + darr.T)[::2, :].mean(axis=2)
z.visualize()
```
```
%%time
z.compute()
```
```
cluster.close()
client.close()
```
```
%load_ext watermark
%watermark --time --python --updated --iversion
```
---
## Summary
Dask Array does not implement the entire numpy interface. Users expecting this
will be disappointed. Notably Dask Array has the following failings:
1. Dask does not implement all of ``np.linalg``. This has been done by a
number of excellent BLAS/LAPACK implementations and is the focus of
numerous ongoing academic research projects.
2. Dask Array does not support some operations where the resulting shape
depends on the values of the array. For those that it does support
(for example, masking one Dask Array with another boolean mask),
the chunk sizes will be unknown, which may cause issues with other
operations that need to know the chunk sizes.
3. Dask Array does not attempt operations like ``sort`` which are notoriously
difficult to do in parallel and are of somewhat diminished value on very
large data (you rarely actually need a full sort).
   Often we include parallel-friendly alternatives like ``topk`` (see the sketch after this list).
4. Dask development is driven by immediate need, and so many lesser used
functions, like ``np.sometrue`` have not been implemented purely out of
laziness. These would make excellent community contributions.
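A minimal sketch of points 2 and 3 above (not from the original notebook; note that the exact `topk` signature has varied across Dask versions):
```
import dask.array as da

x = da.random.random((1000,), chunks=100)

# Point 2: masking with another dask array yields chunks of unknown size
# (reported as NaN until the result is actually computed).
masked = x[x > 0.5]
print(masked.chunks)

# Point 3: instead of a full sort, use the parallel-friendly topk reduction.
print(da.topk(x, 5).compute())  # the five largest values
```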
## Learn More
Visit the [Array documentation](https://docs.dask.org/en/latest/array.html). In particular, this [array screencast](https://youtu.be/9h_61hXCDuI) will reinforce the concepts you learned here.
```
from IPython.display import YouTubeVideo
YouTubeVideo(id="9h_61hXCDuI", width=600, height=300)
```
## Resources and references
* Reference
* [Dask Docs](https://dask.org/)
* [Dask Examples](https://examples.dask.org/)
* [Dask Code](https://github.com/dask/dask/)
* [Dask Blog](https://blog.dask.org/)
* [Xarray Docs](https://xarray.pydata.org/)
* Ask for help
* [`dask`](http://stackoverflow.com/questions/tagged/dask) tag on Stack Overflow, for usage questions
* [github discussions: dask](https://github.com/dask/dask/discussions) for general, non-bug, discussion, and usage questions
* [github issues: dask](https://github.com/dask/dask/issues/new) for bug reports and feature requests
* [github discussions: xarray](https://github.com/pydata/xarray/discussions) for general, non-bug, discussion, and usage questions
* [github issues: xarray](https://github.com/pydata/xarray/issues/new) for bug reports and feature requests
* Pieces of this notebook are adapted from the following sources
* https://github.com/dask/dask-tutorial/blob/main/03_array.ipynb
* https://github.com/xarray-contrib/xarray-tutorial/blob/master/scipy-tutorial/06_xarray_and_dask.ipynb
<div class="admonition alert alert-success">
<p class="title" style="font-weight:bold">Previous: <a href="./08-dask-delayed.ipynb">Dask Delayed</a></p>
<p class="title" style="font-weight:bold">Next: <a href="./10-dask-and-xarray.ipynb">Dask and Xarray</a></p>
</div>
```
%matplotlib inline
```
# Tight Layout guide
How to use tight-layout to fit plots within your figure cleanly.
*tight_layout* automatically adjusts subplot params so that the
subplot(s) fits in to the figure area. This is an experimental
feature and may not work for some cases. It only checks the extents
of ticklabels, axis labels, and titles.
An alternative to *tight_layout* is :doc:`constrained_layout
</tutorials/intermediate/constrainedlayout_guide>`.
Simple Example
==============
In matplotlib, the location of axes (including subplots) is specified in
normalized figure coordinates. It can happen that your axis labels or
titles (or sometimes even ticklabels) go outside the figure area, and are thus
clipped.
```
# sphinx_gallery_thumbnail_number = 7
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['savefig.facecolor'] = "0.8"
def example_plot(ax, fontsize=12):
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
plt.close('all')
fig, ax = plt.subplots()
example_plot(ax, fontsize=24)
```
To prevent this, the location of axes needs to be adjusted. For
subplots, this can be done by adjusting the subplot params
(`howto-subplots-adjust`). Matplotlib v1.1 introduced a new command, :func:`~matplotlib.pyplot.tight_layout`, that does this automatically for you.
```
fig, ax = plt.subplots()
example_plot(ax, fontsize=24)
plt.tight_layout()
```
Note that :func:`matplotlib.pyplot.tight_layout` will only adjust the
subplot params when it is called. In order to perform this adjustment each
time the figure is redrawn, you can call ``fig.set_tight_layout(True)``, or,
equivalently, set the ``figure.autolayout`` rcParam to ``True``.
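As a small illustrative sketch (not part of the original guide, and reusing the `example_plot` helper defined above), either of the following makes the adjustment happen on every draw:
```
# Option 1: ask this particular figure to apply tight_layout on each draw.
fig, ax = plt.subplots()
fig.set_tight_layout(True)
example_plot(ax, fontsize=24)

# Option 2: make it the default for all new figures via the rcParam.
plt.rcParams['figure.autolayout'] = True
```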
When you have multiple subplots, often you see labels of different
axes overlapping each other.
```
plt.close('all')
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
```
:func:`~matplotlib.pyplot.tight_layout` will also adjust spacing between
subplots to minimize the overlaps.
```
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
```
:func:`~matplotlib.pyplot.tight_layout` can take keyword arguments of
*pad*, *w_pad* and *h_pad*. These control the extra padding around the
figure border and between subplots. The pads are specified in fraction
of fontsize.
```
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
```
:func:`~matplotlib.pyplot.tight_layout` will work even if the sizes of
subplots are different, as long as their grid specification is
compatible. In the example below, *ax1* and *ax2* are subplots of a 2x2
grid, while *ax3* is of a 1x2 grid.
```
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(223)
ax3 = plt.subplot(122)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
plt.tight_layout()
```
It works with subplots created with
:func:`~matplotlib.pyplot.subplot2grid`. In general, subplots created
from the gridspec (:doc:`/tutorials/intermediate/gridspec`) will work.
```
plt.close('all')
fig = plt.figure()
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
```
Although not thoroughly tested, it seems to work for subplots with
aspect != "auto" (e.g., axes with images).
```
arr = np.arange(100).reshape((10, 10))
plt.close('all')
fig = plt.figure(figsize=(5, 4))
ax = plt.subplot(111)
im = ax.imshow(arr, interpolation="none")
plt.tight_layout()
```
Caveats
=======
* :func:`~matplotlib.pyplot.tight_layout` only considers ticklabels, axis
  labels, and titles. Thus, other artists may be clipped and also may
  overlap (see the sketch after this list).
* It assumes that the extra space needed for ticklabels, axis labels,
and titles is independent of original location of axes. This is
often true, but there are rare cases where it is not.
* pad=0 can clip some texts by a few pixels. This may be a bug or
  a limitation of the current algorithm, and it is not clear why it
  happens. Meanwhile, using a pad larger than 0.3 is recommended.
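A minimal sketch of the first caveat (not part of the original guide, reusing the `example_plot` helper from above): a figure-level suptitle is one such artist, so it may still collide with the axes title.
```
plt.close('all')
fig, ax = plt.subplots()
example_plot(ax, fontsize=24)
fig.suptitle('A long figure-level suptitle that tight_layout does not consider')
plt.tight_layout()  # the suptitle may still overlap the axes title
```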
Use with GridSpec
=================
GridSpec has its own :func:`~matplotlib.gridspec.GridSpec.tight_layout` method
(the pyplot api :func:`~matplotlib.pyplot.tight_layout` also works).
```
import matplotlib.gridspec as gridspec
plt.close('all')
fig = plt.figure()
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig)
```
You may provide an optional *rect* parameter, which specifies the bounding box
that the subplots will be fit inside. The coordinates must be in normalized
figure coordinates and the default is (0, 0, 1, 1).
```
fig = plt.figure()
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])
```
For example, this can be used for a figure with multiple gridspecs.
```
fig = plt.figure()
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])
gs2 = gridspec.GridSpec(3, 1)
for ss in gs2:
ax = fig.add_subplot(ss)
example_plot(ax)
ax.set_title("")
ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.5)
# We may try to match the top and bottom of two grids ::
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.update(top=top, bottom=bottom)
gs2.update(top=top, bottom=bottom)
plt.show()
```
While this should be mostly good enough, adjusting top and bottom
may require adjustment of hspace also. To update hspace & vspace, we
call :func:`~matplotlib.gridspec.GridSpec.tight_layout` again with updated
rect argument. Note that the rect argument specifies the area including the
ticklabels, etc. Thus, we will increase the bottom (which is 0 for the normal
case) by the difference between the *bottom* from above and the bottom of each
gridspec. Same thing for the top.
```
fig = plt.gcf()
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])
gs2 = gridspec.GridSpec(3, 1)
for ss in gs2:
ax = fig.add_subplot(ss)
example_plot(ax)
ax.set_title("")
ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.5)
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.update(top=top, bottom=bottom)
gs2.update(top=top, bottom=bottom)
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),
0.5, 1 - (gs1.top-top)])
gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),
None, 1 - (gs2.top-top)],
h_pad=0.5)
```
Legends and Annotations
=======================
Prior to Matplotlib 2.2, legends and annotations were excluded from the bounding
box calculations that decide the layout. Subsequently these artists were
added to the calculation, but sometimes it is undesirable to include them.
For instance, in this case it might be good to have the axes shrink a bit
to make room for the legend:
```
fig, ax = plt.subplots(figsize=(4, 3))
lines = ax.plot(range(10), label='A simple plot')
ax.legend(bbox_to_anchor=(0.7, 0.5), loc='center left',)
fig.tight_layout()
plt.show()
```
However, sometimes this is not desired (quite often when using
``fig.savefig('outname.png', bbox_inches='tight')``). In order to
remove the legend from the bounding box calculation, we simply call
``leg.set_in_layout(False)`` and the legend will be ignored.
```
fig, ax = plt.subplots(figsize=(4, 3))
lines = ax.plot(range(10), label='B simple plot')
leg = ax.legend(bbox_to_anchor=(0.7, 0.5), loc='center left',)
leg.set_in_layout(False)
fig.tight_layout()
plt.show()
```
Use with AxesGrid1
==================
While limited, :mod:`mpl_toolkits.axes_grid1` is also supported.
```
from mpl_toolkits.axes_grid1 import Grid
plt.close('all')
fig = plt.figure()
grid = Grid(fig, rect=111, nrows_ncols=(2, 2),
axes_pad=0.25, label_mode='L',
)
for ax in grid:
example_plot(ax)
ax.title.set_visible(False)
plt.tight_layout()
```
Colorbar
========
If you create a colorbar with the :func:`~matplotlib.pyplot.colorbar`
command, the created colorbar is an instance of Axes, *not* Subplot, so
tight_layout does not work. With Matplotlib v1.1, you may create a
colorbar as a subplot using the gridspec.
```
plt.close('all')
arr = np.arange(100).reshape((10, 10))
fig = plt.figure(figsize=(4, 4))
im = plt.imshow(arr, interpolation="none")
plt.colorbar(im, use_gridspec=True)
plt.tight_layout()
```
Another option is to use AxesGrid1 toolkit to
explicitly create an axes for colorbar.
```
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.close('all')
arr = np.arange(100).reshape((10, 10))
fig = plt.figure(figsize=(4, 4))
im = plt.imshow(arr, interpolation="none")
divider = make_axes_locatable(plt.gca())
cax = divider.append_axes("right", "5%", pad="3%")
plt.colorbar(im, cax=cax)
plt.tight_layout()
```
<a href="https://colab.research.google.com/github/novoforce/Exploring-Tensorflow/blob/main/2_ANN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
```
# Data Preparation and Loading
```
(x_train,y_train), (x_test,y_test)= mnist.load_data()
print(x_train.shape,y_train.shape)
#reshaping the tensor shape so as to feed into the neural network
x_train= x_train.reshape(-1,28*28).astype('float32') / 255.0
x_test= x_test.reshape(-1,28*28).astype('float32') / 255.0
print('reshaped tensors:> ',x_train.shape,x_test.shape,type(x_train),type(x_test))
```
# Create the AI model (Sequential API)
It is very convenient to use but not flexible. If the model to be built has **1 input and 1 output** then the **Sequential API** is the way to create the AI model, **otherwise** we have to rely on the **Functional API**.
```
model= keras.Sequential(
[
layers.Dense(512,activation='relu'),
layers.Dense(256,activation='relu'),
layers.Dense(10),
]
)
```
# Compile the AI model
```
# Since we have not defined a SOFTMAX activation on the output layer, we use 'from_logits=True'
# Difference between "sparse categorical crossentropy" and "categorical crossentropy" is that the latter expects one-hot encoded labels
# and the former expects integer labels
model.compile(
    loss= keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer= keras.optimizers.Adam(learning_rate=0.001),
metrics=['accuracy']
)
```
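As a small hedged sketch (not part of the original notebook), the snippet below contrasts the two losses: both give the same value when the labels encode the same classes, they just expect different label formats.
```
import numpy as np
import tensorflow as tf

y_int = np.array([2, 0])                                         # integer labels
y_onehot = tf.keras.utils.to_categorical(y_int, num_classes=3)   # one-hot labels
logits = np.array([[0.1, 0.2, 3.0], [2.5, 0.3, 0.1]], dtype='float32')

sparse_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
cat_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)

# Same value, different label formats
print(sparse_loss(y_int, logits).numpy(), cat_loss(y_onehot, logits).numpy())
```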
# Train and Evaluate the model
```
model.fit(x_train,y_train,batch_size=32,epochs=5,verbose=2)
model.evaluate(x_test,y_test,batch_size=32,verbose=2)
```
# Summary of the model
Suppose we want to print the summary of the model. This can be done by passing a `keras.Input(shape=())` layer to the model definition and then calling `model.summary()`.
```
model= keras.Sequential(
[
keras.Input(shape=(28*28)),
layers.Dense(512,activation='relu'),
layers.Dense(256,activation='relu'),
layers.Dense(10),
]
)
print(model.summary())
```
# Another way of defining the sequential model
This is helpful for debugging, as we can add `model.summary()` between layers and check the parameter info at each point.
```
model= keras.Sequential()
model.add(keras.Input(shape=(28*28)))
model.summary() #<----------------------- check the model summary at this point
model.add(layers.Dense(512,activation='relu'))
model.add(layers.Dense(256,activation='relu'))
model.summary() #<----------------------- check the model summary at this point
model.add(layers.Dense(10))
```
# Functional API
This API can handle multiple inputs and multiple outputs, so it's more flexible.
In the example below we define the same model as above in the Functional API form.
```
input= keras.Input(shape=(28*28))
x= layers.Dense(512,activation='relu')(input)
x= layers.Dense(256,activation='relu')(x)
output= layers.Dense(10,activation='softmax')(x)
model= keras.Model(inputs= input, outputs= output)
#compile the model
model.compile(
loss= keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    optimizer= keras.optimizers.Adam(learning_rate=0.001),
metrics=['accuracy']
)
#fit the model and evaluate the model
model.fit(x_train,y_train,batch_size=32,epochs=5,verbose=2)
model.evaluate(x_test,y_test,batch_size=32,verbose=2)
```
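To illustrate the extra flexibility, here is a hedged sketch (not part of the original notebook; the second input, layer sizes and names are made up) of a model with two inputs and two outputs:
```
from tensorflow import keras
from tensorflow.keras import layers

# Two inputs (e.g. the flattened image plus some made-up metadata), two outputs.
img_in = keras.Input(shape=(28*28,), name='image_input')
meta_in = keras.Input(shape=(10,), name='meta_input')

x = layers.Concatenate()([layers.Dense(128, activation='relu')(img_in),
                          layers.Dense(16, activation='relu')(meta_in)])

class_out = layers.Dense(10, activation='softmax', name='class_output')(x)
aux_out = layers.Dense(1, name='aux_output')(x)

multi_model = keras.Model(inputs=[img_in, meta_in], outputs=[class_out, aux_out])

# One loss per output; Keras matches them to the outputs by position.
multi_model.compile(optimizer='adam',
                    loss=['sparse_categorical_crossentropy', 'mse'])
multi_model.summary()
```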
# Custom naming of the layers
```
input= keras.Input(shape=(28*28),name='input_layer')
x= layers.Dense(512,activation='relu',name='first_layer')(input)
x= layers.Dense(256,activation='relu',name='second_layer')(x)
output= layers.Dense(10,activation='softmax',name='final_layer')(x)
model= keras.Model(inputs= input, outputs= output)
model.summary()
```
# How to get the output of an intermediate layer?
```
model= keras.Sequential()
model.add(keras.Input(shape=(28*28)))
model.add(layers.Dense(512,activation='relu'))
model.add(layers.Dense(256,activation='relu'))
model.add(layers.Dense(10))
model= keras.Model(inputs=model.inputs,outputs=[model.layers[-2].output]) #-2 is second last layer
feature= model.predict(x_train) #we get the feature(output of the -2 layer)
print(feature.shape)
#suppose we want to visualize the output of layer named 'check'
model= keras.Sequential()
model.add(keras.Input(shape=(28*28)))
model.add(layers.Dense(512,activation='relu'))
model.add(layers.Dense(256,activation='relu',name='check'))
model.add(layers.Dense(10))
model= keras.Model(inputs=model.inputs,outputs=[model.get_layer('check').output]) # select the layer by its name
feature= model.predict(x_train) # we get the feature (output of the 'check' layer)
print(feature.shape)
#suppose we want the output of all the layers
model= keras.Model(inputs=model.inputs,outputs=[layer.output for layer in model.layers]) # one output per layer
features= model.predict(x_train) # we get the outputs of all layers
for feature in features:
print(feature.shape)
```
# Rolling Update Tests
Check rolling updates function as expected.
```
import json
import time
```
Before we get started, we'd like to make sure that we're making all the changes in a new, empty namespace named `seldon`:
```
!kubectl create namespace seldon
!kubectl config set-context $(kubectl config current-context) --namespace=seldon
```
## Change Image
We'll try modifying an image and see how the rolling update applies the change.
We'll first create the following model:
```
%%writefile resources/fixed_v1.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.1
name: classifier
graph:
name: classifier
type: MODEL
name: default
replicas: 3
```
Now we can run that model and wait until it's released
```
!kubectl apply -f resources/fixed_v1.yaml
!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
-o jsonpath='{.items[0].metadata.name}')
```
Let's confirm that the state of the model is Available
```
for i in range(60):
state=!kubectl get sdep fixed -o jsonpath='{.status.state}'
state=state[0]
print(state)
if state=="Available":
break
time.sleep(1)
assert(state=="Available")
!curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
-X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
-H "Content-Type: application/json"
```
Now we can modify the model by providing a new image name, using the following config file:
```
%%writefile resources/fixed_v2.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.2
name: classifier
graph:
name: classifier
type: MODEL
name: default
replicas: 3
!kubectl apply -f resources/fixed_v2.yaml
```
Now let's actually send a couple of requests to make sure that there are no failed requests as the rolling update is performed
```
time.sleep(5) # To allow operator to start the update
for i in range(120):
responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
try:
response = json.loads(responseRaw[0])
except:
print("Failed to parse json",responseRaw)
continue
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numReplicas = int(resources["items"][0]["status"]["replicas"])
if numReplicas == 3:
break
time.sleep(1)
print("Rollout Success")
!kubectl delete -f resources/fixed_v1.yaml
```
## Separate Service Orchestrator
We can test that the rolling update works when we use the annotation that allows us to have the service orchestrator on a separate pod, namely `seldon.io/engine-separate-pod: "true"`, as per the config file below:
```
%%writefile resources/fixed_v1_sep.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
annotations:
seldon.io/engine-separate-pod: "true"
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.1
name: classifier
graph:
name: classifier
type: MODEL
name: default
replicas: 1
!kubectl apply -f resources/fixed_v1_sep.yaml
!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
-o jsonpath='{.items[0].metadata.name}')
```
We can wait until the pod is available before starting the rolling update.
```
for i in range(60):
state=!kubectl get sdep fixed -o jsonpath='{.status.state}'
state=state[0]
print(state)
if state=="Available":
break
time.sleep(1)
assert(state=="Available")
!curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
-X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
-H "Content-Type: application/json"
```
Now we can trigger a rolling update by changing the version of the Docker image.
```
%%writefile resources/fixed_v2_sep.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
annotations:
seldon.io/engine-separate-pod: "true"
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.2
name: classifier
graph:
name: classifier
type: MODEL
name: default
replicas: 1
!kubectl apply -f resources/fixed_v2_sep.yaml
```
And we can send requests to confirm that the rolling update is performed without interruptions
```
time.sleep(5) # To allow operator to start the update
for i in range(120):
responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
try:
response = json.loads(responseRaw[0])
except:
print("Failed to parse json",responseRaw)
continue
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numReplicas = int(resources["items"][0]["status"]["replicas"])
if numReplicas == 1:
break
time.sleep(1)
print("Rollout Success")
!kubectl delete -f resources/fixed_v1_sep.yaml
```
## Two PodSpecs
We can test that the rolling update works when we have multiple podSpecs in our deployment.
```
%%writefile resources/fixed_v1_2podspecs.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.1
name: classifier1
- spec:
containers:
- image: seldonio/fixed-model:0.1
name: classifier2
graph:
name: classifier1
type: MODEL
children:
- name: classifier2
type: MODEL
name: default
replicas: 1
!kubectl apply -f resources/fixed_v1_2podspecs.yaml
!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
-o jsonpath='{.items[0].metadata.name}')
```
We can wait until the pod is available before starting the rolling update.
```
for i in range(60):
state=!kubectl get sdep fixed -o jsonpath='{.status.state}'
state=state[0]
print(state)
if state=="Available":
break
time.sleep(1)
assert(state=="Available")
!curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
-X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
-H "Content-Type: application/json"
```
Now we can trigger a rolling update by changing the version of the Docker image.
```
%%writefile resources/fixed_v2_2podspecs.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.1
name: classifier1
- spec:
containers:
- image: seldonio/fixed-model:0.2
name: classifier2
graph:
name: classifier1
type: MODEL
children:
- name: classifier2
type: MODEL
name: default
replicas: 1
!kubectl apply -f resources/fixed_v2_2podspecs.yaml
```
And we can send requests to confirm that the rolling update is performed without interruptions
```
time.sleep(5) # To allow operator to start the update
for i in range(120):
responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
try:
response = json.loads(responseRaw[0])
except:
print("Failed to parse json",responseRaw)
continue
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numReplicas = int(resources["items"][0]["status"]["replicas"])
if numReplicas == 1:
break
time.sleep(1)
print("Rollout Success")
!kubectl delete -f resources/fixed_v1_2podspecs.yaml
```
## Two Models
We can test that the rolling update works when we have two predictors / models in our deployment.
```
%%writefile resources/fixed_v1_2models.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.1
name: classifier
- image: seldonio/fixed-model:0.1
name: classifier2
graph:
name: classifier
type: MODEL
children:
- name: classifier2
type: MODEL
name: default
replicas: 3
!kubectl apply -f resources/fixed_v1_2models.yaml
!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
-o jsonpath='{.items[0].metadata.name}')
```
We can wait until the pod is available before starting the rolling update.
```
for i in range(60):
state=!kubectl get sdep fixed -o jsonpath='{.status.state}'
state=state[0]
print(state)
if state=="Available":
break
time.sleep(1)
assert(state=="Available")
!curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
-X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
-H "Content-Type: application/json"
```
Now we can trigger a rolling update by changing the version of the Docker image.
```
%%writefile resources/fixed_v2_2models.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.2
name: classifier
- image: seldonio/fixed-model:0.2
name: classifier2
graph:
name: classifier
type: MODEL
children:
- name: classifier2
type: MODEL
name: default
replicas: 3
!kubectl apply -f resources/fixed_v2_2models.yaml
```
And we can send requests to confirm that the rolling update is performed without interruptions
```
time.sleep(5) # To allow operator to start the update
for i in range(120):
responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
try:
response = json.loads(responseRaw[0])
except:
print("Failed to parse json",responseRaw)
continue
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numReplicas = int(resources["items"][0]["status"]["replicas"])
if numReplicas == 3:
break
time.sleep(1)
print("Rollout Success")
!kubectl delete -f resources/fixed_v2_2models.yaml
```
## Model name changes
This will not do a rolling update but create a new deployment.
```
%%writefile resources/fixed_v1.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.1
name: classifier
graph:
name: classifier
type: MODEL
name: default
replicas: 3
!kubectl apply -f resources/fixed_v1.yaml
```
We can wait until the pod is available.
```
!kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=fixed \
-o jsonpath='{.items[0].metadata.name}')
for i in range(60):
state=!kubectl get sdep fixed -o jsonpath='{.status.state}'
state=state[0]
print(state)
if state=="Available":
break
time.sleep(1)
assert(state=="Available")
!curl -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' \
-X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions \
-H "Content-Type: application/json"
```
Now when we apply the update, we should see the change taking place, but there should not be an actual full rolling update triggered.
```
%%writefile resources/fixed_v2_new_name.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: fixed
spec:
name: fixed
protocol: seldon
transport: rest
predictors:
- componentSpecs:
- spec:
containers:
- image: seldonio/fixed-model:0.2
name: classifier2
graph:
name: classifier2
type: MODEL
name: default
replicas: 3
!kubectl apply -f resources/fixed_v2_new_name.yaml
time.sleep(5)
for i in range(120):
responseRaw=!curl -s -d '{"data": {"ndarray":[[1.0, 2.0, 5.0]]}}' -X POST http://localhost:8003/seldon/seldon/fixed/api/v1.0/predictions -H "Content-Type: application/json"
try:
response = json.loads(responseRaw[0])
except:
print("Failed to parse json",responseRaw)
continue
assert(response['data']['ndarray'][0]==1 or response['data']['ndarray'][0]==5)
jsonRaw=!kubectl get deploy -l seldon-deployment-id=fixed -o json
data="".join(jsonRaw)
resources = json.loads(data)
numItems = len(resources["items"])
if numItems == 1:
break
time.sleep(1)
print("Rollout Success")
!kubectl delete -f resources/fixed_v2_new_name.yaml
```
## Deploy a generic Flist
#### Requirements
In order to be able to deploy this example deployment you will have to have the following components activated
- the TF Grid SDK, in the form of a local container with the SDK, or a grid based SDK container. Getting started instructions are [here](https://github.com/Threefoldfoundation/info_projectX/tree/development/doc/jumpscale_SDK)
- if you use a locally installed container with the 3Bot SDK you need to have the wireguard software installed. Instructions on how to get this installed on your platform can be found [here](https://www.wireguard.com/install/)
- capacity reservations are not free, so you will need some ThreeFold_Tokens (TFT) to play around with. Instructions on how to get tokens can be found [here](https://github.com/Threefoldfoundation/info_projectX/blob/development/doc/jumpscale_SDK_information/payment/FreeTFT_testtoken.md)
After following these install instructions you should end up with a local, working TF Grid SDK. You can work with / connect to the installed SDK as described [here](https://github.com/Threefoldfoundation/info_projectX/blob/development/doc/jumpscale_SDK/SDK_getting_started.md)
### Overview
To design a simple kubernetes cluster we need to follow a few steps:
- create (or identify and use) an overlay network that spans all of the nodes needed in the solution
- identify which nodes are involved in the kubernetes cluster, master and worker nodes
- create reservations for the kubernetes virtual machines.
- deploy the kubernetes cluster.
#### Create an overlay network or identify a previously deployed overlay network
Each overlay network is private and contains private IP addresses. Each overlay network is deployed in such a way that it has no direct connection to the public (IPv4 or IPv6) network. In order to work with such a network, a tunnel needs to be created between the overlay network on the grid and your local network. You can find instructions on how to do that [here](https://github.com/Threefoldfoundation/info_projectX/blob/development/doc/jumpscale_SDK_examples/network/overlay_network.md)
#### Set up the capacity environment to find, reserve and configure
Make sure that your SDK points to the mainnet explorer for deploying this capacity example. Also make sure you have an identity loaded. The example code uses the default identity. Multiple identities can be stored in the TF Grid SDK. To check which identities are available to you, type `j.tools.threebot.me` in the kosmos shell.
```
j.clients.explorer.default_addr_set('explorer.grid.tf')
# Which identities are available in you SDK
j.tools.threebot.me
# Make sure I have an identity (set default one for mainnet of testnet)
me = j.tools.threebot.me.default
# Load the zero-os sal and create an empty reservation
zos = j.sal.zosv2
r = zos.reservation_create()
```
#### What is an Flist?
An Flist is a very special kind of container image. One of the challenges with industry-leading technologies like docker and kubernetes is that every node involved in an IT architecture has to have local copies of all of the images it needs to run as containers. These could either be base images on which specific modifications need to be made, or specific images downloaded from the docker hub or a private image repository (enterprise use cases). Having these images exist on many different nodes requires them to be downloaded and maintained for version control and bug fixes. This is wasteful (the same image takes up storage space many times over) and time consuming.
The Flist solves that issue by making container images available on the fly, over the network, to nodes that need the content of a container image from a so-called hub. There is a public hub that serves images, but the hub facility is open source and can be replicated for private or corporate usage. The hub can be found here: `http://hub.grid.tf`.
The Flist represents a very efficient way to distribute de-duplicated container images, with a bandwidth-optimised transfer process and increased security through signed files. For (a lot) more details please go here:
* generic description [here](https://github.com/Threefoldtech/0-Flist/blob/development/doc/Flist.md)
* GitHub repository [here](https://github.com/Threefoldtech/0-Flist)
* FreeFlow pages [article](http://freeflowpages.com/content/perma?id=9396)
The public hub has import functionality to import docker images and create Flists out of them. Another way is to create your own tar archives and upload these to be transformed into Flists. More information on creating, managing and using Flists can be found [here](https://hub.grid.tf/)
#### Select which Flist to deploy?
For this example we selected the code-server Flist on the public hub. The code-server Flist is based on open-source software managed here: https://github.com/Microsoft/vscode. It is Visual Studio Code, providing a very feature-rich coding and code management environment. The Flist can be found [here](https://hub.grid.tf/weynandkuijpers.3Bot/codercom-code-server-latest.Flist).
#### Node selection and parameters.
You have created a network in the network creation [notebook](https://github.com/Threefoldfoundation/info_projectX/blob/development/code/jupyter/SDK_examples/network/overlay_network.ipynb) with the following details:
```python
demo_ip_range="172.20.0.0/16"
demo_port=8030
demo_network_name="demo_network_name_01"
```
When you executed the reservation it also provided you with data on the order number, node ID and private network range on each node. All the nodes in the network are connected peer-to-peer with a wireguard tunnel. On these nodes we can now launch the Flist. For this solution we will be using some of these nodes as master nodes and others as worker nodes. Using the output of the network reservation notebook, the high level design of the kubernetes cluster is:
| Nr. | Location | Node ID | IPv4 network | Function |
|--------|---|---|---|---|
| 1 | Salzburg | 9kcLeTuseybGHGWw2YXvdu4kk2jZzyZCaCHV9t6Axqqx | 172.20.15.0/24 | Available |
| 2 | Salzburg | 3h4TKp11bNWjb2UemgrVwayuPnYcs2M1bccXvi3jPR2Y | 172.20.16.0/24 | Available |
| 3 | Salzburg | FUq4Sz7CdafZYV2qJmTe3Rs4U4fxtJFcnV6mPNgGbmRg | 172.20.17.0/24 | Available|
| 4 | Vienna | 9LmpYPBhnrL9VrboNmycJoGfGDjuaMNGsGQKeqrUMSii | 172.20.28.0/24 | Available |
| 5 | Vienna | 3FPB4fPoxw8WMHsqdLHamfXAdUrcRwdZY7hxsFQt3odL | 172.20.29.0/24 | Available |
| 6 | Vienna | CrgLXq3w2Pavr7XrVA7HweH6LJvLWnKPwUbttcNNgJX7 | 172.20.30.0/24 | Available |
The reservation for a general-purpose Flist has the following structure:
```python
zos.container.create(reservation=r,
node_id=string, # node_id to make the capacity reservation on and deploy the Flist
network_name=string, # network_name deployed on the node (node could have multiple private networks)
ip_address=string, # one IP address in the range of the chosen network_name on the node
                     Flist=string, # Flist of the container you want to install, http hub location.
                     interactive=boolean, # True or False. When True the entrypoint start command is ignored and a web interface to the coreX process is started instead
                     cpu=integer, # number of logical cores
                     memory=integer, # number of MBs of memory
# env={}, # field for parameters like needed in the container environment
entrypoint=string) # start command to get the software running in the container
```
For more details and options please see [here](https://github.com/Threefoldtech/jumpscaleX_libs/blob/master/JumpscaleLibs/sal/zosv2/container.py)
Providing the correct details allows us to deploy the code-server container.
```
r = zos.reservation_create()
# Add data to method to what to deploy. Example is code server
zos.container.create(reservation=r,
node_id='CrgLXq3w2Pavr7XrVA7HweH6LJvLWnKPwUbttcNNgJX7', # one of the node_id that is part of the network
                     network_name=u_networkname, # this assumes the network is already provisioned on the node
ip_address='172.20.30.11', # part of ip_range you reserved for your network xxx.xxx.1.10
Flist='https://hub.grid.tf/weynandkuijpers.3Bot/codercom-code-server-latest.Flist', # Flist of the container you want to install
interactive=True, # True only if corex_connect required, default false
cpu=4,
memory=4196,
# env={}, # field for parameters like config
entrypoint='/sbin/my_init')
```
Having defined the reservation structure `r` we can now deploy. In this example we reserve for 5 minutes (adapt if required):
```
# methods needed to do time calculations
import time
# reserve until now + (x) seconds
expiration = j.data.time.epoch + (5*60)
# register the reservation
rid = zos.reservation_register(r, expiration, identity=me)
time.sleep(5)
# inspect the result of the reservation provisioning
result = zos.reservation_result(rid)
```
The reservation had the interactive flag set to True, which means the container did not run the entrypoint bootstrap command. Instead a secure web interface to the coreX process was created, through which we can now manually enter the container and start and stop processes. Access is provided through the browser; the connection itself runs inside the encrypted WireGuard tunnel.
```
# See the coreX interface (should be empty)
https://172.20.30.11:7681/
# Start a bash shell through the coreX process manager
https://172.20.30.11:7681/api/process/start?arg[]=/bin/bash
# See the list of processes in coreX
https://172.20.30.11:7681/
# Click on the bash process to get shell access.
```
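The same coreX endpoints can also be scripted. Below is a minimal sketch using the Python `requests` package; the container IP, port and the `arg[]` query parameter are taken from the URLs above, while the assumption that the endpoint presents a self-signed certificate (hence `verify=False` and the suppressed warning) is ours.
```python
import requests
import urllib3

urllib3.disable_warnings()  # assumption: the coreX endpoint uses a self-signed certificate
COREX = "https://172.20.30.11:7681"

def corex_start(command):
    # mirrors /api/process/start?arg[]=<command> from the manual steps above
    resp = requests.get(f"{COREX}/api/process/start?arg[]={command}", verify=False)
    resp.raise_for_status()
    return resp.text

print(corex_start("/bin/bash"))
```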
```
import diff_classifier.aws as aws
import diff_classifier.utils as ut
import diff_classifier.msd as msd
import diff_classifier.features as ft
import numpy as np
import pandas as pd
import skimage.io as sio
import numpy.ma as ma
import pandas.util.testing as pdt
import numpy.testing as npt
folder = 'Gel_Studies/08_14_18_gel_validation'
file = 'Traj_100nm_XY01_0_1.csv'
#aws.download_s3('{}/{}'.format(folder, file), file, bucket_name='ccurtis.data')
Traj = ut.csv_to_pd(file)
data1 = {'Frame': [0, 1, 2, 3, 4, 0, 1, 2, 3, 4],
'Track_ID': [1, 1, 1, 1, 1, 2, 2, 2, 2, 2],
'X': [5, 6, 7, 8, 9, 1, 2, 3, 4, 5],
'Y': [6, 7, 8, 9, 10, 2, 3, 4, 5, 6],
'Quality': [10, 10, 10, 10, 10, 10, 10, 10, 10, 10],
'SN_Ratio': [0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
'Mean_Intensity': [10, 10, 10, 10, 10, 10, 10, 10, 10, 10]}
df = pd.DataFrame(data=data1)
length = max(df['Frame']) + 1
testmsd = msd.all_msds2(df, frames=length)
testft = ft.calculate_features(testmsd, frame=3)
testft
# quick scratch checks on the MSD table computed above (assuming `testmsd` is the intended table)
np.nanmean(testmsd[testmsd['Track_ID']==1]['Mean_Intensity'].values)
try: testmsd[testmsd['Track_ID']==1]['MSDs'][7]
except KeyError: print('NaN')
def calculate_features(dframe, framerate=1, frame=100):
"""Calculates multiple features from input MSD dataset and stores in pandas
dataframe.
Parameters
----------
dframe : pandas.core.frame.DataFrame
Output from msd.all_msds2. Must have at a minimum the following
columns:
Track_ID, Frame, X, Y, and MSDs.
framerate : int or float
Framerate of the input videos from which trajectories were calculated.
Required for accurate calculation of some features. Default is 1.
Possibly not required. Ignore if performing all calcuations without
units.
frame : int
Frame at which to calculate Deff
Returns
-------
datai: pandas.core.frame.DataFrame
Contains a row for each trajectory in dframe. Holds the following
features of each trajetory: Track_ID, alpha, D_fit, kurtosis,
asymmetry1, asymmetry2, asymmetry3, aspect ratio (AR), elongation,
boundedness, fractal dimension (fractal_dim), trappedness, efficiency,
straightness, MSD ratio, frames, X, and Y.
Examples
--------
See example outputs from individual feature functions.
"""
# Skeleton of Trajectory features metadata table.
# Builds entry for each unique Track ID.
holder = dframe.Track_ID.unique().astype(float)
die = {'Track_ID': holder,
'alpha': holder,
'D_fit': holder,
'kurtosis': holder,
'asymmetry1': holder,
'asymmetry2': holder,
'asymmetry3': holder,
'AR': holder,
'elongation': holder,
'boundedness': holder,
'fractal_dim': holder,
'trappedness': holder,
'efficiency': holder,
'straightness': holder,
'MSD_ratio': holder,
'frames': holder,
'X': holder,
'Y': holder,
'Quality': holder,
'Mean_Intensity': holder,
'SN_Ratio': holder,
'Deff': holder}
datai = pd.DataFrame(data=die)
trackids = dframe.Track_ID.unique()
partcount = trackids.shape[0]
for particle in range(0, partcount):
single_track_masked =\
dframe.loc[dframe['Track_ID'] ==
trackids[particle]].sort_values(['Track_ID', 'Frame'],
ascending=[
1,
1]).reset_index(drop=True)
single_track = unmask_track(single_track_masked)
(datai['alpha'][particle],
datai['D_fit'][particle]) = alpha_calc(single_track)
datai['kurtosis'][particle] = kurtosis(single_track)
(eig1, eig2, datai['asymmetry1'][particle],
datai['asymmetry2'][particle],
datai['asymmetry3'][particle]) = asymmetry(single_track)
(datai['AR'][particle], datai['elongation'][particle],
(datai['X'][particle],
datai['Y'][particle])) = aspectratio(single_track)
(datai['boundedness'][particle], datai['fractal_dim'][particle],
datai['trappedness'][particle]) = boundedness(single_track, framerate)
(datai['efficiency'][particle],
datai['straightness'][particle]) = efficiency(single_track)
datai['frames'][particle] = single_track.shape[0]
if single_track['Frame'][single_track.shape[0]-2] > 2:
datai['MSD_ratio'][particle] = msd_ratio(single_track, 2,
single_track['Frame'][
single_track.shape[0]-2])
else:
datai['MSD_ratio'][particle] = np.nan
        try:
            datai['Deff'][particle] = single_track['MSDs'][frame] / (4*frame)
        except KeyError:
            datai['Deff'][particle] = np.nan
        # per-particle means of the image-quality columns
        datai['Mean_Intensity'][particle] = np.nanmean(single_track['Mean_Intensity'].values)
        datai['Quality'][particle] = np.nanmean(single_track['Quality'].values)
        datai['SN_Ratio'][particle] = np.nanmean(single_track['SN_Ratio'].values)
return datai
```
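For a quick cross-check of the per-particle intensity, quality and signal-to-noise averages that `calculate_features` fills in at the end of its loop, the same means can be computed directly from the synthetic input frame built above (this only uses the `df` defined in this notebook):
```python
# expected per-track means from the synthetic input data
print(df.groupby('Track_ID')[['Mean_Intensity', 'Quality', 'SN_Ratio']].mean())
```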
# TimeEval parameter optimization result analysis (Part 4)
```
# imports
import json
import warnings
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from pathlib import Path
from timeeval import Datasets
```
## Configuration
Define data and results folder:
```
# constants and configuration
data_path = Path("/home/projects/akita/data") / "test-cases"
result_root_path = Path("/home/projects/akita/results")
experiment_result_folder = "2021-10-11_optim-part4"
# build paths
result_paths = [d for d in result_root_path.iterdir() if d.is_dir()]
print("Available result directories:")
display(result_paths)
result_path = result_root_path / experiment_result_folder
print("\nSelecting:")
print(f"Data path: {data_path.resolve()}")
print(f"Result path: {result_path.resolve()}")
```
Load results and dataset metadata:
```
# load results
print(f"Reading results from {result_path.resolve()}")
df = pd.read_csv(result_path / "results.csv")
# add dataset_name column
df["dataset_name"] = df["dataset"].str.split(".").str[0]
# load dataset metadata
dmgr = Datasets(data_path)
```
Extract the names of the optimized parameters that were iterated over in this run (per algorithm):
```
algo_param_mapping = {}
algorithms = df["algorithm"].unique()
param_ignore_list = ["max_anomaly_window_size", "anomaly_window_size", "neighbourhood_size", "window_size", "n_init_train", "embed_dim_range"]
for algo in algorithms:
param_sets = df.loc[df["algorithm"] == algo, "hyper_params"].unique()
param_sets = [json.loads(ps) for ps in param_sets]
param_names = np.unique([name for ps in param_sets for name in ps if name not in param_ignore_list])
search_space = set()
for param_name in param_names:
values = []
for ps in param_sets:
try:
values.append(ps[param_name])
            except KeyError:
                pass
values = np.unique(values)
if values.shape[0] > 1:
search_space.add(param_name)
algo_param_mapping[algo] = list(search_space)
for algo in algo_param_mapping:
print(algo, algo_param_mapping[algo])
```
Extract optimized parameters and their values (columns: optim_param_name and optim_param_value) for each experiment:
```
def extract_hyper_params(algo):
param_names = algo_param_mapping[algo]
def extract(value):
params = json.loads(value)
result = None
for name in param_names:
try:
value = params[name]
if isinstance(value, list):
value = repr(value)
result = pd.Series([name, value], index=["optim_param_name", "optim_param_value"])
break
except KeyError:
pass
if result is None:
return pd.Series([np.nan, np.nan], index=["optim_param_name", "optim_param_value"])
return result
return extract
df[["optim_param_name", "optim_param_value"]] = ""
for algo in algo_param_mapping:
df_algo = df.loc[df["algorithm"] == algo]
df.loc[df_algo.index, ["optim_param_name", "optim_param_value"]] = df_algo["hyper_params"].apply(extract_hyper_params(algo))
```
Extract window size parameters (dependent params) and convert them into multiples of the dataset period size:
```
dependent_param_names = ["neighbourhood_size", "window_size"]
def extract_window_param(value, param_name=""):
params = json.loads(value)
try:
return params[param_name]
except KeyError:
return 0
for param_name in dependent_param_names:
s_windows = df["hyper_params"].apply(extract_window_param, param_name=param_name)
df2 = df[s_windows > 0][["dataset"]].copy()
df2[param_name] = s_windows[df2.index]
df2["period_size"] = df2["dataset"].apply(lambda d: dmgr.get(("GutenTAG", d)).period_size)
df2["optim_param_name"] = param_name
df2["optim_param_value"] = df2[param_name] / df2["period_size"]
df2["optim_param_value"] = (df2["optim_param_value"]
.fillna(df2[param_name])
.round(1)
.replace(50., 0.5)
.replace(100, 1.0)
.replace(150, 1.5)
.replace(200, 2.0))
df.loc[df2.index, ["optim_param_name", "optim_param_value"]] = df2[["optim_param_name", "optim_param_value"]]
```
Define utility functions
```
def load_scores_df(algorithm_name, dataset_id, optim_params, repetition=1):
params_id = df.loc[(df["algorithm"] == algorithm_name) & (df["collection"] == dataset_id[0]) & (df["dataset"] == dataset_id[1]) & (df["optim_param_name"] == optim_params[0]) & (df["optim_param_value"] == optim_params[1]), "hyper_params_id"].item()
path = (
result_path /
algorithm_name /
params_id /
dataset_id[0] /
dataset_id[1] /
str(repetition) /
"anomaly_scores.ts"
)
return pd.read_csv(path, header=None)
```
Define plotting functions:
```
default_use_plotly = True
try:
import plotly.offline
except ImportError:
default_use_plotly = False
def plot_scores(algorithm_name, dataset_name, use_plotly: bool = default_use_plotly, **kwargs):
if isinstance(algorithm_name, tuple):
algorithms = [algorithm_name]
elif not isinstance(algorithm_name, list):
raise ValueError("Please supply a tuple (algorithm_name, optim_param_name, optim_param_value) or a list thereof as first argument!")
else:
algorithms = algorithm_name
# construct dataset ID
dataset_id = ("GutenTAG", f"{dataset_name}.unsupervised")
# load dataset details
df_dataset = dmgr.get_dataset_df(dataset_id)
# check if dataset is multivariate
dataset_dim = df.loc[df["dataset_name"] == dataset_name, "dataset_input_dimensionality"].unique().item()
dataset_dim = dataset_dim.lower()
auroc = {}
df_scores = pd.DataFrame(index=df_dataset.index)
skip_algos = []
algos = []
for algo, optim_param_name, optim_param_value in algorithms:
optim_params = f"{optim_param_name}={optim_param_value}"
algos.append((algo, optim_params))
# get algorithm metric results
try:
auroc[(algo, optim_params)] = df.loc[
(df["algorithm"] == algo) & (df["dataset_name"] == dataset_name) & (df["optim_param_name"] == optim_param_name) & (df["optim_param_value"] == optim_param_value),
"ROC_AUC"
].item()
except ValueError:
warnings.warn(f"No ROC_AUC score found! Probably {algo} with params {optim_params} was not executed on {dataset_name}.")
auroc[(algo, optim_params)] = -1
skip_algos.append((algo, optim_params))
continue
# load scores
training_type = df.loc[df["algorithm"] == algo, "algo_training_type"].values[0].lower().replace("_", "-")
try:
df_scores[(algo, optim_params)] = load_scores_df(algo, ("GutenTAG", f"{dataset_name}.{training_type}"), (optim_param_name, optim_param_value)).iloc[:, 0]
except (ValueError, FileNotFoundError):
warnings.warn(f"No anomaly scores found! Probably {algo} was not executed on {dataset_name} with params {optim_params}.")
df_scores[(algo, optim_params)] = np.nan
skip_algos.append((algo, optim_params))
algorithms = [a for a in algos if a not in skip_algos]
if use_plotly:
return plot_scores_plotly(algorithms, auroc, df_scores, df_dataset, dataset_dim, dataset_name, **kwargs)
else:
return plot_scores_plt(algorithms, auroc, df_scores, df_dataset, dataset_dim, dataset_name, **kwargs)
def plot_scores_plotly(algorithms, auroc, df_scores, df_dataset, dataset_dim, dataset_name, **kwargs):
import plotly.offline as py
import plotly.graph_objects as go
import plotly.figure_factory as ff
import plotly.express as px
from plotly.subplots import make_subplots
# Create plot
fig = make_subplots(2, 1)
if dataset_dim == "multivariate":
for i in range(1, df_dataset.shape[1]-1):
fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset.iloc[:, i], name=f"channel-{i}"), 1, 1)
else:
fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset.iloc[:, 1], name="timeseries"), 1, 1)
fig.add_trace(go.Scatter(x=df_dataset.index, y=df_dataset["is_anomaly"], name="label"), 2, 1)
for item in algorithms:
algo, optim_params = item
fig.add_trace(go.Scatter(x=df_scores.index, y=df_scores[item], name=f"{algo}={auroc[item]:.4f} ({optim_params})"), 2, 1)
fig.update_xaxes(matches="x")
fig.update_layout(
title=f"Results of {','.join(np.unique([a for a, _ in algorithms]))} on {dataset_name}",
height=400
)
return py.iplot(fig)
def plot_scores_plt(algorithms, auroc, df_scores, df_dataset, dataset_dim, dataset_name, **kwargs):
import matplotlib.pyplot as plt
# Create plot
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(20, 8))
if dataset_dim == "multivariate":
for i in range(1, df_dataset.shape[1]-1):
axs[0].plot(df_dataset.index, df_dataset.iloc[:, i], label=f"channel-{i}")
else:
axs[0].plot(df_dataset.index, df_dataset.iloc[:, 1], label=f"timeseries")
axs[1].plot(df_dataset.index, df_dataset["is_anomaly"], label="label")
for item in algorithms:
algo, optim_params = item
axs[1].plot(df_scores.index, df_scores[item], label=f"{algo}={auroc[item]:.4f} ({optim_params})")
axs[0].legend()
axs[1].legend()
fig.suptitle(f"Results of {','.join(np.unique([a for a, _ in algorithms]))} on {dataset_name}")
fig.tight_layout()
return fig
```
## Parameter assessment
```
sort_by = ("ROC_AUC", "mean")
metric_agg_type = ["min", "mean", "median"]
time_agg_type = "mean"
aggs = {
"PR_AUC": metric_agg_type,
"ROC_AUC": metric_agg_type,
"train_main_time": time_agg_type,
"execute_main_time": time_agg_type,
"repetition": "count"
}
df_tmp = df.reset_index()
df_tmp = df_tmp.groupby(by=["algorithm", "optim_param_name", "optim_param_value"]).agg(aggs)
df_tmp = df_tmp.reset_index()
df_tmp = df_tmp.sort_values(by=["algorithm", "optim_param_name", sort_by], ascending=False)
df_tmp = df_tmp.set_index(["algorithm", "optim_param_name", "optim_param_value"])
with pd.option_context("display.max_rows", None, "display.max_columns", None):
display(df_tmp)
```
#### Selected parameters
- SAND: **Check why it failed on so many datasets!**
- Random Black Forest (RR): **Only TIMEOUTs** --> perform search over n_estimators, n_trees, and bootstrap again
- PhaseSpace-SVM: `coef0=0,tol=0.001,project_phasespace=True,gamma=1.0,nu=0.35,degree=3,kernel="rbf"` (tol, coef0: make no difference; nu: smaller is better; gamma: 0.8 is very slightly better for ROC_AUC, but worse for PR_AUC; degree: makes no difference)
- PST: `window_size="1.0 * dataset period size",sim=` **wrong parameter format**: `sim` is case-sensitive! (see the quick check below)
- PCI: `window_size="0.5 * dataset period size"` (smaller is better)
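A quick way to verify the parameter-format problem noted for PST above is to print the raw `hyper_params` strings that were actually submitted for it; this only uses the `df` loaded earlier:
```python
# inspect the raw hyper-parameter strings used for the PST runs
for params in df.loc[df["algorithm"] == "PST", "hyper_params"].unique():
    print(params)
```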
```
df[df["algorithm"] == "SAND"].groupby(by=["status"])[["dataset"]].count()
df[df["algorithm"] == "Random Black Forest (RR)"].groupby(by=["status", "hyper_params"])[["dataset"]].count()
plot_scores([
("PST", "window_size", 1),
("PST", "window_size", 1.5)
], "rw-combined-diff-2", use_plotly=False)
plt.show()
```
<img align="right" src="images/tf-small.png" width="128"/>
<img align="right" src="images/etcbc.png"/>
<img align="right" src="images/dans-small.png"/>
You might want to consider the [start](search.ipynb) of this tutorial.
Short introductions to other TF datasets:
* [Dead Sea Scrolls](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/dss.ipynb),
* [Old Babylonian Letters](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/oldbabylonian.ipynb),
or the
* [Q'uran](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/quran.ipynb)
# Export to Emdros MQL
[EMDROS](http://emdros.org), written by Ulrik Petersen,
is a text database system with the powerful *topographic* query language MQL.
The ideas are based on a model devised by Crist-Jan Doedens in
[Text Databases: One Database Model and Several Retrieval Languages](https://books.google.nl/books?id=9ggOBRz1dO4C).
Text-Fabric's model of slots, nodes and edges is a fairly straightforward translation of the models of Crist-Jan Doedens and Ulrik Petersen.
[SHEBANQ](https://shebanq.ancient-data.org) uses EMDROS to let users execute and save MQL queries against the Hebrew Text Database of the ETCBC.
So it is kind of logical and convenient to be able to work with a Text-Fabric resource through MQL.
If you have obtained an MQL dataset somehow, you can turn it into a text-fabric data set by `importMQL()`,
which we will not show here.
And if you want to export a Text-Fabric data set to MQL, that is also possible.
After the `Fabric(modules=...)` call, you can call `exportMQL()` in order to save all features of the
indicated modules into a big MQL dump, which can be imported by an EMDROS database.
```
%load_ext autoreload
%autoreload 2
```
# Incantation
The ins and outs of installing Text-Fabric, getting the corpus, and initializing a notebook are
explained in the [start tutorial](start.ipynb).
```
from tf.app import use
A = use("etcbc/bhsa", hoist=globals())
TF.exportMQL("mybhsa", "~/Downloads")
```
Now you have a file `~/Downloads/mybhsa.mql` of 530 MB.
You can import it into an Emdros database by saying (removing any previously created `mybhsa` database first):

    cd ~/Downloads
    rm -f mybhsa
    mql -b 3 < mybhsa.mql
The result is an SQLite3 database `mybhsa` in the same directory (168 MB).
You can run a query against it by creating a text file `test.mql` with these contents:

    select all objects where
    [lex gloss ~ 'make'
        [word FOCUS]
    ]

And then say

    mql -b 3 -d mybhsa test.mql
You will see raw query results: all word occurrences that belong to lexemes with `make` in their gloss.
It is not very pretty, and probably you should use a more visual Emdros tool to run those queries.
You see a lot of node numbers, but the good thing is, you can look those node numbers up in Text-Fabric.
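For example, a node number taken from the MQL output can be inspected back in Text-Fabric like this (a small sketch; `node = 2` is just an arbitrary low word node, substitute one from your query results):
```python
node = 2  # arbitrary example node number; replace with one from the MQL output
print(F.otype.v(node))           # object type of the node
print(T.sectionFromNode(node))   # the (book, chapter, verse) it belongs to
print(T.text(node))              # its text
```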
# All steps
* **[start](start.ipynb)** your first step in mastering the bible computationally
* **[display](display.ipynb)** become an expert in creating pretty displays of your text structures
* **[search](search.ipynb)** turbo charge your hand-coding with search templates
* **[exportExcel](exportExcel.ipynb)** make tailor-made spreadsheets out of your results
* **[share](share.ipynb)** draw in other people's data and let them use yours
* **export** export your dataset as an Emdros database
* **[annotate](annotate.ipynb)** annotate plain text by means of other tools and import the annotations as TF features
* **[map](map.ipynb)** map somebody else's annotations to a new version of the corpus
* **[volumes](volumes.ipynb)** work with selected books only
* **[trees](trees.ipynb)** work with the BHSA data as syntax trees
CC-BY Dirk Roorda
# An sbpy Example
Imagine you're a graduate student and your mentor has some very urgent tasks for you:
1. Here is a list of asteroids. Check which of those are observable tonight from Maunakea and send that list to our observer: 1 Ceres, 10, 3200 Phaethon, 3552 Don Quixote, 12893 (1998 QS55), 12345 (1993 FT8), 230 Athamantis, 4500, 3456, 135065
2. Once we have the data in hand, check if any of these objects is brighter than it should be.
```
from sbpy import bib
bib.track()
```
## 1. Checking observability
```
asteroids =['1 Ceres', '10', '3200 Phaethon', '3552 Don Quixote', '12893 (1998 QS55)',
'12345 (1993 FT8)', '230 Athamantis', '4500', '3456', '135065']
```
We need a uniform set of names, designations, or numbers. In this case, we pick numbers:
```
from sbpy.data import Names
ast_numbers = []
for ast in asteroids:
ident = Names.parse_asteroid(ast)
print(ident)
ast_numbers.append(ident['number'])
ast_numbers
```
We retrieve ephemerides for these targets:
```
from sbpy.data import Ephem
from astropy.time import Time
import astropy.units as u
eph = Ephem.from_horizons(ast_numbers, epochs={'start': Time('2019-09-18'), 'stop': Time('2019-09-19'), 'step': 1*u.h},
location='568')
list(eph.field_names)
eph
```
We select only those ephemerides for which the target has an airmass less than 2 and the sky is really dark:
```
eph = eph[eph['airmass'] < 2] # make a cut on airmass
eph = eph[eph['solar_presence'] == ''] # only dark time
eph
```
Let's check rates. Our telescope can only track at absolute rates less than 1 arcsec/second.
```
import numpy as np
list(np.sqrt(eph['RA*cos(Dec)_rate']**2 + eph['DEC_rate']**2).to('arcsec/s'))
```
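If some of the rates had exceeded the 1 arcsec/second limit, a cut analogous to the airmass cut above could be applied; a sketch (using a new name so `eph` itself is left untouched):
```python
rates = np.sqrt(eph['RA*cos(Dec)_rate']**2 + eph['DEC_rate']**2).to('arcsec/s')
eph_trackable = eph[rates < 1 * u.arcsec / u.s]  # hypothetical extra cut on tracking rate
```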
Rates are fine. Our targets for tonight are:
```
set(eph['targetname'])
```
## 2. Combining data and plotting
The observer sends you V-band photometric observations for Hygiea only:
```
obs_epochs = Time([2458745.0417, 2458745.0418, 2458745.0419], format='jd')
obs_mags = [8.53, 8.46, 8.49]*u.mag
obs_epochs
obs_epochs.tdb.jd
```
We bundle the data as an `obs` object:
```
from sbpy.data import Obs
obs = Obs.from_dict({'targetname': [10]*len(obs_epochs), 'epoch': obs_epochs, 'mag': obs_mags})
obs
obs = obs.supplement()
obs
```
We compare the expected brightness (`obs['V']`, as supplemented from the ephemerides) to the actually measured brightness (`obs['mag']`):
```
print(obs['V'], obs['mag'])
```
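The offset itself can be computed directly from the `obs` object (measured minus predicted):
```python
# difference between measured and predicted V-band brightness
print(obs['mag'] - obs['V'])
```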
Hygiea is actually 3 magnitudes brighter than it should be! Let's put this into perspective and compare it to previous observations:
```
mpc = Obs.from_mpc('10') # retrieve observations reported to the MPC for Hygiea
mpc
list(mpc.field_names)
mpc = mpc[mpc['band'] == 'V'] # extract V-band observations only
mpc
import matplotlib.pyplot as plt
plt.scatter(mpc['epoch'].jd, mpc['mag'], label='MPC')
plt.scatter(obs['epoch'].jd, obs['mag'], label='568')
plt.ylim([13, 8])
plt.xlabel('JD')
plt.ylabel('V (mag)')
plt.legend(loc=3)
```
We should write a paper...
```
print(bib.to_icarus())
```
```
from PIL import Image
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# read in dataframe with photo file names and associated variety information
df = pd.read_table('Desktop/0_visual_check/ampelometry_id_key.txt', delim_whitespace=True, encoding="latin1")
# loop to load in each leaf segment from each leaf and plot data on original image
for index, row in df.iterrows():
# store information in dataframe as variables
Variety = row["variety"]
Type = row["type"].capitalize() + " grape"
Photo = row["image"]
Leaf = row["value"]
Leaf_number = "Leaf number " + row["leaf"][-1]
# graph attributes
pink = "#B31A82"
orange = "#F28317"
size = 1800 # point size
linewidth = 9 # line width
fs = 60 # font size
ls = 40 # tick number font size
    a = 1 # alpha (line/marker opacity)
# retrieve vectors for each vein/blade region
photo_file = "Desktop/0_visual_check/ampelometry_images/" + Photo + ".JPG"
cm_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_cm.txt"
m_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_m.txt"
d_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_d.txt"
p_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_p.txt"
m1_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_m1.txt"
d1_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_d1.txt"
p1_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_p1.txt"
m2_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_m2.txt"
d2_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_d2.txt"
p2_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_p2.txt"
m3_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_m3.txt"
d3_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_d3.txt"
p3_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_p3.txt"
ma_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_ma.txt"
da_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_da.txt"
pa_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_pa.txt"
mb_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_mb.txt"
db_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_db.txt"
pb_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_pb.txt"
mc_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_mc.txt"
dc_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_dc.txt"
pc_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_pc.txt"
md_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_md.txt"
dd_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_dd.txt"
pd_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_pd.txt"
ds_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_ds.txt"
ps_file = "Desktop/0_visual_check/ampelometry_data/" + Leaf + "_ps.txt"
# load in data files
cm = np.loadtxt(cm_file)
m = np.loadtxt(m_file)
d = np.loadtxt(d_file)
p = np.loadtxt(p_file)
m1 = np.loadtxt(m1_file)
d1 = np.loadtxt(d1_file)
p1 = np.loadtxt(p1_file)
m2 = np.loadtxt(m2_file)
d2 = np.loadtxt(d2_file)
p2 = np.loadtxt(p2_file)
m3 = np.loadtxt(m3_file)
d3 = np.loadtxt(d3_file)
p3 = np.loadtxt(p3_file)
ma = np.loadtxt(ma_file)
da = np.loadtxt(da_file)
pa = np.loadtxt(pa_file)
mb = np.loadtxt(mb_file)
db = np.loadtxt(db_file)
pb = np.loadtxt(pb_file)
mc = np.loadtxt(mc_file)
dc = np.loadtxt(dc_file)
pc = np.loadtxt(pc_file)
md = np.loadtxt(md_file)
dd = np.loadtxt(dd_file)
    pd = np.loadtxt(pd_file)  # note: shadows the pandas alias `pd`; pandas is not used again inside this loop
ds = np.loadtxt(ds_file)
ps = np.loadtxt(ps_file)
# load in photo
picture = Image.open(photo_file).convert('L')
plt.figure(figsize=(44,36))
plt.imshow(picture, cmap="gist_gray")
# scale bar
plt.plot(cm[:,0], cm[:,1], lw=linewidth, c=pink)
# plot veins on photo
plt.plot(m[:,0], m[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(d[:,0], d[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(p[:,0], p[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(m1[:,0], m1[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(d1[:,0], d1[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(p1[:,0], p1[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(m2[:,0], m2[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(d2[:,0], d2[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(p2[:,0], p2[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(m3[:,0], m3[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(d3[:,0], d3[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(p3[:,0], p3[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(ma[:,0], ma[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(da[:,0], da[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(pa[:,0], pa[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(mb[:,0], mb[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(db[:,0], db[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(pb[:,0], pb[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(mc[:,0], mc[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(dc[:,0], dc[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(pc[:,0], pc[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(md[:,0], md[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(dd[:,0], dd[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(pd[:,0], pd[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(ds[:,0], ds[:,1], lw=linewidth, c=pink, alpha=a)
plt.plot(ps[:,0], ps[:,1], lw=linewidth, c=pink, alpha=a)
# plot branch points
plt.scatter(m[0,0], m[0,1], s=size, c=orange)
plt.scatter(m1[0,0], m1[0,1], s=size, c=orange)
plt.scatter(d1[0,0], d1[0,1], s=size, c=orange)
plt.scatter(p1[0,0], p1[0,1], s=size, c=orange)
plt.scatter(m2[0,0], m2[0,1], s=size, c=orange)
plt.scatter(d2[0,0], d2[0,1], s=size, c=orange)
plt.scatter(p2[0,0], p2[0,1], s=size, c=orange)
plt.scatter(m3[0,0], m3[0,1], s=size, c=orange)
plt.scatter(d3[0,0], d3[0,1], s=size, c=orange)
plt.scatter(p3[0,0], p3[0,1], s=size, c=orange)
# plot blade points
plt.scatter(ma[-1,0], ma[-1,1], s=size, c=orange)
plt.scatter(da[-1,0], da[-1,1], s=size, c=orange)
plt.scatter(pa[-1,0], pa[-1,1], s=size, c=orange)
plt.scatter(mb[-1,0], mb[-1,1], s=size, c=orange)
plt.scatter(db[-1,0], db[-1,1], s=size, c=orange)
plt.scatter(pb[-1,0], pb[-1,1], s=size, c=orange)
plt.scatter(mc[-1,0], mc[-1,1], s=size, c=orange)
plt.scatter(dc[-1,0], dc[-1,1], s=size, c=orange)
plt.scatter(pc[-1,0], pc[-1,1], s=size, c=orange)
plt.scatter(md[-1,0], md[-1,1], s=size, c=orange)
plt.scatter(dd[-1,0], dd[-1,1], s=size, c=orange)
plt.scatter(pd[-1,0], pd[-1,1], s=size, c=orange)
plt.scatter(ds[-1,0], ds[-1,1], s=size, c=orange)
plt.scatter(ps[-1,0], ps[-1,1], s=size, c=orange)
# title
plt.title('Variety: {0} \nType: {1} \nImage file: {2} \n{3}'.format(Variety, Type, Photo, Leaf_number),
loc="left", fontsize=fs)
plt.axis('on')
plt.tick_params(labelsize=ls)
plt.xlabel("x", fontsize=fs)
plt.ylabel("y", fontsize=fs)
# save outputs
plt.savefig("Desktop/0_visual_check/output_visual_check/" + Variety + "_" + Leaf + ".jpg")
```
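The cell above spells out every vein segment by hand. A more compact, functionally equivalent sketch (untested, reusing the variables `Leaf`, `pink`, `orange`, `linewidth`, `size` and `a` defined in the loop above) could drive the loading and plotting from a list of segment codes:
```python
# Hypothetical compact version of the load/plot logic above.
# Segment codes as used in the file names; 'cm' is the scale bar.
vein_codes = ['m', 'd', 'p', 'm1', 'd1', 'p1', 'm2', 'd2', 'p2',
              'm3', 'd3', 'p3', 'ma', 'da', 'pa', 'mb', 'db', 'pb',
              'mc', 'dc', 'pc', 'md', 'dd', 'pd', 'ds', 'ps']
data_dir = "Desktop/0_visual_check/ampelometry_data/"
segments = {code: np.loadtxt(f"{data_dir}{Leaf}_{code}.txt") for code in ['cm'] + vein_codes}

# plot scale bar and all vein segments
for code, arr in segments.items():
    plt.plot(arr[:, 0], arr[:, 1], lw=linewidth, c=pink, alpha=a)

# branch points: first coordinate of the main/secondary vein segments
for code in ['m', 'm1', 'd1', 'p1', 'm2', 'd2', 'p2', 'm3', 'd3', 'p3']:
    plt.scatter(segments[code][0, 0], segments[code][0, 1], s=size, c=orange)

# blade points: last coordinate of the blade-margin segments
for code in ['ma', 'da', 'pa', 'mb', 'db', 'pb', 'mc', 'dc', 'pc', 'md', 'dd', 'pd', 'ds', 'ps']:
    plt.scatter(segments[code][-1, 0], segments[code][-1, 1], s=size, c=orange)
```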
```
import importlib
import common
importlib.reload(common)
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import sqlalchemy
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.metrics import confusion_matrix, f1_score, accuracy_score, precision_score, recall_score, make_scorer
from common import create_engine
from common import display_all
from common import figsize
from common import save_df
from common import save_model, read_model, create_features
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters() # converters e.g. for datetime in plots
engine = create_engine('db-conf.json', 'local')
pd.sql = lambda sql: pd.read_sql(sqlalchemy.text(sql), con=engine)
# get data from one source (NN)
fetch_data_query = '''
WITH fb_popularity AS (SELECT sbq.url, sbq.sync_date, sbq.reaction_count, sbq.comment_count, sbq.share_count
FROM (
SELECT afe.*, row_number() OVER (PARTITION BY url ORDER BY sync_date) as rn
FROM article_fb_engagement afe) sbq
WHERE sbq.rn = 1
)
SELECT a.id,
a.url,
title,
perex,
body,
published_at,
extracted_at,
a.source_id,
category,
other_info,
aut.name as author_name,
s.id as source_id,
s.name as source_name,
s.url as source_url,
stype as source_type,
is_reliable::integer as source_is_reliable,
sync_date as fb_sync_date,
reaction_count as fb_reaction_count,
comment_count as fb_comment_count,
share_count as fb_share_count,
(reaction_count + comment_count + share_count) as fb_popularity
FROM article a
JOIN source s on a.source_id = s.id
JOIN (SELECT * FROM fb_popularity) p ON a.url = p.url
JOIN author aut on a.author_id = aut.id
WHERE s.id = 145;
'''
df = pd.sql(fetch_data_query)
df.info()
# our blog has only title and body + label
df = df[['id', 'title', 'body', 'fb_popularity']]
df = df.set_index('id')
```
Derive popularity labels (1 to 4) from quantiles of `fb_popularity`
```
df
df.fb_popularity.describe()
df['label'] = 1
df.fb_popularity.quantile([.65, .85, .95])
df.loc[df.fb_popularity > 162, 'label'] = 2
df.loc[df.fb_popularity > 465, 'label'] = 3
df.loc[df.fb_popularity > 1667, 'label'] = 4
df.label.value_counts()
df = df.drop(columns=['fb_popularity'])
df
df_with_features = create_features(df)
df_with_features['label'] = df['label'].array
from sklearn.model_selection import train_test_split
target_df = df_with_features
y = target_df['label']
X = target_df.drop(columns='label')
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)
X_train
rdf = RandomForestClassifier()
rdf.fit(X_train, y_train)
y_pred = rdf.predict(X_test)
print(confusion_matrix(y_test, y_pred))
y_test.value_counts()
accuracy_score(y_test, y_pred)
f1_score(y_test, y_pred, average='macro')
save_model(rdf)
df.info()
df.sort_values(by='label', ascending=False)
xx = X_test.copy()
xx['label'] = y_pred
xx['label2'] = y_test
xx[xx.label == 4]
from common import normalize
normalize(df.iloc[13872].body)
df.iloc[13872].title
y_train
```
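As a quick sanity check on the accuracy and macro-F1 above, it helps to compare against a majority-class baseline. A minimal sketch, reusing the split and metrics already defined in this cell:
```python
# Sketch: majority-class baseline to put the RandomForest scores in context
from sklearn.dummy import DummyClassifier

baseline = DummyClassifier(strategy='most_frequent').fit(X_train, y_train)
baseline_pred = baseline.predict(X_test)
print(accuracy_score(y_test, baseline_pred))
print(f1_score(y_test, baseline_pred, average='macro'))
```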
|
github_jupyter
|
| 0.457379 | 0.351269 |
### Requirements
- python 3.6.2
```python
conda install -c sebp scikit-survival
conda install -c anaconda seaborn
```
- numpy 1.15.4
- pandas 0.20.3
- scikit-learn 0.19.2
- scikit-survival 0.6.0
- seaborn 0.9.0
```
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
```
## 1. load data
```
df = pd.read_csv("employee_retention_data.csv")
```
#### Data Example
- employee_id : id of the employee. Unique by employee per company
- company_id : company id.
- dept : employee dept
- seniority : number of yrs of work experience when hired
- salary: avg yearly salary of the employee during her tenure within the company
- join_date: when the employee joined the company, it can only be between 2011/01/24 and 2015/12/13
- quit_date: when the employee left her job (if she is still employed as of 2015/12/13, this field is NA)
```
df.head()
```
#### Check Missing Data
- no missing data
- NaN in quit_date: still employed as of 2015/12/13
```
df.isnull().sum()
```
#### Data Size
```
df.shape
```
#### Censored observations
Number of employees who had not quit by 2015/12/13 (their quit_date is NaN)
```
sum(df["quit_date"].isnull())
```
## 2. Exploratory Analysis: distribution
#### Distribution of company
- 34% data from one company
```
plt.hist(df["company_id"], density = True, bins = range(1,14))
```
#### Distribution of department
```
plt.hist(df["dept"], density = True, bins = 6)
```
#### Distribution of salary
- left skewed
```
plt.hist(df["salary"], bins = 20)
plt.show()
```
## 3. Exploratory Analysis: quit time trend
```
df['quit_date'] = pd.to_datetime(df['quit_date'])
df['quit_date'].groupby(df["quit_date"].dt.year).count().plot(kind="bar", color = "royalblue")
plt.title("quit time by year")
df['quit_date'].groupby(df["quit_date"].dt.month).count().plot(kind="bar", color = "royalblue")
plt.title("quit time by month")
```
## 4. survival analysis: feature engineer
#### df_y
- "status": True if quit
- survival_days: days between join and quit
```
df["status"] = df["quit_date"].notnull()
df['quit_date'] = pd.to_datetime(df['quit_date'])
df['join_date'] = pd.to_datetime(df['join_date'])
df["survival_days"] = (df["quit_date"] - df["join_date"]).dt.days
df_y = pd.DataFrame({"status": df["status"],
"survival_days": df["survival_days"]})
### survival time for people who haven't quit
df_y["survival_days"] = df_y["survival_days"].fillna((pd.Timestamp("2015/12/13 ") - pd.Timestamp("2011/01/24")).days)
df_y.head()
df_y_array = df_y.to_records(index = False)
```
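Note that the fill above assigns the full study window (2011/01/24 to 2015/12/13) to every censored employee, regardless of when they joined. An arguably more precise alternative, shown only as a sketch (it is not what the analysis below uses), is to censor each employee at 2015/12/13 measured from their own join date:
```python
# Sketch: per-employee censoring time (days from join_date to the last observed date)
censor_date = pd.Timestamp("2015-12-13")
alt_survival_days = df["survival_days"].fillna((censor_date - df["join_date"]).dt.days)
alt_survival_days.describe()
```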
#### df_x
- numeric: standardize
- categorical: one-hot encoding
```
df.columns
df_x = df[['company_id', 'dept', 'seniority', 'salary']]
df_x['company_id'] = df_x['company_id'].astype('category')
df_x['dept'] = df_x['dept'].astype('category')
from sksurv.preprocessing import OneHotEncoder
df_x_numeric = OneHotEncoder().fit_transform(df_x)
df_x_numeric.head()
```
#### train, test split
```
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(df_x_numeric, df_y_array, test_size=0.3, random_state=12345)
```
## 5. survival analysis: linear model
Cox proportional hazards model
```
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train[["seniority","salary"]])
x_train[["seniority","salary"]] = scaler.transform(x_train[["seniority","salary"]])
from sksurv.linear_model import CoxPHSurvivalAnalysis
estimator = CoxPHSurvivalAnalysis()
estimator.fit(x_train, y_train)
```
#### test accuracy
```
# scale the test features with the scaler already fitted on the training data
# (re-fitting the scaler on the test set would leak test-set statistics and be inconsistent with training)
x_test[["seniority","salary"]] = scaler.transform(x_test[["seniority","salary"]])
estimator.score(x_test, y_test)
```
#### Feature importance: Cox coefficients (log hazard ratios)
```
pd.Series(estimator.coef_, index=df_x_numeric.columns)
```
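Since the `CoxPHSurvivalAnalysis` coefficients are on the log-hazard scale, exponentiating them gives hazard ratios, which are often easier to interpret. A small sketch:
```python
# Sketch: convert Cox coefficients to hazard ratios (exp of the log-hazard coefficients)
hazard_ratios = np.exp(pd.Series(estimator.coef_, index=df_x_numeric.columns))
hazard_ratios.sort_values(ascending=False)
```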
|
github_jupyter
|
| 0.400749 | 0.853852 |
# Character-Level LSTM in PyTorch
In this notebook, I'll construct a character-level LSTM with PyTorch. The network will train character by character on some text, then generate new text character by character. As an example, I will train on Anna Karenina. **This model will be able to generate new text based on the text from the book!**
This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
First let's load in our required resources for data loading and model creation.
```
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
```
## Load in Data
Then, we'll load the Anna Karenina text file and convert it into integers for our network to use.
```
# open text file and read in data as `text`
with open('data/anna.txt', 'r') as f:
text = f.read()
```
Let's check out the first 100 characters, make sure everything is peachy. According to the [American Book Review](http://americanbookreview.org/100bestlines.asp), this is the 6th best first line of a book ever.
```
text[:100]
```
### Tokenization
In the cells below, I'm creating a couple of **dictionaries** to convert the characters to and from integers. Encoding the characters as integers makes it easier to use them as input to the network.
```
# encode the text and map each character to an integer and vice versa
# we create two dictionaries:
# 1. int2char, which maps integers to characters
# 2. char2int, which maps characters to unique integers
chars = tuple(set(text))
int2char = dict(enumerate(chars))
char2int = {ch: ii for ii, ch in int2char.items()}
# encode the text
encoded = np.array([char2int[ch] for ch in text])
```
And we can see those same characters from above, encoded as integers.
```
encoded[:100]
```
## Pre-processing the data
As you can see in our char-RNN image above, our LSTM expects an input that is **one-hot encoded** meaning that each character is converted into an integer (via our created dictionary) and *then* converted into a column vector where only its corresponding integer index will have the value of 1 and the rest of the vector will be filled with 0's. Since we're one-hot encoding the data, let's make a function to do that!
```
def one_hot_encode(arr, n_labels):
    # Initialize the encoded array
one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)
# Fill the appropriate elements with ones
one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.
# Finally reshape it to get back to the original array
one_hot = one_hot.reshape((*arr.shape, n_labels))
return one_hot
# check that the function works as expected
test_seq = np.array([[3, 5, 1]])
one_hot = one_hot_encode(test_seq, 8)
print(one_hot)
```
## Making training mini-batches
To train on this data, we also want to create mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="assets/[email protected]" width=500px>
<br>
In this example, we'll take the encoded characters (passed in as the `arr` parameter) and split them into multiple sequences, given by `batch_size`. Each of our sequences will be `seq_length` long.
### Creating Batches
**1. The first thing we need to do is discard some of the text so we only have completely full mini-batches.**
Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences in a batch) and $M$ is the seq_length or number of time steps in a sequence. Then, to get the total number of batches, $K$, that we can make from the array `arr`, you divide the length of `arr` by the number of characters per batch. Once you know the number of batches, you can get the total number of characters to keep from `arr`, $N * M * K$.
**2. After that, we need to split `arr` into $N$ sequences (the rows of our batches).**
You can do this using `arr.reshape(size)` where `size` is a tuple containing the dimension sizes of the reshaped array. We know we want $N$ sequences in a batch, so let's make that the size of the first dimension. For the second dimension, you can use `-1` as a placeholder in the size; it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$.
**3. Now that we have this array, we can iterate through it to get our mini-batches.**
The idea is each batch is a $N \times M$ window on the $N \times (M * K)$ array. For each subsequent batch, the window moves over by `seq_length`. We also want to create both the input and target arrays. Remember that the targets are just the inputs shifted over by one character. The way I like to do this window is use `range` to take steps of size `seq_length` from $0$ to `arr.shape[1]`, the total number of tokens in each sequence. That way, the integers you get from `range` always point to the start of a batch, and each window is `seq_length` wide.
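To make the arithmetic concrete, here is a tiny worked example with hypothetical numbers (not the actual length of the Anna Karenina text):
```python
# Hypothetical example: 1,000,000 encoded characters, batch_size N = 8, seq_length M = 50
arr_len, N, M = 1_000_000, 8, 50
K = arr_len // (N * M)   # 2500 full batches fit in the data
kept = N * M * K         # 1,000,000 characters are kept (nothing is discarded in this case)
print(K, kept)
```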
> **TODO:** Write the code for creating batches in the function below. The exercises in this notebook _will not be easy_. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, **type out the solution code yourself.**
```
def get_batches(arr, batch_size, seq_length):
'''Create a generator that returns batches of size
batch_size x seq_length from arr.
Arguments
---------
arr: Array you want to make batches from
batch_size: Batch size, the number of sequences per batch
seq_length: Number of encoded chars in a sequence
'''
## TODO: Get the number of batches we can make
n_batches = len(arr) // (batch_size * seq_length)
## TODO: Keep only enough characters to make full batches
arr = arr[:batch_size * seq_length * n_batches]
## TODO: Reshape into batch_size rows
arr = arr.reshape((batch_size, -1))
## TODO: Iterate over the batches using a window of size seq_length
for n in range(0, arr.shape[1], seq_length):
# The features
x = arr[:, n:n+seq_length]
# The targets, shifted by one
y = np.zeros_like(x)
try:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, n+seq_length]
except IndexError:
y[:, :-1], y[:, -1] = x[:, 1:], arr[:, 0]
yield x, y
```
### Test Your Implementation
Now I'll make some data sets and we can check out what's going on as we batch data. Here, as an example, I'm going to use a batch size of 8 and 50 sequence steps.
```
batches = get_batches(encoded, 8, 50)
x, y = next(batches)
# printing out the first 10 items in a sequence
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
```
If you implemented `get_batches` correctly, the above output should look something like
```
x
[[25 8 60 11 45 27 28 73 1 2]
[17 7 20 73 45 8 60 45 73 60]
[27 20 80 73 7 28 73 60 73 65]
[17 73 45 8 27 73 66 8 46 27]
[73 17 60 12 73 8 27 28 73 45]
[66 64 17 17 46 7 20 73 60 20]
[73 76 20 20 60 73 8 60 80 73]
[47 35 43 7 20 17 24 50 37 73]]
y
[[ 8 60 11 45 27 28 73 1 2 2]
[ 7 20 73 45 8 60 45 73 60 45]
[20 80 73 7 28 73 60 73 65 7]
[73 45 8 27 73 66 8 46 27 65]
[17 60 12 73 8 27 28 73 45 27]
[64 17 17 46 7 20 73 60 20 80]
[76 20 20 60 73 8 60 80 73 17]
[35 43 7 20 17 24 50 37 73 36]]
```
although the exact numbers may be different. Check to make sure the data is shifted over one step for `y`.
---
## Defining the network with PyTorch
Below is where you'll define the network.
<img src="assets/charRNN.png" width=500px>
Next, you'll use PyTorch to define the architecture of the network. We start by defining the layers and operations we want. Then, define a method for the forward pass. You've also been given a method for predicting characters.
### Model Structure
In `__init__` the suggested structure is as follows:
* Create and store the necessary dictionaries (this has been done for you)
* Define an LSTM layer that takes as params: an input size (the number of characters), a hidden layer size `n_hidden`, a number of layers `n_layers`, a dropout probability `drop_prob`, and a batch_first boolean (True, since we are batching)
* Define a dropout layer with `dropout_prob`
* Define a fully-connected layer with params: input size `n_hidden` and output size (the number of characters)
* Finally, initialize the weights (again, this has been given)
Note that some parameters have been named and given in the `__init__` function, and we use them and store them by doing something like `self.drop_prob = drop_prob`.
---
### LSTM Inputs/Outputs
You can create a basic [LSTM layer](https://pytorch.org/docs/stable/nn.html#lstm) as follows
```python
self.lstm = nn.LSTM(input_size, n_hidden, n_layers,
dropout=drop_prob, batch_first=True)
```
where `input_size` is the number of characters this cell expects to see as sequential input, and `n_hidden` is the number of units in the hidden layers in the cell. We can add dropout by passing a dropout probability; `nn.LSTM` then applies dropout between the stacked LSTM layers (the `n_layers` layers are stacked for us, with the output of one layer fed into the next). Finally, in the `forward` function, we use `.contiguous().view(...)` to flatten the LSTM output so it can be passed through the fully-connected layer.
We also need to create an initial hidden state of all zeros. This is done like so
```python
self.init_hidden(batch_size)
```
```
# check if GPU is available
train_on_gpu = torch.cuda.is_available()
if(train_on_gpu):
print('Training on GPU!')
else:
print('No GPU available, training on CPU; consider making n_epochs very small.')
class CharRNN(nn.Module):
def __init__(self, tokens, n_hidden=256, n_layers=2,
drop_prob=0.5, lr=0.001):
super().__init__()
self.drop_prob = drop_prob
self.n_layers = n_layers
self.n_hidden = n_hidden
self.lr = lr
# creating character dictionaries
self.chars = tokens
self.int2char = dict(enumerate(self.chars))
self.char2int = {ch: ii for ii, ch in self.int2char.items()}
## TODO: define the layers of the model
self.lstm = nn.LSTM(len(self.chars), self.n_hidden, self.n_layers, dropout=self.drop_prob, batch_first=True)
self.dropout = nn.Dropout(self.drop_prob)
self.fc = nn.Linear(self.n_hidden, len(self.chars))
def forward(self, x, hidden):
''' Forward pass through the network.
These inputs are x, and the hidden/cell state `hidden`. '''
## TODO: Get the outputs and the new hidden state from the lstm
r_output, hidden = self.lstm(x, hidden)
out = self.dropout(r_output)
out = out.contiguous().view(-1, self.n_hidden)
out = self.fc(out)
# return the final output and the hidden state
return out, hidden
def init_hidden(self, batch_size):
''' Initializes hidden state '''
# Create two new tensors with sizes n_layers x batch_size x n_hidden,
# initialized to zero, for hidden state and cell state of LSTM
weight = next(self.parameters()).data
if (train_on_gpu):
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_().cuda())
else:
hidden = (weight.new(self.n_layers, batch_size, self.n_hidden).zero_(),
weight.new(self.n_layers, batch_size, self.n_hidden).zero_())
return hidden
```
## Time to train
The train function gives us the ability to set the number of epochs, the learning rate, and other parameters.
Below we're using an Adam optimizer and cross entropy loss since we are looking at character class scores as output. We calculate the loss and perform backpropagation, as usual!
A couple of details about training:
>* Within the batch loop, we detach the hidden state from its history; this time setting it equal to a new *tuple* variable because an LSTM has a hidden state that is a tuple of the hidden and cell states.
* We use [`clip_grad_norm_`](https://pytorch.org/docs/stable/_modules/torch/nn/utils/clip_grad.html) to help prevent exploding gradients.
```
def train(net, data, epochs=10, batch_size=10, seq_length=50, lr=0.001, clip=5, val_frac=0.1, print_every=10):
''' Training a network
Arguments
---------
net: CharRNN network
data: text data to train the network
epochs: Number of epochs to train
batch_size: Number of mini-sequences per mini-batch, aka batch size
seq_length: Number of character steps per mini-batch
lr: learning rate
clip: gradient clipping
val_frac: Fraction of data to hold out for validation
print_every: Number of steps for printing training and validation loss
'''
net.train()
opt = torch.optim.Adam(net.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
# create training and validation data
val_idx = int(len(data)*(1-val_frac))
data, val_data = data[:val_idx], data[val_idx:]
if(train_on_gpu):
net.cuda()
counter = 0
n_chars = len(net.chars)
for e in range(epochs):
# initialize hidden state
h = net.init_hidden(batch_size)
for x, y in get_batches(data, batch_size, seq_length):
counter += 1
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
inputs, targets = torch.from_numpy(x), torch.from_numpy(y)
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
h = tuple([each.data for each in h])
# zero accumulated gradients
net.zero_grad()
# get the output from the model
output, h = net(inputs, h)
# calculate the loss and perform backprop
loss = criterion(output, targets.view(batch_size*seq_length))
loss.backward()
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
nn.utils.clip_grad_norm_(net.parameters(), clip)
opt.step()
# loss stats
if counter % print_every == 0:
# Get validation loss
val_h = net.init_hidden(batch_size)
val_losses = []
net.eval()
for x, y in get_batches(val_data, batch_size, seq_length):
# One-hot encode our data and make them Torch tensors
x = one_hot_encode(x, n_chars)
x, y = torch.from_numpy(x), torch.from_numpy(y)
# Creating new variables for the hidden state, otherwise
# we'd backprop through the entire training history
val_h = tuple([each.data for each in val_h])
inputs, targets = x, y
if(train_on_gpu):
inputs, targets = inputs.cuda(), targets.cuda()
output, val_h = net(inputs, val_h)
val_loss = criterion(output, targets.view(batch_size*seq_length))
val_losses.append(val_loss.item())
                net.train() # reset to train mode after iterating through validation data
print("Epoch: {}/{}...".format(e+1, epochs),
"Step: {}...".format(counter),
"Loss: {:.4f}...".format(loss.item()),
"Val Loss: {:.4f}".format(np.mean(val_losses)))
```
## Instantiating the model
Now we can actually train the network. First we'll create the network itself, with some given hyperparameters. Then, define the mini-batch size, and start training!
```
## TODO: set your model hyperparameters
# define and print the net
n_hidden=512
n_layers=2
net = CharRNN(chars, n_hidden, n_layers)
print(net)
```
### Set your training hyperparameters!
```
batch_size = 128
seq_length = 100
n_epochs = 20  # start small if you are just testing initial behavior
# train the model
train(net, encoded, epochs=n_epochs, batch_size=batch_size, seq_length=seq_length, lr=0.001, print_every=10)
```
## Getting the best model
To set your hyperparameters to get the best performance, you'll want to watch the training and validation losses. If your training loss is much lower than the validation loss, you're overfitting. Increase regularization (more dropout) or use a smaller network. If the training and validation losses are close, you're underfitting so you can increase the size of the network.
## Hyperparameters
Here are the hyperparameters for the network.
In defining the model:
* `n_hidden` - The number of units in the hidden layers.
* `n_layers` - Number of hidden LSTM layers to use.
We assume that dropout probability and learning rate will be kept at the default, in this example.
And in training:
* `batch_size` - Number of sequences running through the network in one pass.
* `seq_length` - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
* `lr` - Learning rate for training
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to [where it originally came from](https://github.com/karpathy/char-rnn#tips-and-tricks).
> ## Tips and Tricks
>### Monitoring Validation Loss vs. Training Loss
>If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
> - If your training loss is much lower than validation loss then this means the network might be **overfitting**. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
> - If your training/validation loss are about equal then your model is **underfitting**. Increase the size of your model (either number of layers or the raw number of neurons per layer)
> ### Approximate number of parameters
> The two most important parameters that control the model are `n_hidden` and `n_layers`. I would advise that you always use `n_layers` of either 2/3. The `n_hidden` can be adjusted based on how much data you have. The two important quantities to keep track of here are:
> - The number of parameters in your model. This is printed when you start training.
> - The size of your dataset. 1MB file is approximately 1 million characters.
>These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
> - I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make `n_hidden` larger.
> - I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
> ### Best models strategy
>The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
>It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
>By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
## Checkpoint
After training, we'll save the model so we can load it again later if we need to. Here I'm saving the parameters needed to create the same architecture, the hidden layer hyperparameters and the text characters.
```
# change the name, for saving multiple files
model_name = 'rnn_x_epoch.net'
checkpoint = {'n_hidden': net.n_hidden,
'n_layers': net.n_layers,
'state_dict': net.state_dict(),
'tokens': net.chars}
with open(model_name, 'wb') as f:
torch.save(checkpoint, f)
```
---
## Making Predictions
Now that the model is trained, we'll want to sample from it and make predictions about next characters! To sample, we pass in a character and have the network predict the next character. Then we take that character, pass it back in, and get another predicted character. Just keep doing this and you'll generate a bunch of text!
### A note on the `predict` function
The output of our RNN is from a fully-connected layer and it outputs a **distribution of next-character scores**.
> To actually get the next character, we apply a softmax function, which gives us a *probability* distribution that we can then sample to predict the next character.
### Top K sampling
Our predictions come from a categorical probability distribution over all the possible characters. We can make the sampled text more reasonable to handle (with fewer possibilities) by only considering the $K$ most probable characters. This will prevent the network from giving us completely absurd characters while allowing it to introduce some noise and randomness into the sampled text. Read more about [topk, here](https://pytorch.org/docs/stable/torch.html#torch.topk).
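As a minimal, standalone illustration of the idea (the `predict` function below does the same thing on the real network output; the score values here are made up):
```python
# Sketch: softmax + top-k sampling over a fake vector of next-character scores
scores = torch.tensor([[2.0, 0.5, 1.0, -1.0, 0.2]])   # pretend output for 5 characters
p = F.softmax(scores, dim=1)
p, top_ch = p.topk(3)                                  # keep the 3 most probable characters
p, top_ch = p.numpy().squeeze(), top_ch.numpy().squeeze()
next_idx = np.random.choice(top_ch, p=p / p.sum())     # sample among the top 3
print(next_idx)
```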
```
def predict(net, char, h=None, top_k=None):
''' Given a character, predict the next character.
Returns the predicted character and the hidden state.
'''
# tensor inputs
x = np.array([[net.char2int[char]]])
x = one_hot_encode(x, len(net.chars))
inputs = torch.from_numpy(x)
if(train_on_gpu):
inputs = inputs.cuda()
# detach hidden state from history
h = tuple([each.data for each in h])
# get the output of the model
out, h = net(inputs, h)
# get the character probabilities
p = F.softmax(out, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# get top characters
if top_k is None:
top_ch = np.arange(len(net.chars))
else:
p, top_ch = p.topk(top_k)
top_ch = top_ch.numpy().squeeze()
# select the likely next character with some element of randomness
p = p.numpy().squeeze()
char = np.random.choice(top_ch, p=p/p.sum())
# return the encoded value of the predicted char and the hidden state
return net.int2char[char], h
```
### Priming and generating text
Typically you'll want to prime the network so you can build up a hidden state. Otherwise the network will start out generating characters at random. In general the first bunch of characters will be a little rough since it hasn't built up a long history of characters to predict from.
```
def sample(net, size, prime='The', top_k=None):
if(train_on_gpu):
net.cuda()
else:
net.cpu()
net.eval() # eval mode
# First off, run through the prime characters
chars = [ch for ch in prime]
h = net.init_hidden(1)
for ch in prime:
char, h = predict(net, ch, h, top_k=top_k)
chars.append(char)
# Now pass in the previous character and get a new one
for ii in range(size):
char, h = predict(net, chars[-1], h, top_k=top_k)
chars.append(char)
return ''.join(chars)
print(sample(net, 1000, prime='Anna', top_k=5))
```
## Loading a checkpoint
```
# Here we load the checkpoint saved above (change the filename if you saved e.g. `rnn_20_epoch.net`)
with open('rnn_x_epoch.net', 'rb') as f:
checkpoint = torch.load(f)
loaded = CharRNN(checkpoint['tokens'], n_hidden=checkpoint['n_hidden'], n_layers=checkpoint['n_layers'])
loaded.load_state_dict(checkpoint['state_dict'])
# Sample using a loaded model
print(sample(loaded, 2000, top_k=5, prime="And Levin said"))
```
|
github_jupyter
|
| 0.760917 | 0.965381 |
# Challenge 5
In this challenge we will practice dimensionality reduction with PCA and feature selection with RFE. We will use the [Fifa 2019](https://www.kaggle.com/karangadiya/fifa19) _data set_, which originally contains 89 variables for more than 18 thousand players of the FIFA 2019 _game_.
> Note: please do not change the names of the answer functions.
## General _setup_
```
from math import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
import statsmodels.api as sm
import statsmodels.stats as st
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import RFE
from loguru import logger
# Some matplotlib settings.
#%matplotlib inline
from IPython.core.pylabtools import figsize
figsize(12, 8)
sns.set()
fifa = pd.read_csv("fifa.csv")
columns_to_drop = ["Unnamed: 0", "ID", "Name", "Photo", "Nationality", "Flag",
"Club", "Club Logo", "Value", "Wage", "Special", "Preferred Foot",
"International Reputation", "Weak Foot", "Skill Moves", "Work Rate",
"Body Type", "Real Face", "Position", "Jersey Number", "Joined",
"Loaned From", "Contract Valid Until", "Height", "Weight", "LS",
"ST", "RS", "LW", "LF", "CF", "RF", "RW", "LAM", "CAM", "RAM", "LM",
"LCM", "CM", "RCM", "RM", "LWB", "LDM", "CDM", "RDM", "RWB", "LB", "LCB",
"CB", "RCB", "RB", "Release Clause"
]
try:
fifa.drop(columns_to_drop, axis=1, inplace=True)
except KeyError:
logger.warning(f"Columns already dropped")
```
## Start your analysis here
```
# drop the rows containing NaNs
fifa.dropna(subset = ["Crossing"], inplace=True)
```
## Question 1
What fraction of the variance can be explained by the first principal component of `fifa`? Answer as a single float (between 0 and 1) rounded to three decimal places.
```
def q1():
pca = PCA().fit(fifa)
evr = pca.explained_variance_ratio_
return round(float(evr[0]),3)
type(q1())
```
## Question 2
How many principal components do we need to explain 95% of the total variance? Answer as a single integer scalar.
```
def q2():
pca = PCA().fit(fifa)
cumulative_variance_ratio = np.cumsum(pca.explained_variance_ratio_)
return int(np.argmax(cumulative_variance_ratio >= 0.95)+1)
type(q2())
```
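As a cross-check (a sketch, not required by the challenge), scikit-learn can also pick the number of components directly when `n_components` is a float between 0 and 1:
```python
# Sketch: let PCA choose the number of components that explains >= 95% of the variance
pca_95 = PCA(n_components=0.95).fit(fifa)
print(pca_95.n_components_)
```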
## Question 3
What are the coordinates (first and second principal components) of the point `x` below? The vector below is already centered. Be careful __not__ to center the vector again (for example, by calling `PCA.transform()` on it). Answer as a tuple of floats rounded to three decimal places.
```
x = [0.87747123, -1.24990363, -1.3191255, -36.7341814,
-35.55091139, -37.29814417, -28.68671182, -30.90902583,
-42.37100061, -32.17082438, -28.86315326, -22.71193348,
-38.36945867, -20.61407566, -22.72696734, -25.50360703,
2.16339005, -27.96657305, -33.46004736, -5.08943224,
-30.21994603, 3.68803348, -36.10997302, -30.86899058,
-22.69827634, -37.95847789, -22.40090313, -30.54859849,
-26.64827358, -19.28162344, -34.69783578, -34.6614351,
48.38377664, 47.60840355, 45.76793876, 44.61110193,
49.28911284
]
def q3():
pca = PCA(2).fit(fifa)
return tuple(np.dot(pca.components_, x).round(3))
len(q3())
```
## Question 4
Perform RFE with a linear regression estimator to select five variables, eliminating them one at a time. Which variables are selected? Answer as a list of variable names.
```
def q4():
X = fifa.drop(['Overall'], axis=1)
y = fifa.Overall
reg = LinearRegression()
rfe = RFE(reg, n_features_to_select=5, step=1)
rfe.fit(X,y)
return list(X.columns[rfe.support_])
len(q4())
```
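As a complement (a sketch outside `q4()`, refitting RFE just to expose the elimination order; rank 1 means the feature was kept, larger ranks were eliminated earlier):
```python
# Sketch: full RFE ranking (1 = selected)
X_all = fifa.drop(columns='Overall')
rfe_full = RFE(LinearRegression(), n_features_to_select=5, step=1).fit(X_all, fifa['Overall'])
print(pd.Series(rfe_full.ranking_, index=X_all.columns).sort_values())
```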
|
github_jupyter
|
| 0.545286 | 0.844473 |
```
import pandas as pd
import numpy as np
import sys
print(sys.path)
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.metrics import f1_score, accuracy_score , recall_score , precision_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score, GridSearchCV
from sklearn.preprocessing import LabelEncoder
from numpy import argmax
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedShuffleSplit
from collections import Counter
from sklearn.metrics import confusion_matrix
from xgboost import XGBClassifier
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from xgboost import XGBClassifier
import pickle
df_full=pd.read_csv("data_stackOverflow/final_dataframe_with_1tag_onehot.csv",index_col=0)
df_full.shape
label_col = [col for col in df_full if col.startswith('tag_')]
df_full.drop(label_col,inplace=True,axis=1)
df_full.head(2)
df_full["maintag"].value_counts()
df_full.iloc[df_full['all_tags'].values=="nan/other/other/other"].head(2)
c=df_full['ques_title'].apply(lambda x: len(str(x).split()))
d=df_full['ques_body'].apply(lambda x: len(str(x).split()))
c.mean(),c.max(),c.min(),d.mean(),d.max(),d.min()
df_full['text']=df_full['ques_title']+df_full['ques_body']
df_full.drop(['ques_title','ques_body'],inplace=True,axis=1)
df_full = df_full[df_full.maintag.notnull()]
df_full.shape
label_encoder = LabelEncoder()
df_full['maintag']= label_encoder.fit_transform(df_full['maintag'].astype(str))
df_full['maintag'].unique()
label_encoder.classes_
le_name_mapping = dict(zip(label_encoder.classes_, label_encoder.transform(label_encoder.classes_)))
le_name_mapping
df_full.head(2)
score_col = [col for col in df_full if col.startswith('ques_score_')]
```
# Applying tfidf
```
def tf_idf(df,flag,tfidf_text):
if (flag=='train'):
xyz = tfidf_text.fit_transform(df['text'].values.astype('U')).toarray().tolist()
else:
xyz = tfidf_text.transform(df['text'].values.astype('U')).toarray().tolist()
return xyz
```
# Taking 5k samples from every class present in original dataframe
```
sample_size=5000
df_equal_samples=pd.DataFrame(df_full.groupby('maintag').apply(lambda x: x.sample(sample_size)))
equal_train, equal_val_test = train_test_split(df_equal_samples, test_size=0.2)
equal_val,equal_test=train_test_split(equal_val_test, test_size=0.5)
df_equal_samples.shape,equal_train.shape,equal_val.shape,equal_test.shape
equal_x_train = equal_train.drop(['all_tags','maintag'],axis=1)
equal_x_val = equal_val.drop(['all_tags','maintag'],axis=1)
equal_x_test = equal_test.drop(['all_tags','maintag'],axis=1)
equal_y_train=equal_train['maintag']
equal_y_val=equal_val['maintag']
equal_y_test=equal_test['maintag']
equal_x_train.shape,equal_y_train.shape,equal_x_val.shape,equal_y_val.shape,equal_x_test.shape,equal_y_test.shape
equal_x_train.columns
equal_tfidf_text = TfidfVectorizer(lowercase=True,ngram_range=(1,3),max_features=300) # max_df=0.9, min_df=0.1
equal_train_tfidf = pd.DataFrame(tf_idf(equal_x_train,'train',equal_tfidf_text ))
equal_val_tfidf= pd.DataFrame(tf_idf(equal_x_val,'val',equal_tfidf_text ))
equal_test_tfidf= pd.DataFrame(tf_idf(equal_x_test,'test',equal_tfidf_text ))
equal_train_features = pd.DataFrame(np.hstack([equal_train_tfidf, equal_x_train[score_col]]))
equal_val_features= pd.DataFrame(np.hstack([equal_val_tfidf, equal_x_val[score_col]]))
equal_test_features= pd.DataFrame(np.hstack([equal_test_tfidf, equal_x_test[score_col]]))
equal_tfidf_text.vocabulary_,len(equal_tfidf_text.vocabulary_)
print(equal_train_tfidf.shape,equal_val_tfidf.shape,equal_test_tfidf.shape)
print(equal_train_features.shape,equal_val_features.shape,equal_test_features.shape)
equal_text_feature_names = np.array(equal_tfidf_text.get_feature_names())
```
# Applying random Forest
```
model_1 = RandomForestClassifier(n_estimators=200, oob_score=True, n_jobs=-1, random_state=50, max_features="auto",min_samples_leaf=1)
model_1.fit(equal_train_features, equal_y_train)
equal_y_pred = model_1.predict(equal_test_features)
print("accuracy of Random Forest:",accuracy_score(equal_y_pred,equal_y_test))
conf_mat = confusion_matrix(equal_y_test, equal_y_pred)
print(conf_mat)
report=classification_report(equal_y_test, equal_y_pred)
print(report)
```
# Taking a random sample of 100k rows from the original dataframe (the split below is a plain random split, not stratified; a stratified alternative is sketched right after this heading)
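For reference, a truly stratified split would pass the label to `train_test_split(..., stratify=...)`. A sketch of that alternative (not executed here; `random_state` is arbitrary), using `df_full` and an 80/10/10 split like the cell below:
```python
# Sketch: stratified 80/10/10 split on the main tag (alternative to the plain random split below)
strat_train, strat_rest = train_test_split(
    df_full, test_size=0.2, stratify=df_full['maintag'], random_state=42)
strat_val, strat_test = train_test_split(
    strat_rest, test_size=0.5, stratify=strat_rest['maintag'], random_state=42)
```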
```
df=df_full.sample(n=100000)
df['maintag'].value_counts()
df.to_csv("sample_data.csv")
train, val_test = train_test_split(df, test_size=0.2)
val,test=train_test_split(val_test, test_size=0.5)
df.shape,train.shape,val.shape,test.shape
df.columns
x_train = train.drop(['all_tags','maintag'],axis=1)
x_val = val.drop(['all_tags','maintag'],axis=1)
x_test = test.drop(['all_tags','maintag'],axis=1)
y_train=train['maintag']
y_val=val['maintag']
y_test=test['maintag']
x_train.shape,y_train.shape,x_val.shape,y_val.shape,x_test.shape,y_test.shape
tfidf_text = TfidfVectorizer(lowercase=True,ngram_range=(1,3),max_features=500) # max_df=0.9, min_df=0.1
train_tfidf = pd.DataFrame(tf_idf(x_train,'train',tfidf_text))
val_tfidf= pd.DataFrame(tf_idf(x_val,'val',tfidf_text))
test_tfidf= pd.DataFrame(tf_idf(x_test,'test',tfidf_text))
train_features = pd.DataFrame(np.hstack([train_tfidf, x_train[score_col]]))
val_features= pd.DataFrame(np.hstack([val_tfidf, x_val[score_col]]))
test_features= pd.DataFrame(np.hstack([test_tfidf, x_test[score_col]]))
len(tfidf_text.vocabulary_)
print(train_tfidf.shape,val_tfidf.shape,test_tfidf.shape)
print(train_features.shape,val_features.shape,test_features.shape)
text_feature_names = np.array(tfidf_text.get_feature_names())
model_2 = RandomForestClassifier(n_estimators=100, oob_score=True, n_jobs=-1, random_state=50, max_features="auto", min_samples_leaf=1)
model_2.fit(train_features, y_train)
y_pred = model_2.predict(test_features)
print("accuracy of Random Forest:",accuracy_score(y_pred,y_test))
conf_mat = confusion_matrix(y_test, y_pred)
print(conf_mat)
report=classification_report(y_test, y_pred)
print(report)
importances = model_2.feature_importances_
indices = np.argsort(importances)
plt.figure(1)
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
feature_names = np.array(list(text_feature_names) + list(score_col))  # tf-idf terms followed by the score columns
plt.yticks(range(len(indices)), feature_names[indices])
plt.xlabel('Relative Importance')
importances = dict(enumerate(importances))  # {feature index: importance}
text_dictn=tfidf_text.vocabulary_
feature_dict=dict([(value, key) for key, value in text_dictn.items()])
def mergeDict(dict1, dict2):
''' Merge dictionaries and keep values of common keys in list'''
dict3 = {**dict1, **dict2}
for key, value in dict3.items():
if key in dict1 and key in dict2:
dict3[key] = [value , dict1[key]]
return dict3
final_dict=mergeDict(importances, feature_dict)
#final_dict
rem_list = [500, 501, 502]
res = dict([(key, val) for key, val in
final_dict.items() if key not in rem_list])
listofTuples = sorted(res.items() , reverse=True, key=lambda x: x[1][1])
listofTuples[0:10]
with open("feature_importance.txt", "wb") as fp: #Pickling
pickle.dump(listofTuples, fp)
pickle.dump(model_2, open('RF_model', 'wb'))
```
# Applying XGBoost on the dataset
```
eval_set = [(train_features, y_train),(val_features, y_val)]
model_3 = XGBClassifier(learning_rate =0.1,n_estimators=100, max_depth=3,nthread=4,seed=27,objective="multi:softmax",num_class=20)
# n_estimators = range(50, 400, 50)
# param_grid = dict(n_estimators=n_estimators)
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=27)
# grid_search = GridSearchCV(model_3, param_grid, scoring="neg_log_loss", n_jobs=-1, cv=kfold)
# result = grid_search.fit(X, label_encoded_y)
model_3.fit(train_features,y_train, eval_set=eval_set)
y_pred = model_3.predict(test_features)
print(classification_report(y_test, y_pred, target_names=label_encoder.classes_))
print(confusion_matrix(y_test, y_pred))
```
### Spark MLLib - Decision Tree
**Description**
- Easy to understand and explain.
- Predictor variables are used to build a tree that progressively narrows down the predicted target value.
- Training data is used to build the decision tree.
- The decision tree then becomes a model that is used to make predictions on new data.
**Pros:** Easy to understand and explain, works with missing values and is fast.
**Cons:** Limited accuracy; prone to bias and does not work well with many predictor variables.
**Application:** Credit approval, preliminary categorization.
### Classifying Iris Dataset Flower Species
```
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.feature import StringIndexer
from pyspark.ml.linalg import Vectors
from pyspark.sql import Row
spSession = SparkSession.builder.master('local').appName('IrisPrediction').getOrCreate()
rddIris01 = sc.textFile('aux/datasets/iris.csv')
```
**We can cache the RDD to optimize performance.**
```
rddIris01.cache()
rddIris01.count()
rddIris01.take(5)
header = rddIris01.first()
rddIris02 = rddIris01.filter(lambda row: row != header)
rddIris02.count()
```
### Data Cleaning
```
def dataCleaning(strRow):
listAttr = strRow.split(',')
row = Row(
SEPAL_LENGTH = float(listAttr[0]),
SEPAL_WIDTH = float(listAttr[1]),
PETAL_LENGTH = float(listAttr[2]),
PETAL_WIDTH = float(listAttr[3]),
SPECIE = listAttr[4]
)
return row
rddIris03 = rddIris02.map(dataCleaning)
rddIris03.take(5)
```
**Converting the RDD to a DataFrame**
```
dfIris = spSession.createDataFrame(rddIris03)
dfIris.cache()
dfIris.take(5)
```
**Creating a numeric index for the label target column**
```
stringIndexer = StringIndexer(inputCol = 'SPECIE', outputCol = 'IDX_SPECIE')
stringIndexerModel = stringIndexer.fit(dfIris)
dfIris = stringIndexerModel.transform(dfIris)
dfIris.select('SPECIE', 'IDX_SPECIE').distinct().collect()
```
### Exploratory Data Analysis
```
dfIris.describe().show()
for column in dfIris.columns:
if not(isinstance(dfIris.select(column).take(1)[0][0], str)):
print(f"IDX_SPECIE correlation with {column}: {dfIris.stat.corr('IDX_SPECIE', column)}")
```
### Data Pre-Processing
**Creating a LabeledPoint (target, Vector[features])**<br />
Columns that are not relevant to the model (or that have a low correlation with the target) are dropped.
```
def setLabeledPoint(row):
labeledPoint = (
row['SPECIE'],
row['IDX_SPECIE'],
Vectors.dense([
row['SEPAL_LENGTH'],
row['SEPAL_WIDTH'],
row['PETAL_LENGTH'],
row['PETAL_WIDTH']
])
)
return labeledPoint
rddIris04 = dfIris.rdd.map(setLabeledPoint)
rddIris04.take(5)
dfIris = spSession.createDataFrame(rddIris04, ['specie', 'label', 'features'])
dfIris.select('specie', 'label', 'features').show(5)
```
### Machine Learning
```
(dataTraining, dataTest) = dfIris.randomSplit([.7, .3])
dataTraining.count()
dataTest.count()
dataTraining.count() + dataTest.count() == dfIris.count()
decisionTreeClassifier = DecisionTreeClassifier(maxDepth = 2, labelCol = 'label', featuresCol = 'features')
model = decisionTreeClassifier.fit(dataTraining)
model
print(f'Nodes number: {str(model.numNodes)}')
print(f'Depth: {str(model.depth)}')
predictions = model.transform(dataTest)
predictions.select('specie', 'features', 'prediction').show(5)
evaluator = MulticlassClassificationEvaluator(
predictionCol = 'prediction',
labelCol = 'label',
metricName = 'accuracy')
evaluator.evaluate(predictions)
```
**Confusion Matrix - Summing Up Predictions**
```
predictions.groupBy('label', 'prediction').count().show()
```
<a href="https://colab.research.google.com/github/Jaydenzk/DS-repo/blob/master/Post_here_subreddit.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import requests
import math
PAGE_COUNT = 5
RANDOM_STATE = 0
TEST_SIZE = 0.25
def get_reddit_data(page_count):
print('fetching your data, * = 1 request to the Reddit API')
headers = {'User-Agent': 'Predicting Reddit Post Metadata'}
posts = []
top_subreddits = [ 'r/funny', 'r/gaming', 'r/pics',
'r/aww', 'r/science', 'r/worldnews', 'r/Music',
'r/movies', 'r/todayilearned', 'r/videos'
]
for i in range(len(top_subreddits)):
after = ''
for j in range(page_count):
print('*', end='')
url = 'https://www.reddit.com/'+ top_subreddits[i] + '/top.json?t=all&after=' + after
#print(url)
response = requests.get(url, headers=headers)
for k in range(len(response.json()['data']['children'])):
post = {}
post['created_utc'] = int(response.json()['data']['children'][k]['data']['created_utc'])
post['is_video'] = int(response.json()['data']['children'][k]['data']['is_video'])
post['subreddit'] = response.json()['data']['children'][k]['data']['subreddit']
post['title'] = response.json()['data']['children'][k]['data']['title']
post['total_awards_received'] = response.json()['data']['children'][k]['data']['total_awards_received']
post['ups'] = response.json()['data']['children'][k]['data']['ups']
posts.append(post)
after = response.json()['data']['after']
return posts
columns = ['created_utc', 'is_video', 'subreddit', 'title', 'total_awards_received', 'ups']
df = pd.DataFrame(get_reddit_data(PAGE_COUNT), columns=columns)
df = df.sample(frac=1, random_state=RANDOM_STATE)
print('df.Describe():\n', df.describe(), '\n')
print(df.info(), '\n')
print('df.head(10):\n', df.head(10), '\n')
print('SUM OF NA VALUES:\n', df.isna().sum(), '\n')
df.head(25)
from scipy.stats import zscore
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix
from sklearn.pipeline import make_pipeline
X = df['title'].values
y = df['subreddit'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SIZE, random_state=0)
model = make_pipeline(TfidfVectorizer(), MultinomialNB())
model.fit(X_train, y_train)
y_predict = model.predict(X_test)
accuracy = (y_predict == y_test).mean()
baseline = pd.Series(y_test).value_counts().iloc[0] / pd.Series(y_test).value_counts().sum()  # share of the majority class
prediction_count = pd.Series(y_test).value_counts().sum()
print('VC of y_train')
print(pd.Series(y_train).value_counts()[:10], '\n')
print('VC of y_test')
print(pd.Series(y_test).value_counts()[:10], '\n')
print('VC of y_predict')
print(pd.Series(y_predict).value_counts()[:10], '\n')
print('# PRDCTN: ', prediction_count)
print('BASELINE: ', baseline)
print('ACCURACY: ', accuracy)
print(len(y_test), len(y_predict))
# train_predict_report(df[0:10000])  # this helper is not defined in this notebook, so the call is commented out
import pickle
pickle.dump(model, open( "model.pkl", "wb" ) )
```
# Unfolded SDK Machine Learning Demo (LA Bike Share)
[![open_in_colab][colab_badge]][colab_notebook_link]
[![open_in_binder][binder_badge]][binder_notebook_link]
[colab_badge]: https://colab.research.google.com/assets/colab-badge.svg
[colab_notebook_link]: https://colab.research.google.com/github/UnfoldedInc/examples/blob/master/notebooks/12%20-%20Pytorch%20Trip%20Duration.ipynb
[binder_badge]: https://mybinder.org/badge_logo.svg
[binder_notebook_link]: https://mybinder.org/v2/gh/UnfoldedInc/examples/master?urlpath=lab/tree/notebooks/12%20-%20Pytorch%20Trip%20Duration.ipynb
For this demo the [data](https://www.kaggle.com/cityofLA/los-angeles-metro-bike-share-trip-data) was taken from Kaggle. It contains information about bike-share trips over one year in Los Angeles. The task is to predict bike trip duration, since it may be useful to know when a bike will be returned. The prediction is made from the starting station location, the start time and a few other attributes.
## Dependencies
This notebook uses the following dependencies:
- `xarray`
- `dask`
- `netCDF4`
- `bottleneck`
- `tqdm`
- `dask-ml`
- `pandas`
- `seaborn`
- `matplotlib`
- `scikit-learn`
- `category_encoders`
- `torch`
- `missingno`
- `unfolded.map-sdk`
- `unfolded.data-sdk`
If running this notebook in Binder, these dependencies should already be installed. If running in Colab, the next cell will install these dependencies. In another environment, you'll need to make sure these dependencies are available by running the following `pip` command in a shell.
```bash
pip install xarray dask netCDF4 bottleneck tqdm dask-ml pandas seaborn matplotlib scikit-learn category_encoders torch missingno unfolded.map-sdk unfolded.data-sdk
```
This notebook was originally tested with the following package versions, but likely works with a broad range of versions:
- xarray==0.19.0
- dask==2021.09.1
- netCDF4==1.5.7
- bottleneck==1.3.2
- tqdm==4.62.3
- dask-ml==1.9.0
- pandas==1.3.3
- seaborn==0.11.2
- matplotlib==3.4.3
- scikit-learn==1.0
- category_encoders==2.2.2
- torch==1.9.1
- missingno==0.5.0
- unfolded.map-sdk==0.5.0
- unfolded.data-sdk==0.5.0
```
# If in Colab, install this notebook's required dependencies
import sys
if "google.colab" in sys.modules:
!pip install 'unfolded.map_sdk>=0.6.0' xarray dask netCDF4 bottleneck tqdm dask-ml pandas seaborn matplotlib scikit-learn category_encoders torch missingno unfolded.data-sdk
```
## Imports
```
import time
import math
import random
import os
import requests
import pandas as pd
import xarray as xr
import seaborn as sns
import numpy as np
from tqdm import tqdm
import category_encoders as ce
import missingno as msno
from uuid import uuid4
import torch
from torch import nn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
from unfolded.map_sdk import UnfoldedMap
```
## Load Dataset
Load <code>.nc</code> dataset
```
def load_data(url, path):
if os.path.isfile(path):
pass
else:
response = requests.get(url, stream=True)
total_size_in_bytes = int(response.headers.get("content-length", 0))
block_size = 1048576 # 1 Megabyte
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with open(path, "wb") as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
dataset_url = "https://actionengine-public.s3.us-east-2.amazonaws.com/metro-bike-share-trip-data.nc"
dataset_path = "metro-bike-share-trip-data-downloaded.nc"
load_data(dataset_url, dataset_path)
```
## Data Clean-up
Open the dataset and convert it to Dask format
```
ds = xr.open_dataset("metro-bike-share-trip-data-downloaded.nc", chunks={"Start Time": 135000})
ddf = ds.to_dask_dataframe(set_index=True)
ddf.head()
print("Dataset has ", ddf.shape[0].compute(), " rows")
```
Look at how many NaNs there are in each column
```
msno.bar(ddf.compute())
```
Delete rows that lack necessary information: starting or ending point latitude and longitude, start time, etc.
```
ddf = ddf.dropna(
subset=[
"Starting Station ID",
"Starting Station Latitude",
"Starting Station Longitude",
"Ending Station Latitude",
"Ending Station Longitude",
"Start Time",
"Precinct Boundaries",
"Census Tracts",
"Plan Duration",
]
)
```
Delete rows with 0 in the <b>Starting Station Latitude</b> column and fill all remaining NaNs with zeroes.
```
ddf = ddf[(ddf["Starting Station Latitude"] != 0)]
ddf = ddf.fillna(0)
print("Dataset has ", ddf.shape[0].compute(), " rows")
```
### Trip Duration Histogram
```
hist = sns.histplot(data=ddf["Duration"], bins=100)
hist.set(xlabel="Duration")
hist.set_title("Trip duration")
plt.show()
```
Remove outliers from the <b>Duration</b> column. The IQR (Interquartile Range) method is used: the lower border is Q1-1.5\*IQR and the upper border is Q3+1.5\*IQR. <br>
[To read more](https://en.wikipedia.org/wiki/Interquartile_range)
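As a quick numeric illustration of the rule (on a made-up toy array, not the real data):
```
# Toy illustration of the IQR outlier rule on made-up numbers.
import numpy as np

toy = np.array([4, 5, 5, 6, 7, 8, 9, 10, 50])
q25, q75 = np.percentile(toy, 25), np.percentile(toy, 75)   # 5.0 and 9.0
iqr = q75 - q25                                             # 4.0
lower, upper = q25 - 1.5 * iqr, q75 + 1.5 * iqr             # -1.0 and 15.0
print(lower, upper)                                         # 50 lies above `upper`, so it is an outlier
```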
```
def outlier_borders(data):
q25, q75 = np.percentile(data, 25), np.percentile(data, 75)
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = q25 - cut_off, q75 + cut_off
return lower, upper
lower, upper = outlier_borders(ddf["Duration"])
no_outliers = [x for x in ddf["Duration"] if x > lower and x < upper]
ddf = ddf[(ddf["Duration"] < upper)]
hist = sns.histplot(data=no_outliers, bins=20)
hist.set(xlabel="Duration")
hist.set_title("Trip duration with no outliers")
plt.show()
```
Manually remove two outlier records (so that useful points are not removed)
```
ddf = ddf[(ddf["Starting Station Longitude"] > -118.3)]
print("Dataset has ", ddf.shape[0].compute(), " rows")
```
## Visualization
### Starting Points Map
Points on the map represent starting station locations. The lighter a point, the more trips started from it.
```
stations_map = UnfoldedMap()
stations_map
# TODO: make this plot open in 3D automatically and add height to hexagons
ddf["Starting Station Latitude"] = ddf["Starting Station Latitude"].compute().round(6)
ddf["Starting Station Longitude"] = ddf["Starting Station Longitude"].compute().round(6)
ddf["Ending Station Latitude"] = ddf["Ending Station Latitude"].compute().round(6)
ddf["Ending Station Longitude"] = ddf["Ending Station Longitude"].compute().round(6)
start_stations = ddf[
["Starting Station Latitude", "Starting Station Longitude"]
].compute()
start_stations = start_stations.groupby(
["Starting Station Latitude", "Starting Station Longitude"]
).size()
start_stations = pd.DataFrame(start_stations.reset_index())
start_stations.rename(columns={0: "count"}, inplace=True)
stations_dataset_id = uuid4()
stations_map.add_dataset(
{"uuid": stations_dataset_id, "label": "Stations dataset", "data": start_stations},
auto_create_layers=False,
)
stations_map.add_layer(
{
"id": "Starting points",
"type": "hexagon",
"config": {
"label": "Starting points",
"data_id": stations_dataset_id,
"columns": {
"lat": "Starting Station Latitude",
"lng": "Starting Station Longitude",
},
"is_visible": True,
"color_scale": "quantize",
"color_field": {"name": "count", "type": "int"},
"visConfig": {"worldUnitSize": 0.1,},
"visualChannels": {
"colorScale": "quantile",
"sizeScale": "linear"
}
},
}
)
stations_map.set_view_state({"longitude": -118.25, "latitude": 34.04, "zoom": 12})
```
### Routes Map
Arcs on the map connect the starting and ending points of a trip. The lighter an arc, the more trips there are between the points it connects.
```
routes_map = UnfoldedMap()
routes_map
routes = ddf[
[
"Starting Station Latitude",
"Starting Station Longitude",
"Ending Station Latitude",
"Ending Station Longitude",
]
].compute()
routes = routes.groupby(
[
"Starting Station Latitude",
"Starting Station Longitude",
"Ending Station Latitude",
"Ending Station Longitude",
]
).size()
routes = pd.DataFrame(routes.reset_index())
routes.rename(columns={0: "count"}, inplace=True)
routes_dataset_id = uuid4()
routes_map.add_dataset(
{"uuid": routes_dataset_id, "label": "Routes dataset", "data": routes},
auto_create_layers=False,
)
routes_map.add_layer(
{
"id": "Routes",
"type": "arc",
"config": {
"label": "Routes",
"data_id": routes_dataset_id,
"columns": {
"lat0": "Starting Station Latitude",
"lng0": "Starting Station Longitude",
"lat1": "Ending Station Latitude",
"lng1": "Ending Station Longitude",
},
"is_visible": True,
"visConfig": {"opacity": 0.8, "thickness": 0.3},
"color_scale": "quantile",
"color_field": {"name": "count", "type": "int"},
"vis_config": {
"opacity": 0.8,
"thickness": 0.3,
"colorRange": {
"colors": [
"#5A1846",
"#900C3F",
"#C70039",
"#E3611C",
"#F1920E",
"#FFC300",
],
},
"target_color": "count",
},
},
}
)
routes_map.set_view_state({"longitude": -118.25, "latitude": 34.04, "zoom": 12})
```
### Visualization of Category Data
```
trip_category_stats = ddf["Trip Route Category"].value_counts().compute()
colors = sns.color_palette("pastel")[0:5]
plt.title("Trip Route Category")
plt.pie(
trip_category_stats.values,
labels=trip_category_stats.index,
colors=colors,
autopct="%.0f%%",
)
plt.show()
plan_category_stats = ddf["Passholder Type"].value_counts().compute()
colors = sns.color_palette("pastel")[0:5]
plt.title("Passholder Type")
plt.pie(
plan_category_stats.values,
labels=plan_category_stats.index,
colors=colors,
autopct="%.0f%%",
)
plt.show()
```
As shown in the figures above, most people prefer to take one-way trips and hold a monthly pass.
## Preprocessing
### Preprocess Datetime Features
All data has to be turned into numerical form, so the datetime feature column <b>Start Time</b> is decomposed into several feature columns (<b>Year</b>, <b>Month</b>, etc.)
```
ddf["Start Time"] = pd.to_datetime(ddf["Start Time"].compute())
def extract_time_features(timestamp):
return [
timestamp.year,
timestamp.month,
timestamp.day,
timestamp.hour,
timestamp.minute,
]
ddf_to_concat = ddf.compute().reset_index(drop=True)
time_features_ddf = ddf_to_concat["Start Time"].apply(extract_time_features)
time_features_df = pd.DataFrame(
time_features_ddf.tolist(), columns=["Year", "Month", "Day", "Hour", "Minute"]
)
df_with_time_features = pd.concat([ddf_to_concat, time_features_df], axis=1)
df_with_time_features = df_with_time_features.drop(columns=["Start Time"])
df_with_time_features
```
### Split the Dataset into Train, Validation and Test Parts
```
y = df_with_time_features["Duration"]
X = df_with_time_features.drop(
columns=["Duration", "Ending Station Latitude", "Ending Station Longitude"]
)
X_cut, X_test, y_cut, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_cut, y_cut, test_size=0.1, random_state=42
)
X_train = X_train.reset_index(drop=True)
X_val = X_val.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
y_val = y_val.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
```
### Apply Count Encoder to Categorical Features
As already mentioned, all data has to be turned into numerical form, including categorical variables. To do this, the count encoding method was chosen. It replaces each categorical value in a column with a number proportional to how often that value appears in the column. <br>
[To read more](http://contrib.scikit-learn.org/category_encoders/count.html)
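As a small illustration on a hypothetical toy column (the values below are made up), normalized count encoding simply replaces each category with its relative frequency:
```
# Toy illustration of normalized count encoding on made-up data.
import pandas as pd
import category_encoders as ce

toy = pd.DataFrame({'Passholder Type': ['Monthly Pass', 'Monthly Pass', 'Walk-up', 'Flex Pass']})
toy_encoder = ce.CountEncoder(cols=['Passholder Type'], normalize=True, return_df=True)
print(toy_encoder.fit_transform(toy))
# 'Monthly Pass' -> 0.50, 'Walk-up' -> 0.25, 'Flex Pass' -> 0.25
```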
```
columns_for_count_encoder = [
"Passholder Type",
"Trip Route Category",
"Starting Station ID",
"Plan Duration",
"Neighborhood Councils (Certified)",
"Council Districts",
"Zip Codes",
"Precinct Boundaries",
"Census Tracts",
]
count_encoder = ce.CountEncoder(
cols=columns_for_count_encoder, return_df=True, normalize=True
)
count_encoder = count_encoder.fit(X_train)
X_train = count_encoder.transform(X_train)
X_val = count_encoder.transform(X_val)
X_test = count_encoder.transform(X_test)
X_train
```
### Apply Scaling to Numerical Columns
Features whose values lie in different ranges should be normalized to a common range; otherwise, features with a larger range would influence the result more. Here the MinMax scaler is used to map the range of every variable to between 0 and 1.<br>
[To read more](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html)
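For reference, MinMax scaling maps each value x to (x - min) / (max - min); a minimal sketch on made-up numbers:
```
# Toy illustration of MinMax scaling on made-up values.
import numpy as np
from sklearn.preprocessing import MinMaxScaler

toy = np.array([[10.0], [15.0], [20.0]])
print(MinMaxScaler().fit_transform(toy).ravel())  # [0.  0.5 1. ]
```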
```
columns_to_normalize = [
"Starting Station Latitude",
"Starting Station Longitude",
"Year",
"Month",
"Day",
"Hour",
"Minute",
]
normalizer = MinMaxScaler()
normalizer = normalizer.fit(X_train[columns_to_normalize])
X_train[columns_to_normalize] = normalizer.transform(X_train[columns_to_normalize])
X_val[columns_to_normalize] = normalizer.transform(X_val[columns_to_normalize])
X_test[columns_to_normalize] = normalizer.transform(X_test[columns_to_normalize])
X_train
```
### Apply Log and Scaling to the Target Column
Neural networks train better when the target variable is not heavily skewed, so the log function was applied to the target variable along with the MinMax scaler.<br>
[To read more](https://en.wikipedia.org/wiki/Data_transformation_(statistics))
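As a quick sanity check on made-up numbers: the log transform compresses the long right tail, and `np.exp` inverts it exactly (the MinMax step is later undone with `inverse_transform` in the same way):
```
# Toy illustration: log compresses a skewed range and np.exp undoes it.
import numpy as np

toy_durations = np.array([60.0, 300.0, 1800.0, 7200.0])
logged = np.log(toy_durations)
print(logged.round(2))   # roughly [4.09 5.7 7.5 8.88] -- a much narrower spread
print(np.exp(logged))    # recovers the original durations
```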
```
hist = sns.histplot(data=y_train, bins=20)
hist.set(xlabel="Duration")
hist.set_title("Trip duration")
plt.show()
y_train = y_train.apply(np.log)
y_val = y_val.apply(np.log)
y_test = y_test.apply(np.log)
hist = sns.histplot(data=y_train, bins=20)
hist.set(xlabel="log(Duration)")
hist.set_title("Trip duration log")
plt.show()
normalizer_y = MinMaxScaler()
normalizer_y = normalizer_y.fit(y_train.to_numpy().reshape(-1, 1))
y_train = normalizer_y.transform(y_train.to_numpy().reshape(-1, 1)).squeeze()
y_val = normalizer_y.transform(y_val.to_numpy().reshape(-1, 1)).squeeze()
y_test = normalizer_y.transform(y_test.to_numpy().reshape(-1, 1)).squeeze()
```
## Model
The model has 5 linear layers with ReLU activation functions between them
```
class Net(nn.Module):
def __init__(self, n_feature, n_output):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(n_feature, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, 16),
nn.ReLU(),
nn.Linear(16, 8),
nn.ReLU(),
nn.Linear(8, n_output),
)
def forward(self, x):
return self.layers(x)
```
## Model Training
<code>USE_PRETRAINED_MODEL</code> should be set to <code>True</code> if you want to download and use a pretrained model. If you want to train your model, please set it to <code>False</code>.
```
USE_PRETRAINED_MODEL = True
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return "%dm %ds" % (m, s)
def fit():
start = time.time()
losses_train = []
losses_val = []
num_batches_train = (n - 1) // batch_size + 1
num_batches_val = (X_val_t.shape[0] - 1) // batch_size + 1
for epoch in range(epochs):
epoch_train_loss = 0
epoch_val_loss = 0
for i in range(num_batches_train):
optimizer.zero_grad()
random_indexes = random.sample(range(0, X_train_t.shape[0] - 1), batch_size)
xb = X_train_t[random_indexes].float()
yb = y_train_t[random_indexes].float().reshape(-1, 1)
pred = net(xb)
loss = loss_func(pred, yb)
loss.backward()
optimizer.step()
epoch_train_loss += loss.item()
for i in range(num_batches_val):
start_i = i * batch_size
end_i = start_i + batch_size
xb = X_val_t[start_i:end_i].float()
yb = y_val_t[start_i:end_i].float().reshape(-1, 1)
pred = net(xb)
loss = loss_func(pred, yb)
epoch_val_loss += loss.item()
epoch_train_loss /= num_batches_train
epoch_val_loss /= num_batches_val
losses_train.append(epoch_train_loss)
losses_val.append(epoch_val_loss)
print(
"Epoch "
+ str(epoch)
+ " %s (%d %d%%) Train loss: %.4f Validation loss: %.4f"
% (
timeSince(start),
epoch,
epoch / epochs * 100,
epoch_train_loss,
epoch_val_loss,
)
)
return losses_train, losses_val
X_train_t, y_train_t, X_val_t, y_val_t, X_test_t, y_test_t = map(
torch.tensor,
(X_train.to_numpy(), y_train, X_val.to_numpy(), y_val, X_test.to_numpy(), y_test),
)
if USE_PRETRAINED_MODEL:
model_url = "https://actionengine-public.s3.us-east-2.amazonaws.com/model"
model_path = "model"
load_data(model_url, model_path)
net = torch.load("model")
net.eval()
else:
net = Net(n_feature=16, n_output=1) # define the network
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
loss_func = nn.MSELoss() # this is for regression mean squared loss
batch_size = 20
n = X_train.shape[0]
epochs = 100
losses_train, losses_val = fit()
print(
"Min train loss: ", min(losses_train), " Min validation loss: ", min(losses_val)
)
torch.save(net, "model")
# show loss graph
loss_data = pd.DataFrame(
{
"Epochs": [j for j in range(0, epochs)],
"Train loss": losses_train,
"Validation loss": losses_val,
}
)
sns.lineplot(
x="Epochs", y="value", hue="variable", data=pd.melt(loss_data, ["Epochs"])
).set_title("Training process")
```
## Model Evaluation
### Metrics
[MSE](https://en.wikipedia.org/wiki/Mean_squared_error) and [MAE](https://en.wikipedia.org/wiki/Mean_absolute_error) metrics for normalized data
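For reference, both metrics are simple averages of the prediction error over the test set; below is a minimal NumPy sketch of the definitions (MAPE, used in the next subsection, additionally divides each absolute error by the true value):
```
# Plain NumPy versions of the metrics used below, for reference.
import numpy as np

def mse(y_true, y_pred):
    return np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2)

def mae(y_true, y_pred):
    return np.mean(np.abs(np.asarray(y_true) - np.asarray(y_pred)))

def mape(y_true, y_pred):
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    return np.mean(np.abs(y_true - y_pred) / np.abs(y_true))

print(mse([1.0, 2.0], [1.5, 1.5]), mae([1.0, 2.0], [1.5, 1.5]))  # 0.25 0.5
```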
```
test_len = X_test_t.shape[0]
mse_metric = torch.nn.MSELoss()
mae_metric = torch.nn.L1Loss()
preds = []
mse = 0
mae = 0
for i in range(test_len):
xb = X_test_t[i].float()
yb = y_test_t[i].float().reshape(1)
pred = net(xb)
preds.append(pred.item())
mse += mse_metric(pred, yb).item()
mae += mae_metric(pred, yb).item()
mse /= test_len
mae /= test_len
print("MSE: ", mse)
print("MAE: ", mae)
```
MAE and [MAPE](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error) metrics for original data
```
unnormalized_y = normalizer_y.inverse_transform(y_test.reshape(-1, 1))
y_exp = np.exp(unnormalized_y)
unnormalized_preds = normalizer_y.inverse_transform(np.array(preds).reshape(-1, 1))
preds_exp = np.exp(unnormalized_preds)
print("MAE: ", mean_absolute_error(preds_exp, y_exp))
print("MAPE: ", mean_absolute_percentage_error(preds_exp, y_exp))
```
The model is off by around 40% on average. For example, if the ground-truth value is 700, the model is likely to predict around 980 or 420.
### Comparison to Ground Truth Values
```
for i in random.sample(range(0, len(y_exp) - 1), 10):
print("Real: ", y_exp[i], " Predicted: ", preds_exp[i])
```
### Testing Using Synthetic Dataset
To imitate a real-world task, a synthetic dataset is generated. It contains one row for each hour of a particular day for each station. The model should predict the duration of a trip that starts from that station at that time.
Dataset generation and normalization.
```
d = {column_name: [] for column_name in X.columns}
columns_from_X = [
"Starting Station ID",
"Starting Station Latitude",
"Starting Station Longitude",
"Neighborhood Councils (Certified)",
"Council Districts",
"Zip Codes",
"Precinct Boundaries",
"Census Tracts",
]
columns_same = {
"Passholder Type": "Monthly Pass",
"Trip Route Category": "One Way",
"Plan Duration": 30,
"Year": 2017,
"Month": 3,
"Day": 22,
"Minute": 0,
}
for i in range(118): # there are 118 stations
for hour in range(24):
part_station = X[
X["Starting Station Latitude"]
== start_stations.iloc[i]["Starting Station Latitude"]
].reset_index(drop=True)
part_station_0 = part_station.iloc[0]
for column_name in columns_from_X:
d[column_name].append(part_station_0[column_name])
for column_name in columns_same:
d[column_name].append(columns_same[column_name])
d["Hour"].append(hour)
generated_ds = pd.DataFrame(d)
generated_ds_encoded = count_encoder.transform(generated_ds)
generated_ds_encoded[columns_to_normalize] = normalizer.transform(
generated_ds_encoded[columns_to_normalize]
)
generated_ds
```
Make predictions and unnormalize them
```
tensor_generated = torch.tensor(generated_ds_encoded.to_numpy())
tensor_generated_len = tensor_generated.shape[0]
preds_for_gen_data = []
for i in range(tensor_generated_len):
xb = tensor_generated[i].float()
pred = net(xb)
preds_for_gen_data.append(pred.item())
unnormalized_preds_for_gen_data = normalizer_y.inverse_transform(
np.array(preds_for_gen_data).reshape(-1, 1)
)
preds_exp_for_gen_data = np.exp(unnormalized_preds_for_gen_data)
generated_ds_preds = generated_ds[
["Starting Station Latitude", "Starting Station Longitude", "Hour"]
].copy()
generated_ds_preds["Pred Duration"] = preds_exp_for_gen_data
dates = []
for hour_value in generated_ds_preds["Hour"]:
dates.append(str(pd.to_datetime("2017-03-22 " + str(hour_value) + ":00:00")))
generated_ds_preds["Date"] = dates
generated_ds_preds
```
Points on the map represent starting station locations. The lighter a point, the longer the predicted duration of a trip starting from it. There is also a time filter available: you can watch how the trip duration estimate varies over time.
```
hourly_map = UnfoldedMap(height=800)
hourly_map
hourly_dataset_id = uuid4()
hourly_dataset_filter_id = uuid4()
hourly_map.add_dataset(
{
"uuid": hourly_dataset_id,
"label": "Hourly duration dataset",
"data": generated_ds_preds,
},
auto_create_layers=False,
)
hourly_map.set_filter(
{
"id": str(hourly_dataset_filter_id),
"field": "Date",
"type": "timeRange",
"value": [1490140800000, 1490223600000],
}
)
hourly_map.add_layer(
{
"id": "Hourly duration",
"type": "point",
"config": {
"label": "Hourly duration",
"data_id": hourly_dataset_id,
"columns": {
"lat": "Starting Station Latitude",
"lng": "Starting Station Longitude",
},
"is_visible": True,
"color_scale": "quantize",
"color_field": {"name": "Pred Duration", "type": "float"},
"visConfig": {"radius": 17},
},
}
)
hourly_map.set_view_state({"longitude": -118.25, "latitude": 34.024, "zoom": 12})
```
|
github_jupyter
|
pip install xarray dask netCDF4 bottleneck tqdm dask-ml pandas seaborn matplotlib scikit-learn category_encoders torch missingno unfolded.map-sdk unfolded.data-sdk
# If in Colab, install this notebook's required dependencies
import sys
if "google.colab" in sys.modules:
!pip install 'unfolded.map_sdk>=0.6.0' xarray dask netCDF4 bottleneck tqdm dask-ml pandas seaborn matplotlib scikit-learn category_encoders torch missingno unfolded.data-sdk
import time
import math
import random
import os
import requests
import pandas as pd
import xarray as xr
import seaborn as sns
import numpy as np
from tqdm import tqdm
import category_encoders as ce
import missingno as msno
from uuid import uuid4
import torch
from torch import nn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error, mean_absolute_percentage_error
from unfolded.map_sdk import UnfoldedMap
def load_data(url, path):
if os.path.isfile(path):
pass
else:
response = requests.get(url, stream=True)
total_size_in_bytes = int(response.headers.get("content-length", 0))
block_size = 1048576 # 1 Megabyte
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with open(path, "wb") as file:
for data in response.iter_content(block_size):
progress_bar.update(len(data))
file.write(data)
progress_bar.close()
dataset_url = "https://actionengine-public.s3.us-east-2.amazonaws.com/metro-bike-share-trip-data.nc"
dataset_path = "metro-bike-share-trip-data-downloaded.nc"
load_data(dataset_url, dataset_path)
ds = xr.open_dataset("metro-bike-share-trip-data-downloaded.nc", chunks={"Start Time": 135000})
ddf = ds.to_dask_dataframe(set_index=True)
ddf.head()
print("Dataset has ", ddf.shape[0].compute(), " rows")
msno.bar(ddf.compute())
ddf = ddf.dropna(
subset=[
"Starting Station ID",
"Starting Station Latitude",
"Starting Station Longitude",
"Ending Station Latitude",
"Ending Station Longitude",
"Start Time",
"Precinct Boundaries",
"Census Tracts",
"Plan Duration",
]
)
ddf = ddf[(ddf["Starting Station Latitude"] != 0)]
ddf = ddf.fillna(0)
print("Dataset has ", ddf.shape[0].compute(), " rows")
hist = sns.histplot(data=ddf["Duration"], bins=100)
hist.set(xlabel="Duration")
hist.set_title("Trip duration")
plt.show()
def outlier_borders(data):
q25, q75 = np.percentile(data, 25), np.percentile(data, 75)
iqr = q75 - q25
cut_off = iqr * 1.5
lower, upper = q25 - cut_off, q75 + cut_off
return lower, upper
lower, upper = outlier_borders(ddf["Duration"])
no_outliers = [x for x in ddf["Duration"] if x > lower and x < upper]
ddf = ddf[(ddf["Duration"] < upper)]
hist = sns.histplot(data=no_outliers, bins=20)
hist.set(xlabel="Duration")
hist.set_title("Trip duration with no outliers")
plt.show()
ddf = ddf[(ddf["Starting Station Longitude"] > -118.3)]
print("Dataset has ", ddf.shape[0].compute(), " rows")
stations_map = UnfoldedMap()
stations_map
# TODO: make this plot open in 3D automatically and add height to hexagons
ddf["Starting Station Latitude"] = ddf["Starting Station Latitude"].compute().round(6)
ddf["Starting Station Longitude"] = ddf["Starting Station Longitude"].compute().round(6)
ddf["Ending Station Latitude"] = ddf["Ending Station Latitude"].compute().round(6)
ddf["Ending Station Longitude"] = ddf["Ending Station Longitude"].compute().round(6)
start_stations = ddf[
["Starting Station Latitude", "Starting Station Longitude"]
].compute()
start_stations = start_stations.groupby(
["Starting Station Latitude", "Starting Station Longitude"]
).size()
start_stations = pd.DataFrame(start_stations.reset_index())
start_stations.rename(columns={0: "count"}, inplace=True)
stations_dataset_id = uuid4()
stations_map.add_dataset(
{"uuid": stations_dataset_id, "label": "Stations dataset", "data": start_stations},
auto_create_layers=False,
)
stations_map.add_layer(
{
"id": "Starting points",
"type": "hexagon",
"config": {
"label": "Starting points",
"data_id": stations_dataset_id,
"columns": {
"lat": "Starting Station Latitude",
"lng": "Starting Station Longitude",
},
"is_visible": True,
"color_scale": "quantize",
"color_field": {"name": "count", "type": "int"},
"visConfig": {"worldUnitSize": 0.1,},
"visualChannels": {
"colorScale": "quantile",
"sizeScale": "linear"
}
},
}
)
stations_map.set_view_state({"longitude": -118.25, "latitude": 34.04, "zoom": 12})
routes_map = UnfoldedMap()
routes_map
routes = ddf[
[
"Starting Station Latitude",
"Starting Station Longitude",
"Ending Station Latitude",
"Ending Station Longitude",
]
].compute()
routes = routes.groupby(
[
"Starting Station Latitude",
"Starting Station Longitude",
"Ending Station Latitude",
"Ending Station Longitude",
]
).size()
routes = pd.DataFrame(routes.reset_index())
routes.rename(columns={0: "count"}, inplace=True)
routes_dataset_id = uuid4()
routes_map.add_dataset(
{"uuid": routes_dataset_id, "label": "Routes dataset", "data": routes},
auto_create_layers=False,
)
routes_map.add_layer(
{
"id": "Routes",
"type": "arc",
"config": {
"label": "Routes",
"data_id": routes_dataset_id,
"columns": {
"lat0": "Starting Station Latitude",
"lng0": "Starting Station Longitude",
"lat1": "Ending Station Latitude",
"lng1": "Ending Station Longitude",
},
"is_visible": True,
"visConfig": {"opacity": 0.8, "thickness": 0.3},
"color_scale": "quantile",
"color_field": {"name": "count", "type": "int"},
"vis_config": {
"opacity": 0.8,
"thickness": 0.3,
"colorRange": {
"colors": [
"#5A1846",
"#900C3F",
"#C70039",
"#E3611C",
"#F1920E",
"#FFC300",
],
},
"target_color": "count",
},
},
}
)
routes_map.set_view_state({"longitude": -118.25, "latitude": 34.04, "zoom": 12})
trip_category_stats = ddf["Trip Route Category"].value_counts().compute()
colors = sns.color_palette("pastel")[0:5]
plt.title("Trip Route Category")
plt.pie(
trip_category_stats.values,
labels=trip_category_stats.index,
colors=colors,
autopct="%.0f%%",
)
plt.show()
plan_category_stats = ddf["Passholder Type"].value_counts().compute()
colors = sns.color_palette("pastel")[0:5]
plt.title("Passholder Type")
plt.pie(
plan_category_stats.values,
labels=plan_category_stats.index,
colors=colors,
autopct="%.0f%%",
)
plt.show()
ddf["Start Time"] = pd.to_datetime(ddf["Start Time"].compute())
def extract_time_features(timestamp):
return [
timestamp.year,
timestamp.month,
timestamp.day,
timestamp.hour,
timestamp.minute,
]
ddf_to_concat = ddf.compute().reset_index(drop=True)
time_features_ddf = ddf_to_concat["Start Time"].apply(extract_time_features)
time_features_df = pd.DataFrame(
time_features_ddf.tolist(), columns=["Year", "Month", "Day", "Hour", "Minute"]
)
df_with_time_features = pd.concat([ddf_to_concat, time_features_df], axis=1)
df_with_time_features = df_with_time_features.drop(columns=["Start Time"])
df_with_time_features
y = df_with_time_features["Duration"]
X = df_with_time_features.drop(
columns=["Duration", "Ending Station Latitude", "Ending Station Longitude"]
)
X_cut, X_test, y_cut, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
X_train, X_val, y_train, y_val = train_test_split(
X_cut, y_cut, test_size=0.1, random_state=42
)
X_train = X_train.reset_index(drop=True)
X_val = X_val.reset_index(drop=True)
X_test = X_test.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
y_val = y_val.reset_index(drop=True)
y_test = y_test.reset_index(drop=True)
columns_for_count_encoder = [
"Passholder Type",
"Trip Route Category",
"Starting Station ID",
"Plan Duration",
"Neighborhood Councils (Certified)",
"Council Districts",
"Zip Codes",
"Precinct Boundaries",
"Census Tracts",
]
count_encoder = ce.CountEncoder(
cols=columns_for_count_encoder, return_df=True, normalize=True
)
count_encoder = count_encoder.fit(X_train)
X_train = count_encoder.transform(X_train)
X_val = count_encoder.transform(X_val)
X_test = count_encoder.transform(X_test)
X_train
columns_to_normalize = [
"Starting Station Latitude",
"Starting Station Longitude",
"Year",
"Month",
"Day",
"Hour",
"Minute",
]
normalizer = MinMaxScaler()
normalizer = normalizer.fit(X_train[columns_to_normalize])
X_train[columns_to_normalize] = normalizer.transform(X_train[columns_to_normalize])
X_val[columns_to_normalize] = normalizer.transform(X_val[columns_to_normalize])
X_test[columns_to_normalize] = normalizer.transform(X_test[columns_to_normalize])
X_train
hist = sns.histplot(data=y_train, bins=20)
hist.set(xlabel="Duration")
hist.set_title("Trip duration")
plt.show()
y_train = y_train.apply(np.log)
y_val = y_val.apply(np.log)
y_test = y_test.apply(np.log)
hist = sns.histplot(data=y_train, bins=20)
hist.set(xlabel="log(Duration)")
hist.set_title("Trip duration log")
plt.show()
normalizer_y = MinMaxScaler()
normalizer_y = normalizer_y.fit(y_train.to_numpy().reshape(-1, 1))
y_train = normalizer_y.transform(y_train.to_numpy().reshape(-1, 1)).squeeze()
y_val = normalizer_y.transform(y_val.to_numpy().reshape(-1, 1)).squeeze()
y_test = normalizer_y.transform(y_test.to_numpy().reshape(-1, 1)).squeeze()
class Net(nn.Module):
def __init__(self, n_feature, n_output):
super().__init__()
self.layers = nn.Sequential(
nn.Linear(n_feature, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, 16),
nn.ReLU(),
nn.Linear(16, 8),
nn.ReLU(),
nn.Linear(8, n_output),
)
def forward(self, x):
return self.layers(x)
USE_PRETRAINED_MODEL = True
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return "%dm %ds" % (m, s)
def fit():
start = time.time()
losses_train = []
losses_val = []
num_batches_train = (n - 1) // batch_size + 1
num_batches_val = (X_val_t.shape[0] - 1) // batch_size + 1
for epoch in range(epochs):
epoch_train_loss = 0
epoch_val_loss = 0
for i in range(num_batches_train):
optimizer.zero_grad()
random_indexes = random.sample(range(0, X_train_t.shape[0] - 1), batch_size)
xb = X_train_t[random_indexes].float()
yb = y_train_t[random_indexes].float().reshape(-1, 1)
pred = net(xb)
loss = loss_func(pred, yb)
loss.backward()
optimizer.step()
epoch_train_loss += loss.item()
for i in range(num_batches_val):
start_i = i * batch_size
end_i = start_i + batch_size
xb = X_val_t[start_i:end_i].float()
yb = y_val_t[start_i:end_i].float().reshape(-1, 1)
pred = net(xb)
loss = loss_func(pred, yb)
epoch_val_loss += loss.item()
epoch_train_loss /= num_batches_train
epoch_val_loss /= num_batches_val
losses_train.append(epoch_train_loss)
losses_val.append(epoch_val_loss)
print(
"Epoch "
+ str(epoch)
+ " %s (%d %d%%) Train loss: %.4f Validation loss: %.4f"
% (
timeSince(start),
epoch,
epoch / epochs * 100,
epoch_train_loss,
epoch_val_loss,
)
)
return losses_train, losses_val
X_train_t, y_train_t, X_val_t, y_val_t, X_test_t, y_test_t = map(
torch.tensor,
(X_train.to_numpy(), y_train, X_val.to_numpy(), y_val, X_test.to_numpy(), y_test),
)
if USE_PRETRAINED_MODEL:
model_url = "https://actionengine-public.s3.us-east-2.amazonaws.com/model"
model_path = "model"
load_data(model_url, model_path)
net = torch.load("model")
net.eval()
else:
net = Net(n_feature=16, n_output=1) # define the network
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
loss_func = nn.MSELoss() # this is for regression mean squared loss
batch_size = 20
n = X_train.shape[0]
epochs = 100
losses_train, losses_val = fit()
print(
"Min train loss: ", min(losses_train), " Min validation loss: ", min(losses_val)
)
torch.save(net, "model")
# show loss graph
loss_data = pd.DataFrame(
{
"Epochs": [j for j in range(0, epochs)],
"Train loss": losses_train,
"Validation loss": losses_val,
}
)
sns.lineplot(
x="Epochs", y="value", hue="variable", data=pd.melt(loss_data, ["Epochs"])
).set_title("Training process")
test_len = X_test_t.shape[0]
mse_metric = torch.nn.MSELoss()
mae_metric = torch.nn.L1Loss()
preds = []
mse = 0
mae = 0
for i in range(test_len):
xb = X_test_t[i].float()
yb = y_test_t[i].float().reshape(1)
pred = net(xb)
preds.append(pred.item())
mse += mse_metric(pred, yb).item()
mae += mae_metric(pred, yb).item()
mse /= test_len
mae /= test_len
print("MSE: ", mse)
print("MAE: ", mae)
unnormalized_y = normalizer_y.inverse_transform(y_test.reshape(-1, 1))
y_exp = np.exp(unnormalized_y)
unnormalized_preds = normalizer_y.inverse_transform(np.array(preds).reshape(-1, 1))
preds_exp = np.exp(unnormalized_preds)
print("MAE: ", mean_absolute_error(preds_exp, y_exp))
print("MAPE: ", mean_absolute_percentage_error(preds_exp, y_exp))
for i in random.sample(range(0, len(y_exp) - 1), 10):
print("Real: ", y_exp[i], " Predicted: ", preds_exp[i])
d = {column_name: [] for column_name in X.columns}
columns_from_X = [
"Starting Station ID",
"Starting Station Latitude",
"Starting Station Longitude",
"Neighborhood Councils (Certified)",
"Council Districts",
"Zip Codes",
"Precinct Boundaries",
"Census Tracts",
]
columns_same = {
"Passholder Type": "Monthly Pass",
"Trip Route Category": "One Way",
"Plan Duration": 30,
"Year": 2017,
"Month": 3,
"Day": 22,
"Minute": 0,
}
for i in range(118): # there are 118 stations
for hour in range(24):
part_station = X[
X["Starting Station Latitude"]
== start_stations.iloc[i]["Starting Station Latitude"]
].reset_index(drop=True)
part_station_0 = part_station.iloc[0]
for column_name in columns_from_X:
d[column_name].append(part_station_0[column_name])
for column_name in columns_same:
d[column_name].append(columns_same[column_name])
d["Hour"].append(hour)
generated_ds = pd.DataFrame(d)
generated_ds_encoded = count_encoder.transform(generated_ds)
generated_ds_encoded[columns_to_normalize] = normalizer.transform(
generated_ds_encoded[columns_to_normalize]
)
generated_ds
tensor_generated = torch.tensor(generated_ds_encoded.to_numpy())
tensor_generated_len = tensor_generated.shape[0]
preds_for_gen_data = []
for i in range(tensor_generated_len):
xb = tensor_generated[i].float()
pred = net(xb)
preds_for_gen_data.append(pred.item())
unnormalized_preds_for_gen_data = normalizer_y.inverse_transform(
np.array(preds_for_gen_data).reshape(-1, 1)
)
preds_exp_for_gen_data = np.exp(unnormalized_preds_for_gen_data)
generated_ds_preds = generated_ds[
["Starting Station Latitude", "Starting Station Longitude", "Hour"]
].copy()
generated_ds_preds["Pred Duration"] = preds_exp_for_gen_data
dates = []
for hour_value in generated_ds_preds["Hour"]:
dates.append(str(pd.to_datetime("2017-03-22 " + str(hour_value) + ":00:00")))
generated_ds_preds["Date"] = dates
generated_ds_preds
hourly_map = UnfoldedMap(height=800)
hourly_map
hourly_dataset_id = uuid4()
hourly_dataset_filter_id = uuid4()
hourly_map.add_dataset(
{
"uuid": hourly_dataset_id,
"label": "Hourly duration dataset",
"data": generated_ds_preds,
},
auto_create_layers=False,
)
hourly_map.set_filter(
{
"id": str(hourly_dataset_filter_id),
"field": "Date",
"type": "timeRange",
"value": [1490140800000, 1490223600000],
}
)
hourly_map.add_layer(
{
"id": "Hourly duration",
"type": "point",
"config": {
"label": "Hourly duration",
"data_id": hourly_dataset_id,
"columns": {
"lat": "Starting Station Latitude",
"lng": "Starting Station Longitude",
},
"is_visible": True,
"color_scale": "quantize",
"color_field": {"name": "Pred Duration", "type": "float"},
"visConfig": {"radius": 17},
},
}
)
hourly_map.set_view_state({"longitude": -118.25, "latitude": 34.024, "zoom": 12})
# IST256 Applications Programming For Information Systems
## Fall 2021
## Michael Fudge
# Welcome to Large Group
# Understanding This Course's Learning Model
Learning is distributed across the week, and forced upon you through participation activities. Why is it forced? To help you build successful habits for learning!
1. **Read before large group.** This is your initial exposure to the topic. Record questions, doubts, and uncertainties you have as you read. This is participation graded. Z01-Z10
2. **Large group**. Reinforce what you learned through your reading. You observe these topics within the Jupyterhub context. It is best to take notes and ask questions. No writing code at this point.
3. **Lab**. Your first foray into writing code; you are eased into it. There is reading code and writing code, and a walk-through video should you need it. This is participation graded. L01-L13
4. **Small Group**. This is your first foray into problem solving with code. It's done in a group setting. Participation is graded. S01-S13
5. **Homework**. This is an opportunity to demonstrate you can problem-solve a code solution on your own with minimal assistance. There are advice videos, and of course support. This is graded. H01-H13
6. **Metacognition** You will be asked to think about what you have learned and express your learning journey as part of turning in your work. What you have learned from the work is more important than the work itself.
# What will we do in Large Group?
- A time for reinforcing what you read in the Zybook / Severance / Other Readings.
- We also go over the previous week's homework.
- Concepts are reinforced through code examples, in JupyterHub.
- Active learning lecture with polls and Chat.
- Don't be afraid to ask questions. This is YOUR TIME. USE IT.
# How do I Ask Questions in Large Group?
You are welcome to raise your hand, but that can be intimidating so we offer an in-class chat service.
- Microsoft Teams Download here: [https://www.microsoft.com/en-us/microsoft-teams/download-app](https://www.microsoft.com/en-us/microsoft-teams/download-app)
- Login with your syr.edu email address and NetID password.
- The chat channel is [https://chat.ist256.com](https://chat.ist256.com)
# Activity:
## Say hello in the group chat....
# How do you Measure Engagement in Large Group?
Polls are conducted throughout the large group session.
- [https://poll.ist256.com](https://poll.ist256.com)
- Login with your syr.edu email address and NetID password.
- Your responses are recorded.
# Sample Poll Question
Pineapple on Pizza?
A. YES
B. NO
C. Only if you are evil
D. Prefer not to answer
## Vote Now: [https://poll.ist256.com](https://poll.ist256.com)
# Your Weekly Routine
- Monday: assigned readings and large group lecture / reinforce concepts
- Tuesday: complete your lab / coding walk-thru
- Wednesday: small group sessions / practice coding
- Saturday: last day to turn in homework / reinforce what was learned through practice
- No late work, so be mindful of due dates; check Blackboard / My Grades or [https://ist256.com](https://ist256.com)
# Labor Day Schedule
9/1 - First Small Group
- Meet Your Professor and small group mates
- Complete your lab BEFORE 9/1!
- Go through our first small group problem-solving activity.
- Review due dates on syllabus and set expectations for next week.
- Z01, L01, S01, and H01 are due 9/4
9/6 - No class, Labor day
- Lecture pre-recorded
- Z01 due 9/7, not 9/6.
9/8 - 2nd Small Group
- Normal Small group activities of lab review and exercises
- Regular submission schedule
# IST256 Lesson 01
## Introduction to Python Programming
- Zybook Ch 1
- Severance Ch 1
- Your prepared questions
## Links
- Participation: [https://poll.ist256.com](https://poll.ist256.com) (Sign in with *netid*@syr.edu and your SU Password.)
- Class Chat: [https://chat.ist256.com](https://chat.ist256.com) (Microsoft Teams.)
# Connect Activity
Have you programmed a computer before?
A. YES
B. NO
C. Not Sure
D. Rather Not Say
## Vote Now: [https://poll.ist256.com](https://poll.ist256.com)
# What is programming (a.k.a. Coding)?
- **Programming** is the act of you telling the computer to perform a series of tasks.
- You must tell the computer **exactly** what you want it to do, and must do so in a way that it can understand.
- The instructions we give the computer are called **code** or a **program**.
- Code which follows a sequence of steps is known as an **algorithm**.
- Algorithms do not need to be written in code.
# How Mike Eats M&M's ++
```
1. Open bag
2. While bag is not empty:
3. Pour out handful of M&M's
4. Toss into mouth
5. Chew and Swallow Euphorically
6. Pout now that the bag is empty.
```
++ Example of an algorithm not written in code
# Q: Why is Programming Notoriously Difficult?
# A: It's Not
### "Writing code is relatively straightforward; solving problems with code is not." -- Your Professor
### "The purpose of coding is to solve problems." -- Your Professor, Again
# Problem Solving With Code
- **Is**
- Difficult
- Time Consuming
- Frustrating
- Rewarding
- **Requires**
- Patience
- Precision
- Persistence
- Practice (Lots of it)
# Course Learning Outcomes Will Not Come Easy
- Analyze complex problems by thinking computationally and systematically.
- Solve practical, real-world problems using a modern computer programming language.
- Demonstrate the ability to read, write, discuss and code confidently.
- Understand how to code in teams, collaborate with others and manage source code.
- Acquire new programming knowledge independently.
# Why Learn the Python Programming Language?
- Easy to learn with simple syntax.
- Has a wealth of existing code you can leverage in your programs.
- You can do complex things with very little typing.
- Very popular with information professionals (Data Science, Analytics, Information Security).
- Consistently one of the top 5 programming languages on the TIOBE Index [https://www.tiobe.com/tiobe-index/](https://www.tiobe.com/tiobe-index/)
# Enough Talk! Let's Get Doing!
# Your First Problem Solving Approach:
## Input => Process => Output
1. Identify the problem inputs (requirements)
2. Identify the problem outputs (results)
3. Write an algorithm to transform inputs to outputs.
4. If you don't know how to do a step... research it!
# Python Output
- The built in function `print()` is used to display output in Python.
- It commonly takes one argument known as a **string literal**.
- The string literal is enclosed in single or double quotes: `'hi'` or `"hi"`.
- Multiple string literals may be separated by commas, as in the short example below.
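A small sketch of such calls (not the exact in-class demo):
```
# String literals in single or double quotes
print('Hello, IST256!')
print("Hello, IST256!")

# More than one argument, separated by commas
print("Go", "Orange")
```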
# Watch Me Code 1
Demonstrating Python's built in `print()` function:
- String literals
- Single and double quotes
- More than one argument to the function
# Check Yourself 1
Which Python statement will output `welcome` ?
A. `print(welcome)`
B. `print("welcome")`
C. `print "welcome"`
D. `print welcome`
## Vote Now: [https://poll.ist256.com](https://poll.ist256.com)
# Variables
- **Variables** are named areas of computer memory for storing data.
- The **variable name** can be anything but should make symbolic sense to the programmer.
- We **write** to the variable’s memory location with the *assignment statement* (=)
- We **read** from the variable by calling its name.
- Variable names must begin with a letter or _ and must only contain letters, numbers, or _ (see the short example below).
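A quick sketch of writing to and reading from a variable (the names here are just illustrative):
```
course = "IST256"      # write: the assignment statement stores a value in memory
print(course)          # read: call the variable by its name
student_count = 300    # names may contain letters, numbers, and _
```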
# Python Input
- The built in function `input()` accepts input from the user.
- It takes one argument, a **string literal**, which is the prompt (it provides instructions to the user).
- Input is commonly assigned to a **variable** so that the input can be used later on, as in the sketch below.
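A minimal sketch combining `input()` with a prompt and a variable (run the cell and type a response):
```
name = input("What is your name? ")   # the string literal is the prompt
print("Nice to meet you,", name)      # the variable is read later on
```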
# Watch Me Code 2
- The built in `input()` function
- Using a prompt
- Storing the output from `input()` into a variable
- Printing the variable
# Check Yourself 2
For the following code, which is the prompt?
`x = input('y')`
A. `input`
B. `'y'`
C. `=`
D. `x`
## Vote Now: [https://poll.ist256.com](https://poll.ist256.com)
# F-Strings
- F-Strings are Python's answer to **string interpolation**.
- This replaces the variable name with its value within a string.
- Called an F-string because the `f` tells Python to interpolate the string.
```
name = 'George'
print("{name} was curious.")
print(f"{name} was curious.")
```
# Watch Me Code 3
Put the `input()` and `print()` statements together.
- Inputs and outputs into a program
- Prompts that change based on input
- Demonstrate use of `sep` and `end` arguments to `print()`
- F-Strings for formatting variables through interpolation.
# End To End Example
### Limerick Generator!
- Write a program that prompts for inputs and creates a custom limerick poem from them.
Here's the story I want to make:
- There once was a man named `[mans-name]`. He liked to eat `[food-rhymes-with-name]`. One day he ate so much `[food-rhymes-with-name]`, that he got the `[ailment]`, and had to run to the `[place-rhymes-with-name]`.
- Input? Output? Process? (A sketch follows below.)
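Here is one way it might look (the prompts and variable names are just illustrative):
```
mans_name = input("Enter a man's name: ")
food = input("Enter a food that rhymes with the name: ")
ailment = input("Enter an ailment: ")
place = input("Enter a place that rhymes with the name: ")

print(f"There once was a man named {mans_name}. He liked to eat {food}.")
print(f"One day he ate so much {food}, that he got the {ailment},")
print(f"and had to run to the {place}.")
```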
# Numpy Array Operations
### Here you can read about some important Numpy functions.
- array(): Creates a numpy array from given data.
- arange(): Similar to range() in base Python, but it returns numpy arrays and has many more features.
- vstack(): Stacks 2 or more arrays/lists vertically.
- reshape(): Reshapes a list/numpy array without changing the data or [row-wise] order.
- dot(): Returns the matrix product of 2 matrices.
**Note:** The naming convention used here is that a variable named _f1_ex2_some_name_ belongs to example 2 of function 1.
```
!pip install jovian --upgrade -q
import jovian
jovian.commit(project='numpy-array-operations')
```
Let's begin by importing Numpy and listing out the functions covered in this notebook.
```
import numpy as np
# List of functions explained
function1 = np.array
function2 = np.arange
function3 = np.vstack
function4 = np.reshape
function5 = np.dot
```
## Function 1 - np.array()
Returns the given data [usually a list or tuple] in the form of a numpy array.
```
# Example 1 - Creating a numpy array from Python List
f1_ex1_python_list = [[1, 2, 3], [4, 5, 6]]
print('Initial data: ', f1_ex1_python_list)
print('Type of initial data: ', type(f1_ex1_python_list))
print()
f1_ex1_numpy_array = np.array(f1_ex1_python_list)
print('Data parsed from np.array() function: ', f1_ex1_numpy_array)
print('Type of data parsed from np.array() function: ', type(f1_ex1_numpy_array))
```
**In the above example, the function np.array() takes in data in the form of a Python list and returns the same data as a numpy array.**
```
# Example 2 - Specifying datatype of the resulting array
f1_ex2_python_list = [[7, 8, 9], [10, 11, 12]]
print('First element of first row of Python list: ', f1_ex2_python_list[0][0])
print('Type of first element of first row of Python list: ', type(f1_ex2_python_list[0][0]))
print()
f1_ex2_numpy_array = np.array(f1_ex2_python_list, dtype = 'float64')
print('First element of first row of numpy array: ', f1_ex2_numpy_array[0][0])
print('Type of first element of first row of numpy array: ', type(f1_ex2_numpy_array[0][0]))
```
**In the above example, even though the source data is a list of integers, the resulting numpy array has datatype float, as specified explicitly in the np.array() call.**
```
# Example 3 - It gives VisibleDeprecationWarning when the number of columns is not the same in all rows.
f1_ex3_python_list = [[13, 14, 15], [16, 17, 18, 19]]
print('Initial data: ', f1_ex3_python_list)
print('Type of initial data: ', type(f1_ex3_python_list))
print()
f1_ex3_numpy_array = np.array(f1_ex3_python_list)
print('Data parsed from np.array() function: ', f1_ex3_numpy_array)
print('Type of data parsed from np.array() function: ', type(f1_ex3_numpy_array))
```
**As we see in the above example, the number of columns [the number of elements in a row] should be the same for each row. One workaround is to append a null value to the row(s) with fewer elements (columns) than the others.**
```
# Example 4 - alternative way to avoid the exception
f1_ex4_python_list_alt = [[13, 14, 15, None], [16, 17, 18, 19]]
print('Initial data: ', f1_ex4_python_list_alt)
print('Type of initial data: ', type(f1_ex4_python_list_alt))
print()
f1_ex4_numpy_array_alt = np.array(f1_ex4_python_list_alt)
print('Data parsed from np.array() function: ', f1_ex4_numpy_array_alt)
print('Type of data parsed from np.array() function: ', type(f1_ex4_numpy_array_alt))
```
**The above method is just one way to avoid the issue; it may or may not suit your particular use-case.**
```
jovian.commit()
```
## Function 2 - np.arange()
Returns a numpy array of evenly spaced numbers over a given range.
```
# Example 1 - Creating an array of even numbers between 0 [inclusive] and 20 [exclusive] [Starting from 0]
f2_ex1_first_ten_even_numbers = np.arange(start = 0, stop = 20, step = 2)
print('A list of first 10 even numbers created by np.arange():')
print(f2_ex1_first_ten_even_numbers)
```
**In the above example, 'start' will be the first element of the array and 'step' is the common difference between any 2 adjacent elements.**
**The resultant array will contain numbers which are strictly smaller than the 'stop' parameter.**
```
# Example 2 - We can also give a negative step
f2_ex2_even_numbers_in_descending_order = np.arange(start = 20, stop = 0, step = -2)
print('Array of even numbers between 0 and 20 in descending order:')
print(f2_ex2_even_numbers_in_descending_order)
```
**We can see that now 20 is included in the result while 0 is not.**
```
# Example 3 - It will return an empty array if start is smaller than stop and step is negative.
f2_ex3_empty_result = np.arange(start = 0, stop = 20, step = -2)
print('As we give a negative step when start is smaller than stop, we get:')
print(f2_ex3_empty_result)
```
**As we can see, if adding the step to the start does not move the value towards the stop, the sequence would never terminate, so numpy simply returns an empty array instead of attempting the calculation.**
**Because the function does not raise an exception in this case, it is easy to introduce logical errors, so it is worth checking for this situation before calling it.**
**We can use this function for use-cases such as building a list of the first 10 whole numbers or the first 5 multiples of 3 without writing a loop.**
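For instance (a small sketch of the use-cases mentioned above, plus a guard against the silent empty-array case):
```
first_ten_whole_numbers = np.arange(0, 10)        # 0, 1, ..., 9
first_five_multiples_of_3 = np.arange(3, 16, 3)   # 3, 6, 9, 12, 15
print(first_ten_whole_numbers)
print(first_five_multiples_of_3)

# Checking the special case before calling np.arange()
start, stop, step = 0, 20, -2
if (stop - start) * step <= 0:
    print('These arguments would produce an empty array.')
else:
    print(np.arange(start, stop, step))
```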
```
jovian.commit()
```
## Function 3 - np.vstack()
Concatenates the given arrays along the first axis, i.e. vertically (row-wise).
```
# Example 1 - Concatenating two Python lists
f3_ex1_list1 = [1, 3, 5]
f3_ex1_list2 = [7, 9, 10]
print('Individual array are: ')
print(f3_ex1_list1)
print(f3_ex1_list2)
f3_ex1_stacked_array = np.vstack((f3_ex1_list1, f3_ex1_list2))
print('\n\nConcatenated array is: ')
print(f3_ex1_stacked_array)
```
**We need to pass a tuple containing all the arrays [numpy arrays or Python lists], and it will return them stacked vertically (row-wise).**
```
# Example 2 - Concatenating 2 - Dimensional numpy lists.
f3_ex2_ndarray_1 = np.array([[1, 2, 3], [4, 5, 6]])
f3_ex2_ndarray_2 = np.array([[7, 8, 9], [10, 11, 12]])
print('Individual array are: ')
print(f3_ex2_ndarray_1)
print(f3_ex2_ndarray_2)
f3_ex2_concatenated_array = np.vstack((f3_ex2_ndarray_1, f3_ex2_ndarray_2))
print('\n\nConcatenated array is: ')
print(f3_ex2_concatenated_array)
```
**As depicted, we can stack any two arrays with an equal number of columns.**
```
# Example 3 - It throws an error when the number of columns is not consistent across all the given arrays.
f3_ex3_ndarray_1 = np.array([[1, 2, 3], [4, 5, 6]])
f3_ex3_ndarray_2 = np.array([[7, 8, 9, 10], [11, 12, 13, 14]])
print('Individual array are: ')
print(f3_ex3_ndarray_1)
print(f3_ex3_ndarray_2)
f3_ex3_concatenated_array = np.vstack((f3_ex3_ndarray_1, f3_ex3_ndarray_2))
print('\n\nConcatenated array is: ')
print(f3_ex3_concatenated_array)
```
**The above exception can be avoided using the same trick as in Example 4 of function 1.**
**np.vstack() is just a particular case of the more general np.concatenate() function. Using np.vstack() instead improves readability and reduces the number of parameters that have to be passed.**
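For instance, the stacking in Example 2 of this function could equivalently be written with np.concatenate() (a small sketch; the variable names here are new):
```
# np.vstack((a, b)) behaves like concatenating along axis 0,
# once both inputs are at least 2-dimensional.
f3_sketch_array_1 = np.array([[1, 2, 3], [4, 5, 6]])
f3_sketch_array_2 = np.array([[7, 8, 9], [10, 11, 12]])
print(np.concatenate((f3_sketch_array_1, f3_sketch_array_2), axis = 0))
print(np.vstack((f3_sketch_array_1, f3_sketch_array_2)))
```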
```
jovian.commit()
```
## Function 4 - np.reshape()
It takes a Python list or a numpy array as input and returns another numpy array object with the same data but a different shape.
```
# Example 1 - Let's try converting a 4 x 3 array to a 2 x 6 array
f4_ex1_4x3_numpy_array = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
print('Initial 4 x 3 array:')
print(f4_ex1_4x3_numpy_array)
f4_ex1_2x6_reshaped_array = np.reshape(f4_ex1_4x3_numpy_array, newshape = (2, 6))
print('\n\nReshaped 2 x 6 array:')
print(f4_ex1_2x6_reshaped_array)
```
**As stated in the definition, it returns the same data in the same (row-wise) order, but arranged in a different shape.**
```
# Example 2 - Another common use case of this function is to generate a desired 2D array from a 1D array. Let's see
f4_ex2_1d_python_array = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120]
print('Initial 1D Python list:')
print(f4_ex2_1d_python_array)
f4_ex2_4x3_reshaped_array = np.reshape(f4_ex2_1d_python_array, newshape = (4, 3))
print('\n\nReshaped 4 x 3 numpy array:')
print(f4_ex2_4x3_reshaped_array)
```
**In this example, a flat Python list of 12 values is rearranged into a 4 x 3 numpy array; the number of elements must match the requested shape exactly.**
```
''' Example 3 - It returns a ValueError exception when the number of elements in the given array is not equal to
the number of elements in an array of desired shape.'''
f4_ex3_2x2_numpy_array = np.array([[5, 15], [25, 35]])
print('Initial 2 x 2 array:')
print(f4_ex3_2x2_numpy_array)
print("First let's try converting the given array to a 4 x 1 array")
f4_ex3_4x1_reshaped_array = np.reshape(f4_ex3_2x2_numpy_array, newshape = (4, 1))
print('\n\nReshaped 4 x 1 array:')
print(f4_ex3_4x1_reshaped_array)
print("Now let's try converting the same array to a 4 x 4 array")
f4_ex3_4x4_reshaped_array = np.reshape(f4_ex3_2x2_numpy_array, newshape = (4, 4))
print('\n\nReshaped 4 x 4 array:') # This line is never going to run
print(f4_ex3_4x4_reshaped_array)
```
**As you might have already figured out, if we want an array of size 4 x 4, we must input an array in which the number of elements is exactly equal to 16.**
**One important point to notice is that in most cases reshape does not copy the underlying data; the input and output arrays are just different views of the same object.**
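A small check of this view behaviour (the array and names here are just illustrative):
```
f4_view_check_original = np.arange(6)
f4_view_check_reshaped = np.reshape(f4_view_check_original, (2, 3))

# .base points back to the original array when the result is a view
print(f4_view_check_reshaped.base is f4_view_check_original)

# Modifying the view is visible through the original array
f4_view_check_reshaped[0, 0] = 100
print(f4_view_check_original)
```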
```
jovian.commit()
```
## Function 5 - np.dot()
Used to calculate the matrix product of 2 matrices [matrices can be Python lists or numpy arrays]
```
# Example 1 - Let's calculate the matrix product of two 2D numpy arrays
f5_ex1_array_1 = np.array([[1, 2], [3, 4]])
f5_ex1_array_2 = np.array([[5, 6], [7, 8]])
print('Array 1 is:')
print(f5_ex1_array_1)
print('Array 2 is:')
print(f5_ex1_array_2)
f5_ex1_matrix_product = np.dot(f5_ex1_array_1, f5_ex1_array_2)
print('\n\nProduct matrix as calculated by np.dot function:')
print(f5_ex1_matrix_product)
```
**As expected, it returns the matrix product of two given arrays.**
```
# Example 2 - It can also be used to multiply matrices by scalars.
f5_ex2_some_int = 5
f5_ex2_2x2_array = np.array([[9, 10], [11, 12]])
print("Let's take a sample matrix:")
print(f5_ex2_2x2_array)
f5_ex2_dot_product = np.dot(f5_ex2_some_int, f5_ex2_2x2_array)
print('\n\nAs we try to multiply the scalar value {} by the above matrix, we get:'.format(f5_ex2_some_int))
print(f5_ex2_dot_product)
```
**Here, the function multiplies each element of the matrix with the given scalar.**
```
''' Example 3 - It throws ValueError when the last dimension of the first matrix
is not same as the first dimension of second matrix.'''
f5_ex3_array_1 = np.array([[10, 11], [12, 13]])
f5_ex3_array_2 = np.array([[14, 15], [16, 17], [18, 19]])
print('Shape of array 1 is:')
print(f5_ex3_array_1.shape)
print('Shape of array 2 is:')
print(f5_ex3_array_2.shape)
print('\nAs we already know, multiplying these matrices is against the rules of matrix multiplication.')
print("Still let's try multiplying them through np.dot()")
f5_ex3_matrix_product = np.dot(f5_ex3_array_1, f5_ex3_array_2)
print('\n\nProduct matrix as calculated by np.dot function:')
print(f5_ex3_matrix_product)
```
**Since this exception usually occurs due to a logical mistake, it is recommended not to silently catch it in error handling; the exception helps you quickly identify the issue in the logic.**
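If you prefer to fail fast with a clearer message, a simple shape check before calling np.dot() might look like this (just a sketch, with new illustrative names):
```
f5_shape_check_a = np.array([[10, 11], [12, 13]])
f5_shape_check_b = np.array([[14, 15], [16, 17], [18, 19]])

# The matrix product is only defined when the inner dimensions agree
if f5_shape_check_a.shape[-1] == f5_shape_check_b.shape[0]:
    print(np.dot(f5_shape_check_a, f5_shape_check_b))
else:
    print('Shapes', f5_shape_check_a.shape, 'and', f5_shape_check_b.shape, 'are not aligned.')
```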
**Numpy is a very important library in Data Analysis and hence if one aspires to become a Data Analyst or Data Scientist, Numpy should be studied thoroughly.**
```
jovian.commit()
```
## Conclusion
This was my introduction to Numpy functions. Comments are welcome.
## Reference Links
* Numpy Documentation Homepage: https://numpy.org/doc/stable/user/quickstart.html
* Numpy Cheatsheet: https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Numpy_Python_Cheat_Sheet.pdf
* Numpy GitHub Repo [here you can see the source code of all the Numpy functions]: https://github.com/numpy/numpy
```
jovian.commit()
jovian.submit(assignment="zero-to-pandas-a2")
```
# *When* should you create an object?
## Complicated Function Signatures
Another good example of a use for classes is when you have *multiple* functions that have *similar, complicated* signatures. A *signature* is the collection of arguments that get passed into the function. If you have *one, single function* with many required arguments, you might want to consider splitting the function up into multiple functions that are smaller and less complicated to call. If you have *many functions*, each with the same required arguments, you might want to consider grouping those functions into a class and making the common arguments in all of your functions class data.
We can see this start to happen using the same example we looked at in the previous section on Complicated Data Structures, but imagining it from a different perspective. Imagine we chose to represent the *instrument* data with many small data structures.
### Example Data
Imagine a similar *instrument* data file to what we had before, but now with variables `A`, `B`, `C`, and `D`.
```
%%writefile instrument2.csv
A,0.612,44.978
B,0.891,-0.0178
A,1.132,45.741
C,1.251,21.385
C,1.542,23.723
B,1.852,-0.6319
D,1.988,101.123
D,2.187,100.852
A,2.376,42.178
B,3.017,-2.7863
A,3.861,41.389
C,4.345,27.013
D,4.687,98.678
A,5.142,42.687
D,6.187,102.752
```
We can define a similar function to what we defined before, but this time we will be explicit about the knowledge that the datafile only contains information about variables `A`, `B`, `C`, and `D`.
```
def read_instrument_data(filename):
values = {}
times = {}
with open(filename) as f:
for record in f:
var,time,value = record.split(',')
if var not in values:
values[var] = []
times[var] = []
values[var].append(float(value))
times[var].append(float(time))
return times['A'], values['A'], times['B'], values['B'], times['C'], values['C'], times['D'], values['D']
```
And now we explicitly extract the times and values into separate lists for each variable.
```
a_times, a_values, b_times, b_values, c_times, c_values, d_times, d_values = read_instrument_data('instrument2.csv')
```
Now, the `print(data)` test doesn't have any problem! Everything is simple and explicit.
```
print(a_times)
print(a_values)
print(b_times)
print(b_values)
```
And so on.
Everything is clearly labeled and the data is "self-explanatory." So, no problem, right?
### Example Functions
Imagine the myriad things you can compute from the above variables! Many of the computations may depend on any number of these variables. Some may need the time data, and some may not. Some may require only one variable's data, and some may require them all!
Since I haven't used variables that correspond to anything *real* in our world, I can't create actual computations that you can recognize, but I hope you can see where this is going. As an example, imagine the following functions.
```
def integrate_trapezoid(x, y):
return sum(0.5*(x[i] - x[i-1])*(y[i] + y[i-1]) for i in range(1, len(x)))
def compute_v1(a_t, a_v, b_t, b_v):
return integrate_trapezoid(a_t, a_v) - integrate_trapezoid(b_t, b_v)
def compute_v2(a_v, b_v, c_v, d_v):
all_v = a_v + b_v + c_v + d_v
return sum(all_v) / len(all_v)
def compute_v3(a_t, a_v, b_t, b_v, c_t, c_v, d_t, d_v):
a_b = integrate_trapezoid(a_t, a_v) - integrate_trapezoid(b_t, b_v)
c_d = integrate_trapezoid(c_t, c_v) + integrate_trapezoid(d_t, d_v)
return a_b / c_d
def compute_v4(a_t, b_t, c_t, d_t):
return sorted(a_t + b_t + c_t + d_t)
```
And so on...
You should be able to imagine how more functions can be added to the list, and that the number of functions can grow quickly and dramatically.
Let's try out what we've got.
```
compute_v1(a_times, a_values, b_times, b_values)
compute_v2(a_values, b_values, c_values, d_values)
compute_v3(a_times, a_values, b_times, b_values, c_times, c_values, d_times, d_values)
compute_v4(a_times, b_times, c_times, d_times)
```
Just writing out those functions takes time and effort to avoid errors!
## Problems and Limitations
What kinds of potential problems do you see with this approach? Are there any?
1. While the data is explicitly labeled and easy to understand (i.e., the `print(data)` test), there is a lot to manage.
2. Because the number of variables is large, the signatures of the functions can be quite lengthy. Long signatures can lead to errors because of simple typing mistakes, and those errors can be hard to spot. Especially since long signatures tend to lead to abbreviated variable names to prevent having to type so many characters!
3. Many of the functions have similar signatures, but not the same. Can you remember which function needs which variables as arguments? Or would you need to look it up every time?
What other problems can you think of?
## Key Takeaway
The *instrument* that is providing the data is measuring different variables related to the *same thing*. Maybe it's measuring properties of the atmosphere, ocean, land, or surface ice. Maybe it's just measuring local properties of some "environment," whatever you want to call it. And regardless of what that "environment" is, the computations you perform must also describe the same environment. Hence, *conceptually*, the data and the computations (i.e., functions) are (*again*) intrinsically related to one another!
And you can relieve a lot of headaches in using all of these functions if you encapsulate the data into a single object and simplify the signatures of the functions.
***How might you do that?***
### Exercise: Try grouping the data and functions above into an object
```
# Try writing a class that groups the data and functions above
```
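If you want to compare notes after trying the exercise yourself, here is one minimal sketch (certainly not the only way to do it) that groups the data and the computations into a single class, assuming the same file format as `instrument2.csv`:
```
class InstrumentData:
    """Groups each variable's times/values and the computations on them."""

    def __init__(self, filename):
        self.times = {v: [] for v in 'ABCD'}
        self.values = {v: [] for v in 'ABCD'}
        with open(filename) as f:
            for record in f:
                var, time, value = record.split(',')
                self.times[var].append(float(time))
                self.values[var].append(float(value))

    def _integrate(self, var):
        x, y = self.times[var], self.values[var]
        return sum(0.5*(x[i] - x[i-1])*(y[i] + y[i-1]) for i in range(1, len(x)))

    def compute_v1(self):
        return self._integrate('A') - self._integrate('B')

    def compute_v2(self):
        all_v = self.values['A'] + self.values['B'] + self.values['C'] + self.values['D']
        return sum(all_v) / len(all_v)

    def compute_v3(self):
        a_b = self._integrate('A') - self._integrate('B')
        c_d = self._integrate('C') + self._integrate('D')
        return a_b / c_d

    def compute_v4(self):
        return sorted(self.times['A'] + self.times['B'] + self.times['C'] + self.times['D'])

instrument = InstrumentData('instrument2.csv')
print(instrument.compute_v1())
print(instrument.compute_v2())
print(instrument.compute_v3())
print(instrument.compute_v4())
```
Notice how every `compute_*` signature shrinks to `self`: the common arguments have become class data, which is exactly the simplification discussed above.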
| | | |
| :- | -- | -: |
| [[Home]](../index.ipynb) | <img width="100%" height="1" src="../images/empty.png"/> | [« Previous](08.ipynb) \| [Next »](10.ipynb) |
# Retail Analysis with Walmart Data
#### DESCRIPTION :
> One of the leading retail stores in the US, Walmart, would like to predict the sales and demand accurately.
There are certain events and holidays which impact sales on each day. There are sales data available for 45
stores of Walmart. The business is facing a challenge due to unforeseen demand and sometimes runs out of stock
because of an inadequate machine learning algorithm. An ideal ML algorithm will predict demand accurately and
ingest factors like economic conditions including CPI, Unemployment Index, etc.
> Walmart runs several promotional markdown events throughout the year. These markdowns precede prominent holidays,
the four largest of all, which are the Super Bowl, Labour Day, Thanksgiving, and Christmas. The weeks including
these holidays are weighted five times higher in the evaluation than non-holiday weeks. Part of the challenge
presented by this competition is modeling the effects of markdowns on these holiday weeks in the absence of
complete/ideal historical data. Historical sales data for 45 Walmart stores located in different regions are
available.
#### Holiday Events :
> 1. __Super Bowl :__ 12-Feb-10, 11-Feb-11, 10-Feb-12, 8-Feb-13
> 2. __Labour Day :__ 10-Sep-10, 9-Sep-11, 7-Sep-12, 6-Sep-13
> 3. __Thanksgiving :__ 26-Nov-10, 25-Nov-11, 23-Nov-12, 29-Nov-13
> 4. __Christmas :__ 31-Dec-10, 30-Dec-11, 28-Dec-12, 27-Dec-13
### Analysis Tasks.
> #### Basic Statistics tasks :
> * Which store has maximum sales
> * Which store has maximum standard deviation i.e., the sales vary a lot. Also, find out the coefficient of mean to standard deviation
> * Which store/s has good quarterly growth rate in Q3’2012
> * Some holidays have a negative impact on sales. Find out holidays which have higher sales than the mean sales in non-holiday season for all stores together
> * Provide a monthly and semester view of sales in units and give insights
> #### Statistical Model :
For Store 1 – Build prediction models to forecast demand
> * Linear Regression – Utilize variables like date and restructure dates as 1 for 5 Feb 2010 (starting from the earliest date in order). Hypothesize if CPI, unemployment, and fuel price have any impact on sales.
> * Change dates into days by creating new variable.
> * __Select the model which gives best accuracy.__
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.stats as stats
import sklearn as sk
import statsmodels as sm
from datetime import datetime as dt
from datetime import datetime as date
import warnings
warnings.filterwarnings('ignore')
data = pd.read_csv('Walmart_Store_sales.csv')
data.head()
```
## EDA 🔨
```
data.info()
```
* Change date to datetime format
* __Weekly_Sales is target variable here__
* No missing values.
* Shape is (6435, 8)
```
# Date formatting :
data['Date'] = pd.to_datetime(data['Date'])
data.info()
data['Holiday_Flag'].value_counts()
data['DateType'] = [dt.strptime(date, '%Y-%m-%d').date() for date in data['Date'].astype(str).values.tolist()]
```
## Holidays :
```
data['Superbowl'] = np.where((data['DateType'] == dt(2010, 2, 12).date()) | (data['DateType'] == dt(2011, 2, 11).date())|
(data['DateType'] == dt(2012, 2, 10).date()) | (data['DateType'] == dt(2013, 2, 8).date()),1, 0)
data['Labor_Day'] = np.where((data['DateType'] == dt(2010, 9, 10).date()) | (data['DateType'] == dt(2011, 9, 9).date()) |
(data['DateType'] == dt(2012, 9, 7).date()) | (data['DateType'] == dt(2013, 9, 6).date()),1, 0)
data['Christmas'] = np.where((data['DateType'] == dt(2010, 12, 31).date()) | (data['DateType'] == dt(2011, 12, 30).date())|
(data['DateType'] == dt(2012, 12, 28).date()) | (data['DateType'] == dt(2013, 12, 27).date()),1, 0)
data['Thanksgiving'] = np.where((data['DateType'] == dt(2010, 11, 26).date())| (data['DateType'] == dt(2011, 11, 25).date())|
(data['DateType'] == dt(2012, 11, 23).date())|(data['DateType'] == dt(2013, 11, 29).date()),1,0)
print(data.Superbowl.value_counts())
print(data.Labor_Day.value_counts())
print(data.Thanksgiving.value_counts())
print(data.Christmas.value_counts())
```
## 1. Which store has maximum sales
```
store_sales = data.groupby(['Store'])['Weekly_Sales'].sum().sort_values(ascending = False)
round(store_sales, 1).head()
```
#### __So, store 20 has the maximum total sales.__
## 2. Which store has maximum standard deviation i.e., the sales vary a lot. Also, find out the coefficient of mean to standard deviation
```
store_std = data.groupby(['Store'])['Weekly_Sales'].std().sort_values(ascending = False)
round(store_std, 2).head()
store_mean = data.groupby(['Store'])['Weekly_Sales'].mean().sort_values(ascending = False)
coeff_variance = round(store_std / store_mean, 2)
coeff_variance.sort_values(ascending = False).head()
```
### Insights :
>* __coeff_variance__ gives the coefficient of variation (standard deviation divided by mean) for each store of the dataset, as shown above.
>* Store __14__ has the maximum standard deviation.
## 3. Which store/s has good quarterly growth rate in Q3’2012
```
data['Year'] = data['Date'].dt.year
data['Month'] = data['Date'].dt.month
data['Quarter'] = data['Date'].dt.quarter
data.drop(columns = 'DateType', inplace = True)
data.head(3)
q3 = data[(data['Quarter'] == 3) & (data['Year'] == 2012)].groupby('Store')['Weekly_Sales'].sum().sort_values(ascending = False)
q3.head(3)
```
### Insight :
* Store __4__ has the best Q3’2012 performance, with the __maximum__ total sales of $25652119.35$ compared with the other stores.
#### Growth Rate :
* The growth rate is defined as the difference between the present value and the past value, divided by the past value, and multiplied by 100 (to express it as a percentage).
* Growth rate = ((Present value - Past value) / Past value) * 100; for example, growing from 100 to 110 gives ((110 - 100) / 100) * 100 = 10%.
```
Q3_date_from = pd.Timestamp(date(2012, 7, 1))
Q3_date_to = pd.Timestamp(date(2012, 9, 30))
Q2_date_from = pd.Timestamp(date(2012, 4, 1))
Q2_date_to = pd.Timestamp(date(2012, 6, 30))
#Collecting the data of Q3 and Q2 from original dataset.
Q2data = data[(data['Date'] > Q2_date_from) & (data['Date'] < Q2_date_to)]
Q3data = data[(data['Date'] > Q3_date_from) & (data['Date'] < Q3_date_to)]
#finding the sum weekly sales of each store in Q2
Q2 = pd.DataFrame(Q2data.groupby('Store')['Weekly_Sales'].sum())
Q2.reset_index(inplace=True)
Q2.rename(columns={'Weekly_Sales': 'Q2_Weekly_Sales'},inplace = True)
#finding the sum weekly sales of each store in Q2
Q3 = pd.DataFrame(Q3data.groupby('Store')['Weekly_Sales'].sum())
Q3.reset_index(inplace=True)
Q3.rename(columns = {'Weekly_Sales': 'Q3_Weekly_Sales'},inplace = True)
#mergeing Q2 and Q3 data on Store as a common column
Q3_Growth= Q2.merge(Q3,how = 'inner',on = 'Store')
#Calculating Growth rate of each Store and collecting it into a dataframe
Q3_Growth['Growth_Rate'] = (Q3_Growth['Q3_Weekly_Sales'] - Q3_Growth['Q2_Weekly_Sales']) / Q3_Growth['Q2_Weekly_Sales']
Q3_Growth['Growth_Rate'] = round(Q3_Growth['Growth_Rate'],2)
Q3_Growth.sort_values('Growth_Rate',ascending = False).head(3)
Q3_Growth.sort_values('Growth_Rate',ascending = False).tail(3)
```
### Insights :
> * From the above information we can say that the Q3 growth rate is __negative__ for these stores.
> * Store 16 has the __smallest decline__, about 3%, compared with the other stores.
> * Store 14 has the __largest decline__, about 18%.
## 4. Some holidays have a negative impact on sales. Find out holidays which have higher sales than the mean sales in non-holiday season for all stores together
```
round(data.groupby(['Holiday_Flag'])['Weekly_Sales'].sum(),1)
print('0 :', 6.231919e+09)
print('1 :', 5.052996e+08)
Spr_sales = data.groupby(['Superbowl'])['Weekly_Sales'].mean()
Ld_sales = data.groupby(['Labor_Day'])['Weekly_Sales'].mean()
Thanksg_sales = data.groupby(['Thanksgiving'])['Weekly_Sales'].mean()
Christmas_sales = data.groupby(['Christmas'])['Weekly_Sales'].mean()
Non_Holi_Sales = data[(data['Holiday_Flag'] == 0)].groupby('Holiday_Flag')['Weekly_Sales'].mean()
print(round(Spr_sales, 2))
print(round(Ld_sales, 2))
print(round(Thanksg_sales, 2))
print(round(Christmas_sales, 2))
print(round(Non_Holi_Sales, 2))
```
## Visualizing :📚
### 1. Sales in Super Bowl holiday.
```
plt.style.use('seaborn-whitegrid')
plt.figure(figsize = (10, 6))
Spr_sales.plot(kind = 'bar', legend = False, title = 'Sales in Super Bowl holiday', color = 'thistle')
plt.show()
```
### 2. Sales in Labor Day holiday.
```
plt.figure(figsize = (10, 6))
Ld_sales.plot(kind = 'bar', legend = False, title = 'Sales in Labour Day holiday', color = ['plum','lavender'])
plt.show()
```
### 3. Sales in Thanksgiving holiday.
```
plt.figure(figsize = (10, 6))
Thanksg_sales.plot(kind = 'bar', legend = False, title = 'Sales in Thanksgiving holiday', color = ['powderblue','skyblue'])
plt.show()
```
### 4. Sales in Christmas holiday.
```
plt.figure(figsize = (10, 6))
Christmas_sales.plot(kind = 'bar', legend = False, title = 'Sales in Christmas holiday', color = ['plum','lavender'])
plt.show()
```
### 5. Non Holiday Sales.
```
plt.figure(figsize = (10, 6))
Non_Holi_Sales.plot(kind = 'bar', legend = False, title = 'Sales in Non-holiday', color = 'lavender')
plt.show()
```
## 5. Provide a monthly and semester view of sales in units and give insights.
### I. Monthly sales view
```
monthly = data.groupby(pd.Grouper(key = 'Date', freq = '1M')).sum()
monthly = monthly.reset_index()
fig, ax = plt.subplots(figsize=(13,6), dpi = 80)
X = monthly['Date']
Y = monthly['Weekly_Sales']
plt.plot(X,Y, color = 'plum')
plt.title('Month Wise Sales')
plt.xlabel('Monthly')
plt.ylabel('Weekly Sales')
plt.legend(['Sales'])
plt.show()
```
### II. Semester wise sales view
```
Semester = data.groupby(pd.Grouper(key='Date', freq='6M')).sum()
Semester = Semester.reset_index()
fig, ax = plt.subplots(figsize=(13,6), dpi = 80)
X = Semester['Date']
Y = Semester['Weekly_Sales']
plt.plot(X,Y)
plt.title('Semester Wise Sales')
plt.xlabel('Semester')
plt.ylabel('Weekly Sales')
plt.legend('Sales')
plt.show()
```
### Insights :
> * We can infer that there is a big spike in sales from __February-2010__ to __February-2011__, roughly one full year.
> * The spike then dips a __bit down__ around February-2011, after which there are a few more __ups and downs__.
> * From __August-2012__ onward sales go __down__, so we can acknowledge that there is a __loss__ in sales.
# For Store 1 – Build prediction models to forecast demand
```
data.drop(columns = ['Superbowl', 'Labor_Day', 'Christmas', 'Thanksgiving'], inplace = True)
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
data['Store']
data['Store'] = data['Store'].astype(str)
data['Store'] = 'Store ' + data['Store'].astype(str)
data.Store
labelEncod = LabelEncoder()
store_1 = data[data['Store'] == 'Store 1']
store_1 = store_1.copy()
store_1.head()
store_1['Days'] = labelEncod.fit_transform(store_1['Date'])
store_1.drop(['Store','Date','Holiday_Flag','Year','Month','Quarter'], axis=1 , inplace = True)
import seaborn as sns
corr = store_1.corr()
plt.figure(figsize = (13,8), dpi = 80)
corrmap = sns.heatmap(store_1.corr(), cmap = 'PuBuGn_r', annot = True)
corrmap
plt.show()
```
### Hypothesize if CPI, unemployment, and fuel price have any impact on sales.
### Insights :
> 1. As we can see, __Unemployment__ is highly correlated with __Days__ and is ___insignificant___ here, as its correlation with Weekly Sales is __quite low.__
> 2. Also, __Temperature__ and __Unemployment__ are __negatively__ impacting the sales.
> 3. However, __Fuel Price__ and __CPI__ are __positively__ impacting the sales (a quick check of this hypothesis is sketched below).
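As a rough way to test this hypothesis (a sketch, not part of the original analysis; `sm_api`, `X_hyp`, and `y_hyp` are new names introduced here), an ordinary least squares fit with statsmodels reports a p-value for each of these variables; small p-values suggest a variable has a statistically detectable impact on weekly sales:
```
import statsmodels.api as sm_api

X_hyp = sm_api.add_constant(store_1[['CPI', 'Unemployment', 'Fuel_Price']])  # add the intercept term
y_hyp = store_1['Weekly_Sales']

ols_model = sm_api.OLS(y_hyp, X_hyp).fit()
print(ols_model.summary())  # inspect the coefficients and p-values
```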
## Model Building : Linear Regression📈
```
from sklearn.model_selection import train_test_split
X = store_1[['Days', 'Fuel_Price', 'CPI', 'Unemployment']]
y = store_1['Weekly_Sales']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 123)
print('Shape of X_train :', X_train.shape)
print('Shape of y_train :', y_train.shape)
print('-'*40)
print('Shape of X_test :', X_test.shape)
print('Shape of y_test :', y_test.shape)
from sklearn.linear_model import LinearRegression
linear_reg = LinearRegression()
linear_reg.fit(X_train, y_train)
y_pred = linear_reg.predict(X_test)
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('Coefficients: \n',linear_reg.coef_)
print('Variance score: %.2f' % linear_reg.score(X_test, y_test))
```
### Note :
> * From the above I can infer that the Linear Regression model performs very poorly on our dataset.
> * As we can see, the variance (R²) score is negative, which indicates that our model is __poor__.
> * A __higher__ variance score means a more __ideal / accurate__ model, therefore further down I'll implement a __RandomForest__ model and check its accuracy.
> * If the accuracy increases, then I will pick that model.
> * Another way we can check accuracy is by comparing the actual values with the predicted values.
# RandomForestRegressor🎄
```
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(n_estimators = 400, max_depth = 15)
rfr.fit(X_train, y_train)
y_pred = rfr.predict(X_test)   # overwrite y_pred so the metrics below evaluate the RandomForest model
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('-'*40)
print('Variance score: %.2f' % rfr.score(X_test, y_test))
Actual_vs_Pred = pd.DataFrame({"Actual Sales" : y_test, "Predicted Sales": y_pred})
Actual_vs_Pred.head()
```
***Predicted Demand for Store 1***
```
# Checking errors :
print('Error :', round(1.597046e+06 - 1517428.87, 2))
print('Error :', round(1.573164e+06 - 1621031.70, 2))
round(((abs(1.573164e+06 - 1621031.70))/1621031.70)*100,2)
# Errors in % w.r.t the particular store sales : predicted sales and actual sales.
Errors = pd.DataFrame({'errors':round(((abs(y_pred - y_test))/y_test)*100,2)})
Errors.head()
```
### Insights :
> * So here we can see that we have predicted the __demand__ for store $1$.
> * The table above shows the __errors__ in %, w.r.t. the actual store sales.
> * The errors are not large enough to reject the model, and the __variance__ score is good.
> * Therefore we accept this model - the __RandomForest Regressor__.
$Swapnil$ $Narwade$
> *[email protected]* 🦄
|
github_jupyter
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import scipy.stats as stats
import sklearn as sk
import statsmodels as sm
from datetime import datetime as dt
from datetime import datetime as date
import warnings
warnings.filterwarnings('ignore')
data = pd.read_csv('Walmart_Store_sales.csv')
data.head()
data.info()
# Date formatting :
data['Date'] = pd.to_datetime(data['Date'])
data.info()
data['Holiday_Flag'].value_counts()
data['DateType'] = [dt.strptime(date, '%Y-%m-%d').date() for date in data['Date'].astype(str).values.tolist()]
data['Superbowl'] = np.where((data['DateType'] == dt(2010, 2, 12).date()) | (data['DateType'] == dt(2011, 2, 11).date())|
(data['DateType'] == dt(2012, 2, 10).date()) | (data['DateType'] == dt(2013, 2, 8).date()),1, 0)
data['Labor_Day'] = np.where((data['DateType'] == dt(2010, 9, 10).date()) | (data['DateType'] == dt(2011, 9, 9).date()) |
(data['DateType'] == dt(2012, 9, 7).date()) | (data['DateType'] == dt(2013, 9, 6).date()),1, 0)
data['Christmas'] = np.where((data['DateType'] == dt(2010, 12, 31).date()) | (data['DateType'] == dt(2011, 12, 30).date())|
(data['DateType'] == dt(2012, 12, 28).date()) | (data['DateType'] == dt(2013, 12, 27).date()),1, 0)
data['Thanksgiving'] = np.where((data['DateType'] == dt(2010, 11, 26).date())| (data['DateType'] == dt(2011, 11, 25).date())|
(data['DateType'] == dt(2012, 11, 23).date())|(data['DateType'] == dt(2013, 11, 29).date()),1,0)
print(data.Superbowl.value_counts())
print(data.Labor_Day.value_counts())
print(data.Thanksgiving.value_counts())
print(data.Christmas.value_counts())
store_sales = data.groupby(['Store'])['Weekly_Sales'].sum().sort_values(ascending = False)
round(store_sales, 1).head()
store_std = data.groupby(['Store'])['Weekly_Sales'].std().sort_values(ascending = False)
round(store_std, 2).head()
store_mean = data.groupby(['Store'])['Weekly_Sales'].mean().sort_values(ascending = False)
coeff_variance = round(store_std / store_mean, 2)
coeff_variance.sort_values(ascending = False).head()
data['Year'] = data['Date'].dt.year
data['Month'] = data['Date'].dt.month
data['Quarter'] = data['Date'].dt.quarter
data.drop(columns = 'DateType', inplace = True)
data.head(3)
q3 = data[(data['Quarter'] == 3) & (data['Year'] == 2012)].groupby('Store')['Weekly_Sales'].sum().sort_values(ascending = False)
q3.head(3)
Q3_date_from = pd.Timestamp(date(2012, 7, 1))
Q3_date_to = pd.Timestamp(date(2012, 9, 30))
Q2_date_from = pd.Timestamp(date(2012, 4, 1))
Q2_date_to = pd.Timestamp(date(2012, 6, 30))
#Collecting the data of Q3 and Q2 from original dataset.
Q2data = data[(data['Date'] > Q2_date_from) & (data['Date'] < Q2_date_to)]
Q3data = data[(data['Date'] > Q3_date_from) & (data['Date'] < Q3_date_to)]
#finding the sum weekly sales of each store in Q2
Q2 = pd.DataFrame(Q2data.groupby('Store')['Weekly_Sales'].sum())
Q2.reset_index(inplace=True)
Q2.rename(columns={'Weekly_Sales': 'Q2_Weekly_Sales'},inplace = True)
#finding the sum weekly sales of each store in Q2
Q3 = pd.DataFrame(Q3data.groupby('Store')['Weekly_Sales'].sum())
Q3.reset_index(inplace=True)
Q3.rename(columns = {'Weekly_Sales': 'Q3_Weekly_Sales'},inplace = True)
#mergeing Q2 and Q3 data on Store as a common column
Q3_Growth= Q2.merge(Q3,how = 'inner',on = 'Store')
#Calculating Growth rate of each Store and collecting it into a dataframe
Q3_Growth['Growth_Rate'] = (Q3_Growth['Q3_Weekly_Sales'] - Q3_Growth['Q2_Weekly_Sales']) / Q3_Growth['Q2_Weekly_Sales']
Q3_Growth['Growth_Rate'] = round(Q3_Growth['Growth_Rate'],2)
Q3_Growth.sort_values('Growth_Rate',ascending = False).head(3)
Q3_Growth.sort_values('Growth_Rate',ascending = False).tail(3)
round(data.groupby(['Holiday_Flag'])['Weekly_Sales'].sum(),1)
print('0 :', 6.231919e+09)
print('1 :', 5.052996e+08)
Spr_sales = data.groupby(['Superbowl'])['Weekly_Sales'].mean()
Ld_sales = data.groupby(['Labor_Day'])['Weekly_Sales'].mean()
Thanksg_sales = data.groupby(['Thanksgiving'])['Weekly_Sales'].mean()
Christmas_sales = data.groupby(['Christmas'])['Weekly_Sales'].mean()
Non_Holi_Sales = data[(data['Holiday_Flag'] == 0)].groupby('Holiday_Flag')['Weekly_Sales'].mean()
print(round(Spr_sales, 2))
print(round(Ld_sales, 2))
print(round(Thanksg_sales, 2))
print(round(Christmas_sales, 2))
print(round(Non_Holi_Sales, 2))
plt.style.use('seaborn-whitegrid')
plt.figure(figsize = (10, 6))
Spr_sales.plot(kind = 'bar', legend = False, title = 'Sales in Super Bowl holiday', color = 'thistle')
plt.show()
plt.figure(figsize = (10, 6))
Ld_sales.plot(kind = 'bar', legend = False, title = 'Sales in Labour Day holiday', color = ['plum','lavender'])
plt.show()
plt.figure(figsize = (10, 6))
Thanksg_sales.plot(kind = 'bar', legend = False, title = 'Sales in Thanksgiving holiday', color = ['powderblue','skyblue'])
plt.show()
plt.figure(figsize = (10, 6))
Christmas_sales.plot(kind = 'bar', legend = False, title = 'Sales in Christmas holiday', color = ['plum','lavender'])
plt.show()
plt.figure(figsize = (10, 6))
Non_Holi_Sales.plot(kind = 'bar', legend = False, title = 'Sales in Non-holiday', color = 'lavender')
plt.show()
monthly = data.groupby(pd.Grouper(key = 'Date', freq = '1M')).sum()
monthly = monthly.reset_index()
fig, ax = plt.subplots(figsize=(13,6), dpi = 80)
X = monthly['Date']
Y = monthly['Weekly_Sales']
plt.plot(X,Y, color = 'plum')
plt.title('Month Wise Sales')
plt.xlabel('Monthly')
plt.ylabel('Weekly Sales')
plt.legend(['Sales'])
plt.show()
Semester = data.groupby(pd.Grouper(key='Date', freq='6M')).sum()
Semester = Semester.reset_index()
fig, ax = plt.subplots(figsize=(13,6), dpi = 80)
X = Semester['Date']
Y = Semester['Weekly_Sales']
plt.plot(X,Y)
plt.title('Semester Wise Sales')
plt.xlabel('Semester')
plt.ylabel('Weekly Sales')
plt.legend('Sales')
plt.show()
data.drop(columns = ['Superbowl', 'Labor_Day', 'Christmas', 'Thanksgiving'], inplace = True)
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import LabelEncoder
data['Store']
data['Store'] = data['Store'].astype(str)
data['Store'] = 'Store ' + data['Store'].astype(str)
data.Store
labelEncod = LabelEncoder()
store_1 = data[data['Store'] == 'Store 1']
store_1 = store_1.copy()
store_1.head()
store_1['Days'] = labelEncod.fit_transform(store_1['Date'])
store_1.drop(['Store','Date','Holiday_Flag','Year','Month','Quarter'], axis=1 , inplace = True)
import seaborn as sns
corr = store_1.corr()
plt.figure(figsize = (13,8), dpi = 80)
corrmap = sns.heatmap(store_1.corr(), cmap = 'PuBuGn_r', annot = True)
corrmap
plt.show()
from sklearn.model_selection import train_test_split
X = store_1[['Days', 'Fuel_Price', 'CPI', 'Unemployment']]
y = store_1['Weekly_Sales']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 123)
print('Shape of X_train :', X_train.shape)
print('Shape of y_train :', y_train.shape)
print('-'*40)
print('Shape of X_test :', X_test.shape)
print('Shape of y_test :', y_test.shape)
from sklearn.linear_model import LinearRegression
linear_reg = LinearRegression()
linear_reg.fit(X_train, y_train)
y_pred = linear_reg.predict(X_test)
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print('Coefficients: \n',linear_reg.coef_)
print('Variance score: %.2f' % linear_reg.score(X_test, y_test))
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(n_estimators = 400, max_depth = 15)
rfr.fit(X_train, y_train)
Y_pred = rfr.predict(X_test)
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, Y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, Y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, Y_pred)))
print('-'*40)
print('Variance score: %.2f' % rfr.score(X_test, y_test))
Actual_vs_Pred = pd.DataFrame({"Actual Sales" : y_test, "Predicted Sales": y_pred})
Actual_vs_Pred.head()
# Checking errors :
print('Error :', round(1.597046e+06 - 1517428.87, 2))
print('Error :', round(1.573164e+06 - 1621031.70, 2))
round(((abs(1.573164e+06 - 1621031.70))/1621031.70)*100,2)
# Errors in % w.r.t the particular store sales : predicted sales and actual sales.
Errors = pd.DataFrame({'errors':round(((abs(y_pred - y_test))/y_test)*100,2)})
Errors.head()
| 0.30549 | 0.973968 |
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Plotly's python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.
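To check which version is currently installed, a quick sanity check like this should work:
```
import plotly
print(plotly.__version__)
```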
```
import plotly as py
import plotly.graph_objs as go
py.offline.init_notebook_mode(connected=True)
```
### Adding Text to Data in Line and Scatter Plots
```
trace1 = go.Scatter(
x=[0, 1, 2],
y=[1, 1, 1],
mode='lines+markers+text',
name='Lines, Markers and Text',
text=['Text A', 'Text B', 'Text C'],
textposition='top center'
)
trace2 = go.Scatter(
x=[0, 1, 2],
y=[2, 2, 2],
mode='markers+text',
name='Markers and Text',
text=['Text D', 'Text E', 'Text F'],
textposition='bottom center'
)
trace3 = go.Scatter(
x=[0, 1, 2],
y=[3, 3, 3],
mode='lines+text',
name='Lines and Text',
text=['Text G', 'Text H', 'Text I'],
textposition='bottom center'
)
data = [trace1, trace2, trace3]
layout = go.Layout(
showlegend=False
)
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig, filename='text-chart-basic')
```
### Adding Hover Text to Data in Line and Scatter Plots
```
data = [
go.Scatter(
x=[0, 1, 2],
y=[1, 3, 2],
mode='markers',
text=['Text A', 'Text B', 'Text C']
)
]
layout = go.Layout(
title='Hover over the points to see the text'
)
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig, filename='hover-chart-basic')
```
### Simple Annotation
```
trace1 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 1, 3, 2, 4, 3, 4, 6, 5]
)
trace2 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 4, 5, 1, 2, 2, 3, 4, 2]
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=False,
annotations=[
dict(
x=2,
y=5,
xref='x',
yref='y',
text='dict Text',
showarrow=True,
arrowhead=7,
ax=0,
ay=-40
)
]
)
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig, filename='simple-annotation')
```
### Multiple Annotations
```
trace1 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 1, 3, 2, 4, 3, 4, 6, 5]
)
trace2 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 4, 5, 1, 2, 2, 3, 4, 2]
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=False,
annotations=[
dict(
x=2,
y=5,
xref='x',
yref='y',
text='dict Text',
showarrow=True,
arrowhead=7,
ax=0,
ay=-40
),
dict(
x=4,
y=4,
xref='x',
yref='y',
text='dict Text 2',
showarrow=True,
arrowhead=7,
ax=0,
ay=-40
)
]
)
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig, filename='multiple-annotation')
```
### 3D Annotations
```
import plotly.plotly as py
import plotly.graph_objs as go
data = [go.Scatter3d(
x = ["2017-01-01", "2017-02-10", "2017-03-20"],
y = ["A", "B", "C"],
z = [1, 1000, 100000],
name = "z",
type = "scatter3d"
)]
layout = go.Layout(
scene = dict(
aspectratio = dict(
x = 1,
y = 1,
z = 1
),
camera = dict(
center = dict(
x = 0,
y = 0,
z = 0
),
eye = dict(
x = 1.96903462608,
y = -1.09022831971,
z = 0.405345349304
),
up = dict(
x = 0,
y = 0,
z = 1
)
),
dragmode = "turntable",
xaxis = dict(
title = "",
type = "date"
),
yaxis = dict(
title = "",
type = "category"
),
zaxis = dict(
title = "",
type = "log"
),
annotations = [dict(
showarrow = False,
x = "2017-01-01",
y = "A",
z = 0,
text = "Point 1",
xanchor = "left",
xshift = 10,
opacity = 0.7
), dict(
x = "2017-02-10",
y = "B",
z = 4,
text = "Point 2",
textangle = 0,
ax = 0,
ay = -75,
font = dict(
color = "black",
size = 12
),
arrowcolor = "black",
arrowsize = 3,
arrowwidth = 1,
arrowhead = 1
), dict(
x = "2017-03-20",
y = "C",
z = 5,
ax = 50,
ay = 0,
text = "Point 3",
arrowhead = 1,
xanchor = "left",
yanchor = "bottom"
)]
),
xaxis = dict(title = "x"),
yaxis = dict(title = "y")
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename = "3d annotations")
```
### Custom Text Color and Styling
```
import plotly.plotly as py
import plotly.graph_objs as go
trace1 = go.Scatter(
x=[0, 1, 2],
y=[1, 1, 1],
mode='lines+markers+text',
name='Lines, Markers and Text',
text=['Text A', 'Text B', 'Text C'],
textposition='top right',
textfont=dict(
family='sans serif',
size=18,
color='#1f77b4'
)
)
trace2 = go.Scatter(
x=[0, 1, 2],
y=[2, 2, 2],
mode='lines+markers+text',
name='Lines and Text',
text=['Text G', 'Text H', 'Text I'],
textposition='bottom',
textfont=dict(
family='sans serif',
size=18,
color='#ff7f0e'
)
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='text-chart-styling')
```
### Styling and Coloring Annotations
```
import plotly.plotly as py
import plotly.graph_objs as go
trace1 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 1, 3, 2, 4, 3, 4, 6, 5]
)
trace2 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 4, 5, 1, 2, 2, 3, 4, 2]
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=False,
annotations=[
dict(
x=2,
y=5,
xref='x',
yref='y',
text='max=5',
showarrow=True,
font=dict(
family='Courier New, monospace',
size=16,
color='#ffffff'
),
align='center',
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor='#636363',
ax=20,
ay=-30,
bordercolor='#c7c7c7',
borderwidth=2,
borderpad=4,
bgcolor='#ff7f0e',
opacity=0.8
)
]
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='style-annotation')
```
### Disabling Hover Text
```
import plotly.plotly as py
trace = dict(
x=[1, 2, 3,],
y=[10, 30, 15],
type='scatter',
name='first trace',
hoverinfo='none'
)
py.iplot([trace], filename='hoverinfo=none')
```
### Text Font as an Array - Styling Each Text Element
```
import plotly.plotly as py
import plotly.graph_objs as go
fig = go.Figure(
data=[
go.Scattergeo(
lat=[45.5,43.4,49.13,51.1,53.34,45.24,44.64,48.25,49.89,50.45],
lon=[-73.57,-79.24,-123.06,-114.1,-113.28,-75.43,-63.57,-123.21,-97.13,-104.6],
marker={
"color": ["#bebada","#fdb462","#fb8072","#d9d9d9","#bc80bd","#b3de69","#8dd3c7","#80b1d3","#fccde5","#ffffb3"],
"line": {
"width": 1
},
"size": 10
},
mode="markers+text",
name="",
text=["Montreal","Toronto","Vancouver","Calgary","Edmonton","Ottawa","Halifax","Victoria","Winnepeg","Regina"],
textfont={
"color": ["#bebada","#fdb462","#fb8072","#d9d9d9","#bc80bd","#b3de69","#8dd3c7","#80b1d3","#fccde5","#ffffb3"],
"family": ["Arial, sans-serif","Balto, sans-serif","Courier New, monospace","Droid Sans, sans-serif","Droid Serif, serif","Droid Sans Mono, sans-serif","Gravitas One, cursive","Old Standard TT, serif","Open Sans, sans-serif","PT Sans Narrow, sans-serif","Raleway, sans-serif","Times New Roman, Times, serif"],
"size": [22,21,20,19,18,17,16,15,14,13]
},
textposition=["top center","middle left","top center","bottom center","top right","middle left","bottom right","bottom left","top right","top right"]
)
],
layout={
"title": "Canadian cities",
"geo": {
"lataxis": {
"range": [40, 70]
},
"lonaxis": {
"range": [-130, -55]
},
"scope": "north america"
}
}
)
py.iplot(fig, filename='Canadian Cities')
```
### Adding Annotations with xref and yref as Paper
```
import plotly.plotly as py
import plotly.graph_objs as go
data = [
go.Scatter(
x=[1, 2, 3],
y=[1, 2, 3],
name='y',
)
]
layout = go.Layout(
annotations=[
dict(
x=0.5004254919715793,
y=-0.16191064079952971,
showarrow=False,
text='Custom x-axis title',
xref='paper',
yref='paper'
),
dict(
x=-0.04944728761514841,
y=0.4714285714285711,
showarrow=False,
text='Custom y-axis title',
textangle=-90,
xref='paper',
yref='paper'
)
],
autosize=True,
margin=dict(
b=100
),
title='Plot Title',
xaxis=dict(
autorange=False,
range=[-0.05674507980728292, -0.0527310420933204],
type='linear'
),
yaxis=dict(
autorange=False,
range=[1.2876210047544652, 1.2977732997811402],
type='linear'
),
height=550,
width=1137
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
```
#### Reference
See https://plot.ly/python/reference/#layout-annotations for more information and chart attribute options!
|
github_jupyter
|
import plotly as py
import plotly.graph_objs as go
py.offline.init_notebook_mode(connected=True)
trace1 = go.Scatter(
x=[0, 1, 2],
y=[1, 1, 1],
mode='lines+markers+text',
name='Lines, Markers and Text',
text=['Text A', 'Text B', 'Text C'],
textposition='top center'
)
trace2 = go.Scatter(
x=[0, 1, 2],
y=[2, 2, 2],
mode='markers+text',
name='Markers and Text',
text=['Text D', 'Text E', 'Text F'],
textposition='bottom center'
)
trace3 = go.Scatter(
x=[0, 1, 2],
y=[3, 3, 3],
mode='lines+text',
name='Lines and Text',
text=['Text G', 'Text H', 'Text I'],
textposition='bottom center'
)
data = [trace1, trace2, trace3]
layout = go.Layout(
showlegend=False
)
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig, filename='text-chart-basic')
data = [
go.Scatter(
x=[0, 1, 2],
y=[1, 3, 2],
mode='markers',
text=['Text A', 'Text B', 'Text C']
)
]
layout = go.Layout(
title='Hover over the points to see the text'
)
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig, filename='hover-chart-basic')
trace1 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 1, 3, 2, 4, 3, 4, 6, 5]
)
trace2 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 4, 5, 1, 2, 2, 3, 4, 2]
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=False,
annotations=[
dict(
x=2,
y=5,
xref='x',
yref='y',
text='dict Text',
showarrow=True,
arrowhead=7,
ax=0,
ay=-40
)
]
)
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig, filename='simple-annotation')
trace1 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 1, 3, 2, 4, 3, 4, 6, 5]
)
trace2 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 4, 5, 1, 2, 2, 3, 4, 2]
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=False,
annotations=[
dict(
x=2,
y=5,
xref='x',
yref='y',
text='dict Text',
showarrow=True,
arrowhead=7,
ax=0,
ay=-40
),
dict(
x=4,
y=4,
xref='x',
yref='y',
text='dict Text 2',
showarrow=True,
arrowhead=7,
ax=0,
ay=-40
)
]
)
fig = go.Figure(data=data, layout=layout)
py.offline.iplot(fig, filename='multiple-annotation')
import plotly.plotly as py
import plotly.graph_objs as go
data = [go.Scatter3d(
x = ["2017-01-01", "2017-02-10", "2017-03-20"],
y = ["A", "B", "C"],
z = [1, 1000, 100000],
name = "z",
type = "scatter3d"
)]
layout = go.Layout(
scene = dict(
aspectratio = dict(
x = 1,
y = 1,
z = 1
),
camera = dict(
center = dict(
x = 0,
y = 0,
z = 0
),
eye = dict(
x = 1.96903462608,
y = -1.09022831971,
z = 0.405345349304
),
up = dict(
x = 0,
y = 0,
z = 1
)
),
dragmode = "turntable",
xaxis = dict(
title = "",
type = "date"
),
yaxis = dict(
title = "",
type = "category"
),
zaxis = dict(
title = "",
type = "log"
),
annotations = [dict(
showarrow = False,
x = "2017-01-01",
y = "A",
z = 0,
text = "Point 1",
xanchor = "left",
xshift = 10,
opacity = 0.7
), dict(
x = "2017-02-10",
y = "B",
z = 4,
text = "Point 2",
textangle = 0,
ax = 0,
ay = -75,
font = dict(
color = "black",
size = 12
),
arrowcolor = "black",
arrowsize = 3,
arrowwidth = 1,
arrowhead = 1
), dict(
x = "2017-03-20",
y = "C",
z = 5,
ax = 50,
ay = 0,
text = "Point 3",
arrowhead = 1,
xanchor = "left",
yanchor = "bottom"
)]
),
xaxis = dict(title = "x"),
yaxis = dict(title = "y")
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename = "3d annotations")
import plotly.plotly as py
import plotly.graph_objs as go
trace1 = go.Scatter(
x=[0, 1, 2],
y=[1, 1, 1],
mode='lines+markers+text',
name='Lines, Markers and Text',
text=['Text A', 'Text B', 'Text C'],
textposition='top right',
textfont=dict(
family='sans serif',
size=18,
color='#1f77b4'
)
)
trace2 = go.Scatter(
x=[0, 1, 2],
y=[2, 2, 2],
mode='lines+markers+text',
name='Lines and Text',
text=['Text G', 'Text H', 'Text I'],
textposition='bottom',
textfont=dict(
family='sans serif',
size=18,
color='#ff7f0e'
)
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='text-chart-styling')
import plotly.plotly as py
import plotly.graph_objs as go
trace1 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 1, 3, 2, 4, 3, 4, 6, 5]
)
trace2 = go.Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 4, 5, 1, 2, 2, 3, 4, 2]
)
data = [trace1, trace2]
layout = go.Layout(
showlegend=False,
annotations=[
dict(
x=2,
y=5,
xref='x',
yref='y',
text='max=5',
showarrow=True,
font=dict(
family='Courier New, monospace',
size=16,
color='#ffffff'
),
align='center',
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor='#636363',
ax=20,
ay=-30,
bordercolor='#c7c7c7',
borderwidth=2,
borderpad=4,
bgcolor='#ff7f0e',
opacity=0.8
)
]
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='style-annotation')
import plotly.plotly as py
trace = dict(
x=[1, 2, 3,],
y=[10, 30, 15],
type='scatter',
name='first trace',
hoverinfo='none'
)
py.iplot([trace], filename='hoverinfo=none')
import plotly.plotly as py
import plotly.graph_objs as go
fig = go.Figure(
data=[
go.Scattergeo(
lat=[45.5,43.4,49.13,51.1,53.34,45.24,44.64,48.25,49.89,50.45],
lon=[-73.57,-79.24,-123.06,-114.1,-113.28,-75.43,-63.57,-123.21,-97.13,-104.6],
marker={
"color": ["#bebada","#fdb462","#fb8072","#d9d9d9","#bc80bd","#b3de69","#8dd3c7","#80b1d3","#fccde5","#ffffb3"],
"line": {
"width": 1
},
"size": 10
},
mode="markers+text",
name="",
text=["Montreal","Toronto","Vancouver","Calgary","Edmonton","Ottawa","Halifax","Victoria","Winnepeg","Regina"],
textfont={
"color": ["#bebada","#fdb462","#fb8072","#d9d9d9","#bc80bd","#b3de69","#8dd3c7","#80b1d3","#fccde5","#ffffb3"],
"family": ["Arial, sans-serif","Balto, sans-serif","Courier New, monospace","Droid Sans, sans-serif","Droid Serif, serif","Droid Sans Mono, sans-serif","Gravitas One, cursive","Old Standard TT, serif","Open Sans, sans-serif","PT Sans Narrow, sans-serif","Raleway, sans-serif","Times New Roman, Times, serif"],
"size": [22,21,20,19,18,17,16,15,14,13]
},
textposition=["top center","middle left","top center","bottom center","top right","middle left","bottom right","bottom left","top right","top right"]
)
],
layout={
"title": "Canadian cities",
"geo": {
"lataxis": {
"range": [40, 70]
},
"lonaxis": {
"range": [-130, -55]
},
"scope": "north america"
}
}
)
py.iplot(fig, filename='Canadian Cities')
import plotly.plotly as py
import plotly.graph_objs as go
data = [
go.Scatter(
x=[1, 2, 3],
y=[1, 2, 3],
name='y',
)
]
layout = go.Layout(
annotations=[
dict(
x=0.5004254919715793,
y=-0.16191064079952971,
showarrow=False,
text='Custom x-axis title',
xref='paper',
yref='paper'
),
dict(
x=-0.04944728761514841,
y=0.4714285714285711,
showarrow=False,
text='Custom y-axis title',
textangle=-90,
xref='paper',
yref='paper'
)
],
autosize=True,
margin=dict(
b=100
),
title='Plot Title',
xaxis=dict(
autorange=False,
range=[-0.05674507980728292, -0.0527310420933204],
type='linear'
),
yaxis=dict(
autorange=False,
range=[1.2876210047544652, 1.2977732997811402],
type='linear'
),
height=550,
width=1137
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
| 0.617859 | 0.930836 |
# CDTIME
The ``cdtime`` module implements the CDMS time types, methods, and
calendars. These are made available with the command:
```
import cdtime
```
Two time types are available: ***relative time*** and ***component time***.
* ***Relative time*** is time relative to a fixed base time. It consists of:
- a units string, of the form ‘units since basetime’, and
- a floating-point value
For example, the time **28.0 days since 1996-1-1** has value=28.0, and
**units=’days since 1996-1-1’**
* ***Component time*** consists of the integer fields year, month, day, hour,
minute, and the floating-point field second. A sample component time is
``1996-2-28 12:10:30.0``
The ``cdtime`` module contains functions for converting between these
forms, based on the common calendars used in climate simulation. Basic
arithmetic and comparison operators are also available.
## Calendars
A calendar specifies the number of days in each month, for a given year.
cdtime supports these calendars:
- **cdtime.GregorianCalendar**: years evenly divisible by four are leap
years, except century years not evenly divisible by 400. This is
sometimes called the proleptic Gregorian calendar, meaning that the
algorithm for leap years applies for all years.
- **cdtime.MixedCalendar**: mixed Julian/Gregorian calendar. Dates
before 1582-10-15 are encoded with the Julian calendar, otherwise are
encoded with the Gregorian calendar. The day immediately following
1582-10-4 is 1582-10-15. This is the default calendar.
- **cdtime.JulianCalendar**: years evenly divisible by four are leap
years,
- **cdtime.NoLeapCalendar**: all years have 365 days,
- **cdtime.Calendar360**: all months have 30 days.
Several ``cdtime`` functions have an optional calendar argument. The
default calendar is the ``MixedCalendar``. The default calendar may be
changed with the command:
``cdtime.DefaultCalendar = newCalendar``
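For example, most of the time methods described below accept an optional calendar argument; this small sketch (assuming the standard ``cdtime`` API) shows how the calendar changes leap-year handling:
```
import cdtime
c = cdtime.comptime(1996, 2, 28)
print(c.add(1, cdtime.Day, cdtime.GregorianCalendar))  # expected: 1996-2-29 (1996 is a leap year)
print(c.add(1, cdtime.Day, cdtime.NoLeapCalendar))     # expected: 1996-3-1 (no Feb 29)
```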
## Time Methods
The following methods apply both to relative and component times.
https://cdms.readthedocs.io/en/latest/manual/cdms_3.html#component-time
### Examples
```
import cdtime
c = cdtime.comptime(1996,2,28)
r = cdtime.reltime(28,"days since 1996-1-1")
print(r.add(1,cdtime.Day))
print(c.add(36,cdtime.Hours))
```
**Note:** When adding or subtracting intervals of months or years, only the month and year of the result are significant. The reason is that intervals in months/years are not commensurate with intervals in days or fractional days. This leads to results that may be surprising.
```
c = cdtime.comptime(1979,8,31)
c.add(1,cdtime.Month)
```
In other words, the day component of c was ignored in the addition,
and the day/hour/minute components of the results are just the defaults.
If the interval is in years, the interval is converted internally to
months:
```
c = cdtime.comptime(1979,8,31)
c.add(2,cdtime.Years)
```
### Compare time values
```
from cdtime import *
r = cdtime.reltime(28,"days since 1996-1-1")
c = cdtime.comptime(1996,2,28)
print(c.cmp(r))
print(c > r)
```
### Subtract an interval of time.
```
import cdtime
r = cdtime.reltime(28,"days since 1996-1-1")
c = cdtime.comptime(1996,2,28)
print(r.sub(10, cdtime.Days))
print(c.sub(30, cdtime.Days))
```
For intervals of years or months, see the **note** under add() in the example above.
#### Convert to component time.
```
import cdtime
r = cdtime.reltime(28,"days since 1996-1-1")
r.tocomp()
```
#### Convert to relative time.
```
c = cdtime.comptime(1996,2,28)
print(c.torel("days since 1996-1-1"))
r = reltime(28,"days since 1996-1-1")
print(r.torel("days since 1995"))
print(r.torel("days since 1995").value)
```
|
github_jupyter
|
import cdtime
import cdtime
c = cdtime.comptime(1996,2,28)
r = cdtime.reltime(28,"days since 1996-1-1")
print(r.add(1,cdtime.Day))
print(c.add(36,cdtime.Hours))
c = cdtime.comptime(1979,8,31)
c.add(1,cdtime.Month)
c = cdtime.comptime(1979,8,31)
c.add(2,cdtime.Years)
from cdtime import *
r = cdtime.reltime(28,"days since 1996-1-1")
c = cdtime.comptime(1996,2,28)
print(c.cmp(r))
print(c > r)
import cdtime
r = cdtime.reltime(28,"days since 1996-1-1")
c = cdtime.comptime(1996,2,28)
print(r.sub(10, cdtime.Days))
print(c.sub(30, cdtime.Days))
import cdtime
r = cdtime.reltime(28,"days since 1996-1-1")
r.tocomp()
c = cdtime.comptime(1996,2,28)
print(c.torel("days since 1996-1-1"))
r = reltime(28,"days since 1996-1-1")
print(r.torel("days since 1995"))
print(r.torel("days since 1995").value)
| 0.106029 | 0.924415 |
```
from rdkit import Chem
from rdkit.Chem import Draw
drugbank_input = Chem.SDMolSupplier('../data/drugbank.sdf')
drugbank = [m for m in drugbank_input if m]
```
# Scaffold analysis
Scaffolds are, roughly speaking, the "backbone" of a chemical structure: usually a central substructure describing, in varying levels of detail, the rings and the links between them. They come in many variants; here are some:
 (http://pubs.rsc.org/en/content/articlehtml/2008/np/b715668p)
They are often used as a metric for chemical diversity of a set of molecules, or to define a particular group of structures. I bet some of these will look familiar to you:
 (Murcko presentation)
Nice little presentation:
https://www.slideshare.net/jeremyjyang/molecular-scaffolds-talk
The presentation shows some scaffold analysis tools with a nice clicky graphical interface. But we will do it the 'fun' way: let's make some scaffolds in RDKit:
```
from rdkit.Chem.Scaffolds import MurckoScaffold
# scaffolding our favorite satanic structure:
basic_structure = drugbank[666]
atomic_scaffold = MurckoScaffold.GetScaffoldForMol(basic_structure)
graph_scaffold = MurckoScaffold.MakeScaffoldGeneric(atomic_scaffold)
Draw.MolsToGridImage((basic_structure, atomic_scaffold, graph_scaffold))
# let's scaffold entire DrugBank:
def make_atomic_scaffold(mol):
Chem.rdmolops.RemoveStereochemistry(mol)
atomic_scaffold = MurckoScaffold.GetScaffoldForMol(mol)
try:
Chem.SanitizeMol(atomic_scaffold)
except ValueError:
return None
return atomic_scaffold
drugbank_atomic_scaffolds = [make_atomic_scaffold(mol) for mol in drugbank]
len(drugbank_atomic_scaffolds), len([x for x in drugbank_atomic_scaffolds if x])
Draw.MolsToGridImage(drugbank[:6], subImgSize=(300, 300))
Draw.MolsToGridImage(drugbank_atomic_scaffolds[:6], subImgSize=(300, 300))
def make_graph_scaffold(mol):
Chem.rdmolops.RemoveStereochemistry(mol)
atomic_scaffold = MurckoScaffold.GetScaffoldForMol(mol)
try:
Chem.SanitizeMol(atomic_scaffold)
graph_scaffold = MurckoScaffold.MakeScaffoldGeneric(atomic_scaffold)
except ValueError:
return None
return graph_scaffold
drugbank_graph_scaffolds = [make_graph_scaffold(mol) for mol in drugbank]
len(drugbank_graph_scaffolds), len([x for x in drugbank_graph_scaffolds if x])
Draw.MolsToGridImage(drugbank_graph_scaffolds[:6], subImgSize=(300, 300))
```
## Scaffold data aggregation
```
def smiles_counts(smiles):
countdict = {}
for s in smiles:
countdict[s] = countdict.get(s, 0) + 1
return sorted(list(countdict.items()), key=lambda x: x[1], reverse=True)
drugbank_aggregated_atomic_scaffolds = smiles_counts((Chem.MolToSmiles(s) for s in drugbank_atomic_scaffolds if s))
drugbank_aggregated_atomic_scaffolds
Draw.MolsToGridImage([Chem.MolFromSmiles(scaffold) for scaffold, count in drugbank_aggregated_atomic_scaffolds[:12]],
legends=[str(count) for scaffold, count in drugbank_aggregated_atomic_scaffolds[:12]],
subImgSize=(300, 300))
drugbank_aggregated_graph_scaffolds = smiles_counts((Chem.MolToSmiles(s) for s in drugbank_graph_scaffolds if s))
drugbank_aggregated_graph_scaffolds
Draw.MolsToGridImage([Chem.MolFromSmiles(scaffold) for scaffold, count in drugbank_aggregated_graph_scaffolds[:12]],
legends=[str(count) for scaffold, count in drugbank_aggregated_graph_scaffolds[:12]],
subImgSize=(300, 300))
```
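As a side note, the hand-rolled `smiles_counts` above could be written more compactly with `collections.Counter`; a small equivalent sketch:
```
from collections import Counter

def smiles_counts_alt(smiles):
    # same output shape as smiles_counts: (smiles, count) pairs, most frequent first
    return Counter(smiles).most_common()
```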
|
github_jupyter
|
from rdkit import Chem
from rdkit.Chem import Draw
drugbank_input = Chem.SDMolSupplier('../data/drugbank.sdf')
drugbank = [m for m in drugbank_input if m]
from rdkit.Chem.Scaffolds import MurckoScaffold
# scaffolding our favorite satanic structure:
basic_structure = drugbank[666]
atomic_scaffold = MurckoScaffold.GetScaffoldForMol(basic_structure)
graph_scaffold = MurckoScaffold.MakeScaffoldGeneric(atomic_scaffold)
Draw.MolsToGridImage((basic_structure, atomic_scaffold, graph_scaffold))
# let's scaffold entire DrugBank:
def make_atomic_scaffold(mol):
Chem.rdmolops.RemoveStereochemistry(mol)
atomic_scaffold = MurckoScaffold.GetScaffoldForMol(mol)
try:
Chem.SanitizeMol(atomic_scaffold)
except ValueError:
return None
return atomic_scaffold
drugbank_atomic_scaffolds = [make_atomic_scaffold(mol) for mol in drugbank]
len(drugbank_atomic_scaffolds), len([x for x in drugbank_atomic_scaffolds if x])
Draw.MolsToGridImage(drugbank[:6], subImgSize=(300, 300))
Draw.MolsToGridImage(drugbank_atomic_scaffolds[:6], subImgSize=(300, 300))
def make_graph_scaffold(mol):
Chem.rdmolops.RemoveStereochemistry(mol)
atomic_scaffold = MurckoScaffold.GetScaffoldForMol(mol)
try:
Chem.SanitizeMol(atomic_scaffold)
graph_scaffold = MurckoScaffold.MakeScaffoldGeneric(atomic_scaffold)
except ValueError:
return None
return graph_scaffold
drugbank_graph_scaffolds = [make_graph_scaffold(mol) for mol in drugbank]
len(drugbank_graph_scaffolds), len([x for x in drugbank_graph_scaffolds if x])
Draw.MolsToGridImage(drugbank_graph_scaffolds[:6], subImgSize=(300, 300))
def smiles_counts(smiles):
countdict = {}
for s in smiles:
countdict[s] = countdict.get(s, 0) + 1
return sorted(list(countdict.items()), key=lambda x: x[1], reverse=True)
drugbank_aggregated_atomic_scaffolds = smiles_counts((Chem.MolToSmiles(s) for s in drugbank_atomic_scaffolds if s))
drugbank_aggregated_atomic_scaffolds
Draw.MolsToGridImage([Chem.MolFromSmiles(scaffold) for scaffold, count in drugbank_aggregated_atomic_scaffolds[:12]],
legends=[str(count) for scaffold, count in drugbank_aggregated_atomic_scaffolds[:12]],
subImgSize=(300, 300))
drugbank_aggregated_graph_scaffolds = smiles_counts((Chem.MolToSmiles(s) for s in drugbank_graph_scaffolds if s))
drugbank_aggregated_graph_scaffolds
Draw.MolsToGridImage([Chem.MolFromSmiles(scaffold) for scaffold, count in drugbank_aggregated_graph_scaffolds[:12]],
legends=[str(count) for scaffold, count in drugbank_aggregated_graph_scaffolds[:12]],
subImgSize=(300, 300))
| 0.339609 | 0.884089 |
# Create a simple solar system model
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from collections import namedtuple
```
# Define a planet class
```
class planet():
"A planet in our solar system"
def __init__(self,semimajor,eccentricity):
self.x=np.zeros(2)
self.v=np.zeros(2)
self.a_g=np.zeros(2)
self.t=0.0
self.dt=0.0
self.a=semimajor
self.e=eccentricity
self.istep=0
self.name=""
```
# Define a dictionary w/ some constants
```
solar_system={"M_sun":1.0,"G":39.4784176043574320}
```
# Define some functions for setting circular velocity, and acceleration
```
def SolarCircularVelocity(p):
G=solar_system["G"]
M=solar_system["M_sun"]
r=(p.x[0]**2+p.x[1]**2)**0.5
return(G*M/r)**0.5
```
# Write a function to compute the gravitational acceleration on each planet from the Sun
```
def SolarGravitationalAcceleration(p):
G=solar_system["G"]
    M=solar_system["M_sun"]
r=(p.x[0]**2+p.x[1]**2)**0.5
a_grav=-1.0*G*M/r**2
if(p.x[0]==0.0):
if(p.x[1]>0.0):
theta=0.5*np.pi
else:
theta=1.5*np.pi
else:
theta=np.arctan2(p.x[1],p.x[0])
return a_grav*np.cos(theta),a_grav*np.sin(theta)
```
# Compute the timestep
```
def calc_dt(p):
ETA_TIME_STEP=0.0004
eta=ETA_TIME_STEP
v=(p.v[0]**2+p.v[1]**2)**0.5
a=(p.a_g[0]**2+p.a_g[1]**2)**0.5
dt=eta*np.fmin(1./np.fabs(v),1/np.fabs(a)**0.5)
return dt
```
# Define the initial conditions
```
def SetPlanet(p,i):
AU_in_km=1.495979e+8 #an AU in km
#circular velocity
    v_c=0.0 #circular velocity in AU/yr
v_e=0.0 #velocity at perihelion in AU/yr
#planet-by-planet
#Mercury
if(i==0):
#semi-major axis in AU
p.a=57909227.0/AU_in_km
#eccentricity
p.e=0.20563593
#name
p.name="Mercury"
#Venus
elif(i==1):
p.a=108209475.0/AU_in_km
p.e=0.00677672
p.name="Venus"
#Earth
elif(i==2):
p.a=1.0
p.e=0.01671123
p.name="Earth"
#set remaining properties
p.t=0.0
p.x[0]=p.a*(1.0-p.e)
p.x[1]=0.0
#get equiv circular velocity
v_c=SolarCircularVelocity(p)
    #velocity at perihelion
v_e=v_c*(1+p.e)**0.5
#set velocity
p.v[0]=0.0 #no x velocity at perihelion
p.v[1]=v_e #y velocity at perihelion (CCW)
#calculate gravitational acceleration from Sun
p.a_g=SolarGravitationalAcceleration(p)
#set timestep
p.dt=calc_dt(p)
```
# Write leapfrog integrator
```
def x_first_step(x_i,v_i,a_i,dt):
    return x_i+0.5*v_i*dt+0.25*a_i*dt**2
def v_full_step(v_i,a_ipoh,dt):
return v_i+a_ipoh*dt;
def x_full_step(x_ipoh,v_ipl,a_ipoh,dt):
return x_ipoh+v_ipl*dt;
```
# Write a function to save the data to file
```
def SaveSolarSystem(p,n_planets,t,dt,istep,ndim):
#loop over the number of planets
for i in range(n_planets):
#define a filename
fname="planet.%s.txt"%p[i].name
if(istep==0):
#create a file on the first timestep
fp=open(fname,"w")
else:
#append the file on subsequent timesteps
fp=open(fname,"a")
#compute the drifted properties of the planet
v_drift=np.zeros(ndim)
for k in range(ndim):
v_drift[k]=p[i].v[k]+0.5*p[i].a_g[k]*p[i].dt
#write the data to file
s="%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f/n" % (istep,t,dt,p[i].istep,p[i].t,p[i].dt,p[i].x[0],p[i].x[1],v_drift[0],v_drift[1],p[i].a_g[0],p[i].ag[1])
fp.write(s)
fp.close()
```
# Write a function to evolve the solar system
```
def EvolveSolarSystem(p,n_planets,t_max):
ndim=2
dt=0.5/365.25
t=0.0
istep=0
SaveSolarSystem(p,n_planets,t,dt,istep,ndim)
    while(t<t_max):
if(t+dt>t_max):
dt=t_max-t
        for i in range(n_planets):
            while(p[i].t<t+dt):
                #take the first step as a special case: offset x by half a step
                if(p[i].istep==0):
                    for k in range(ndim):
                        p[i].x[k]=x_first_step(p[i].x[k],p[i].v[k],p[i].a_g[k],p[i].dt)
                    p[i].a_g=SolarGravitationalAcceleration(p[i])
                    p[i].t+=0.5*p[i].dt
                else:
                    #full leapfrog step: kick the velocity, drift the position, recompute a
                    for k in range(ndim):
                        p[i].v[k]=v_full_step(p[i].v[k],p[i].a_g[k],p[i].dt)
                    for k in range(ndim):
                        p[i].x[k]=x_full_step(p[i].x[k],p[i].v[k],p[i].a_g[k],p[i].dt)
                    p[i].a_g=SolarGravitationalAcceleration(p[i])
                    p[i].t+=p[i].dt
                p[i].dt=calc_dt(p[i])
                p[i].istep+=1
t+=dt
istep+=1
SaveSolarSystem(p,n_planets,t,dt,istep,ndim)
print("Time t=",t)
print("Maximum t=",t_max)
print("Max number of steps=",istep)
```
# Create a routine to read in the data
```
def read_twelve_arrays(fname):
fp=open(fname,"r")
fl=fp.readlines()
n=len(fl)
a=np.zeros(n)
b=np.zeros(n)
c=np.zeros(n)
d=np.zeros(n)
f=np.zeros(n)
g=np.zeros(n)
h=np.zeros(n)
j=np.zeros(n)
k=np.zeros(n)
l=np.zeros(n)
m=np.zeros(n)
p=np.zeros(n)
for i in range(n):
a[i]=float(fl[i].split()[0])
b[i]=float(fl[i].split()[1])
c[i]=float(fl[i].split()[2])
d[i]=float(fl[i].split()[3])
f[i]=float(fl[i].split()[4])
g[i]=float(fl[i].split()[5])
h[i]=float(fl[i].split()[6])
j[i]=float(fl[i].split()[7])
k[i]=float(fl[i].split()[8])
l[i]=float(fl[i].split()[9])
m[i]=float(fl[i].split()[10])
p[i]=float(fl[i].split()[11])
return a,b,c,d,f,g,h,j,k,l,m,p
```
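The same files could also be read more compactly with numpy's text loader; a sketch of an equivalent helper:
```
def read_twelve_arrays_np(fname):
    # equivalent to read_twelve_arrays, using np.loadtxt on the tab-separated output
    data = np.loadtxt(fname, ndmin=2)
    return tuple(data[:, k] for k in range(12))
```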
# Perform the integration of the solar system
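The original notebook stops here; a minimal driver cell for this step might look like the sketch below (the 2-year integration time is an assumed value, and the run may take a minute or two):
```
n_planets = 3
t_max = 2.0   # years of integration (assumed value)

#create and initialize the planets
planets = []
for i in range(n_planets):
    planets.append(planet(0.0, 0.0))
    SetPlanet(planets[i], i)

#evolve the system and write the per-planet files
EvolveSolarSystem(planets, n_planets, t_max)

#read the files back and plot the orbits
fig = plt.figure(figsize=(7, 7))
for i in range(n_planets):
    fname = "planet.%s.txt" % planets[i].name
    istepg, tg, dtg, istepp, tp, dtp, x, y, vx, vy, ax, ay = read_twelve_arrays(fname)
    plt.plot(x, y, label=planets[i].name)
plt.xlabel('x [AU]')
plt.ylabel('y [AU]')
plt.axis('equal')
plt.legend(frameon=False)
plt.show()
```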
|
github_jupyter
|
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from collections import namedtuple
class planet():
"A planet in our solar system"
def __init__(self,semimajor,eccentricity):
self.x=np.zeros(2)
self.v=np.zeros(2)
self.a_g=np.zeros(2)
self.t=0.0
self.dt=0.0
self.a=semimajor
self.e=eccentricity
self.istep=0
self.name=""
solar_system={"M_sun":1.0,"G":39.4784176043574320}
def SolarCircularVelocity(p):
G=solar_system["G"]
M=solar_system["M_sun"]
r=(p.x[0]**2+p.x[1]**2)**0.5
return(G*M/r)**0.5
def SolarGravitationalAcceleration(p):
G=solar_system["G"]
    M=solar_system["M_sun"]
r=(p.x[0]**2+p.x[1]**2)**0.5
a_grav=-1.0*G*M/r**2
if(p.x[0]==0.0):
if(p.x[1]>0.0):
theta=0.5*np.pi
else:
theta=1.5*np.pi
else:
theta=np.arctan2(p.x[1],p.x[0])
return a_grav*np.cos(theta),a_grav*np.sin(theta)
def calc_dt(p):
ETA_TIME_STEP=0.0004
eta=ETA_TIME_STEP
v=(p.v[0]**2+p.v[1]**2)**0.5
a=(p.a_g[0]**2+p.a_g[1]**2)**0.5
dt=eta*np.fmin(1./np.fabs(v),1/np.fabs(a)**0.5)
return dt
def SetPlanet(p,i):
AU_in_km=1.495979e+8 #an AU in km
#circular velocity
    v_c=0.0 #circular velocity in AU/yr
v_e=0.0 #velocity at perihelion in AU/yr
#planet-by-planet
#Mercury
if(i==0):
#semi-major axis in AU
p.a=57909227.0/AU_in_km
#eccentricity
p.e=0.20563593
#name
p.name="Mercury"
#Venus
elif(i==1):
p.a=108209475.0/AU_in_km
p.e=0.00677672
p.name="Venus"
#Earth
elif(i==2):
p.a=1.0
p.e=0.01671123
p.name="Earth"
#set remaining properties
p.t=0.0
p.x[0]=p.a*(1.0-p.e)
p.x[1]=0.0
#get equiv circular velocity
v_c=SolarCircularVelocity(p)
    #velocity at perihelion
v_e=v_c*(1+p.e)**0.5
#set velocity
p.v[0]=0.0 #no x velocity at perihelion
p.v[1]=v_e #y velocity at perihelion (CCW)
#calculate gravitational acceleration from Sun
p.a_g=SolarGravitationalAcceleration(p)
#set timestep
p.dt=calc_dt(p)
def x_first_step(x_i,v_i,a_i,dt):
    return x_i+0.5*v_i*dt+0.25*a_i*dt**2
def v_full_step(v_i,a_ipoh,dt):
return v_i+a_ipoh*dt;
def x_full_step(x_ipoh,v_ipl,a_ipoh,dt):
return x_ipoh+v_ipl*dt;
def SaveSolarSystem(p,n_planets,t,dt,istep,ndim):
#loop over the number of planets
for i in range(n_planets):
#define a filename
fname="planet.%s.txt"%p[i].name
if(istep==0):
#create a file on the first timestep
fp=open(fname,"w")
else:
#append the file on subsequent timesteps
fp=open(fname,"a")
#compute the drifted properties of the planet
v_drift=np.zeros(ndim)
for k in range(ndim):
v_drift[k]=p[i].v[k]+0.5*p[i].a_g[k]*p[i].dt
#write the data to file
s="%6d\t%6.5f\t%6.5f\t%6d\t%6.5f\t%6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f\t% 6.5f/n" % (istep,t,dt,p[i].istep,p[i].t,p[i].dt,p[i].x[0],p[i].x[1],v_drift[0],v_drift[1],p[i].a_g[0],p[i].ag[1])
fp.write(s)
fp.close()
def EvolveSolarSystem(p,n_planets,t_max):
ndim=2
dt=0.5/365.25
t=0.0
istep=0
SaveSolarSystem(p,n_planets,t,dt,istep,ndim)
    while(t<t_max):
if(t+dt>t_max):
dt=t_max-t
        for i in range(n_planets):
            while(p[i].t<t+dt):
                #take the first step as a special case: offset x by half a step
                if(p[i].istep==0):
                    for k in range(ndim):
                        p[i].x[k]=x_first_step(p[i].x[k],p[i].v[k],p[i].a_g[k],p[i].dt)
                    p[i].a_g=SolarGravitationalAcceleration(p[i])
                    p[i].t+=0.5*p[i].dt
                else:
                    #full leapfrog step: kick the velocity, drift the position, recompute a
                    for k in range(ndim):
                        p[i].v[k]=v_full_step(p[i].v[k],p[i].a_g[k],p[i].dt)
                    for k in range(ndim):
                        p[i].x[k]=x_full_step(p[i].x[k],p[i].v[k],p[i].a_g[k],p[i].dt)
                    p[i].a_g=SolarGravitationalAcceleration(p[i])
                    p[i].t+=p[i].dt
                p[i].dt=calc_dt(p[i])
                p[i].istep+=1
t+=dt
istep+=1
SaveSolarSystem(p,n_planets,t,dt,istep,ndim)
print("Time t=",t)
print("Maximum t=",t_max)
print("Max number of steps=",istep)
def read_twelve_arrays(fname):
fp=open(fname,"r")
fl=fp.readlines()
n=len(fl)
a=np.zeros(n)
b=np.zeros(n)
c=np.zeros(n)
d=np.zeros(n)
f=np.zeros(n)
g=np.zeros(n)
h=np.zeros(n)
j=np.zeros(n)
k=np.zeros(n)
l=np.zeros(n)
m=np.zeros(n)
p=np.zeros(n)
for i in range(n):
a[i]=float(fl[i].split()[0])
b[i]=float(fl[i].split()[1])
c[i]=float(fl[i].split()[2])
d[i]=float(fl[i].split()[3])
f[i]=float(fl[i].split()[4])
g[i]=float(fl[i].split()[5])
h[i]=float(fl[i].split()[6])
j[i]=float(fl[i].split()[7])
k[i]=float(fl[i].split()[8])
l[i]=float(fl[i].split()[9])
m[i]=float(fl[i].split()[10])
p[i]=float(fl[i].split()[11])
return a,b,c,d,f,g,h,j,k,l,m,p
| 0.260201 | 0.90764 |
# Monsoon (Sperber)
This notebook demonstrates how to use the PCDMI Monsoon (Sperber) driver.
It is expected that you have downloaded the sample data as demonstrated in [the download notebook](Demo_0_download_data.ipynb)
The following cell reads in the choices you made during the download data step.
```
from user_choices import demo_data_directory, demo_output_directory
```
For immediate help with using the monsoon (sperber) driver, use the `--help` flag, demonstrated here:
```
%%bash
driver_monsoon_sperber.py --help
```
## Basic Example
This metric uses daily precipitation data and computes monsoon scores over 6 preset regions, shown below.
```
from IPython.display import Image
Image(filename = "../../../pcmdi_metrics/monsoon_sperber/doc/monsoon_domain_map.png")
```
First we demonstrate the parameter file for the basic example.
Along with model and observational data, this metric needs to be provided with land fraction masks (`modpath_lf` and `reference_data_lf_path`). The model needs to have `mip` and `exp` specified at a minimum. `frequency`, `realm`, and `realization` are optional. Furthermore, start and end years must be selected for both model and observations.
```
with open("basic_monsoon_sperber_param.py") as f:
print(f.read())
```
To run the driver using only a parameter file for inputs, do the following:
```
%%bash
driver_monsoon_sperber.py -p basic_monsoon_sperber_param.py
```
## Output options
There are several options for output data format. Users can choose to generate metrics in netCDF format along with png graphics.
To save these results in a different folder, pass a different `--results_dir` value. Using `$1` to refer to the `demo_output_directory` variable is a trick for the Jupyter Notebook and is not needed for regular command line use.
```
%%bash -s "$demo_output_directory"
driver_monsoon_sperber.py -p basic_monsoon_sperber_param.py \
--nc_out --plot --results_dir $1/monsoon_sperber/Ex2
```
## Results
At a minimum, this driver will produce a JSON file containing the monsoon metrics in the `results_dir`. If the user requests the netCDF and plot outputs, those will also be present in the `results_dir`. Looking at the results from Ex2:
```
! ls {demo_output_directory + "/monsoon_sperber/Ex2"}
```
The monsoon metrics are found in the "RESULTS" object in the JSON file. Below we extract and display these metrics.
```
import json
metrics_file = demo_output_directory + "/monsoon_sperber/Ex2/monsoon_sperber_stat_cmip5_historical_da_atm_2000-2005.json"
with open(metrics_file) as f:
results = json.load(f)["RESULTS"]
print(json.dumps(results, indent = 2))
```
For more help interpreting these values, please consult the following paper:
Sperber, K. and H. Annamalai, 2014:
The use of fractional accumulated precipitation for the evaluation of the
annual cycle of monsoons. Climate Dynamics, 43:3219-3244,
doi: 10.1007/s00382-014-2099-3
If `plot = True`, the driver also outputs figures that compare the precipitation pentads between model and observations.
```
Image(filename=demo_output_directory+"/monsoon_sperber/Ex2/cmip5_GISS-E2-H_historical_r1i1p1_monsoon_sperber_2000-2005.png")
```
|
github_jupyter
|
from user_choices import demo_data_directory, demo_output_directory
%%bash
driver_monsoon_sperber.py --help
from IPython.display import Image
Image(filename = "../../../pcmdi_metrics/monsoon_sperber/doc/monsoon_domain_map.png")
with open("basic_monsoon_sperber_param.py") as f:
print(f.read())
%%bash
driver_monsoon_sperber.py -p basic_monsoon_sperber_param.py
%%bash -s "$demo_output_directory"
driver_monsoon_sperber.py -p basic_monsoon_sperber_param.py \
--nc_out --plot --results_dir $1/monsoon_sperber/Ex2
! ls {demo_output_directory + "/monsoon_sperber/Ex2"}
import json
metrics_file = demo_output_directory + "/monsoon_sperber/Ex2/monsoon_sperber_stat_cmip5_historical_da_atm_2000-2005.json"
with open(metrics_file) as f:
results = json.load(f)["RESULTS"]
print(json.dumps(results, indent = 2))
Image(filename=demo_output_directory+"/monsoon_sperber/Ex2/cmip5_GISS-E2-H_historical_r1i1p1_monsoon_sperber_2000-2005.png")
| 0.127435 | 0.975832 |
```
from IPython.display import Markdown as md
### change to reflect your notebook
_nb_loc = "09_deploying/09c_changesig.ipynb"
_nb_title = "Changing signatures of exported model"
### no need to change any of this
_nb_safeloc = _nb_loc.replace('/', '%2F')
md("""
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name={1}&url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fblob%2Fmaster%2F{2}&download_url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fraw%2Fmaster%2F{2}">
<img src="https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png"/> Run in AI Platform Notebook</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/{0}">
<img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
""".format(_nb_loc, _nb_title, _nb_safeloc))
```
# Changing signatures of exported model
In this notebook, we start from an already trained and saved model (as in Chapter 7).
For convenience, we have put this model in a public bucket in gs://practical-ml-vision-book/flowers_5_trained
## Enable GPU and set up helper functions
This notebook and pretty much every other notebook in this repository
will run faster if you are using a GPU.
On Colab:
- Navigate to Edit→Notebook Settings
- Select GPU from the Hardware Accelerator drop-down
On Cloud AI Platform Notebooks:
- Navigate to https://console.cloud.google.com/ai-platform/notebooks
- Create an instance with a GPU or select your instance and add a GPU
Next, we'll confirm that we can connect to the GPU with tensorflow:
```
import tensorflow as tf
print('TensorFlow version: ' + tf.version.VERSION)
print('Built with GPU support? ' + ('Yes!' if tf.test.is_built_with_cuda() else 'Noooo!'))
print('There are {} GPUs'.format(len(tf.config.experimental.list_physical_devices("GPU"))))
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
```
## Exported model
We start from a trained and saved model from Chapter 7.
<pre>
model.save(...)
</pre>
```
MODEL_LOCATION='gs://practical-ml-vision-book/flowers_5_trained'
!gsutil ls {MODEL_LOCATION}
!saved_model_cli show --tag_set serve --signature_def serving_default --dir {MODEL_LOCATION}
```
## Passing through an input
Note that the signature doesn't tell us the input filename.
Let's add that.
```
import os
import shutil
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_LOCATION)
@tf.function(input_signature=[tf.TensorSpec([None,], dtype=tf.string)])
def predict_flower_type(filenames):
old_fn = model.signatures['serving_default']
result = old_fn(filenames) # has flower_type_int etc.
result['filename'] = filenames
return result
shutil.rmtree('export', ignore_errors=True)
os.mkdir('export')
model.save('export/flowers_model',
signatures={
'serving_default': predict_flower_type
})
!saved_model_cli show --tag_set serve --signature_def serving_default --dir export/flowers_model
import tensorflow as tf
serving_fn = tf.keras.models.load_model('export/flowers_model').signatures['serving_default']
filenames = [
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg'
]
pred = serving_fn(tf.convert_to_tensor(filenames))
print(pred)
```
## Multiple signatures
```
import os
import shutil
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_LOCATION)
old_fn = model.signatures['serving_default']
@tf.function(input_signature=[tf.TensorSpec([None,], dtype=tf.string)])
def pass_through_input(filenames):
result = old_fn(filenames) # has flower_type_int etc.
result['filename'] = filenames
return result
shutil.rmtree('export', ignore_errors=True)
os.mkdir('export')
model.save('export/flowers_model2',
signatures={
'serving_default': old_fn,
'input_pass_through': pass_through_input
})
!saved_model_cli show --tag_set serve --dir export/flowers_model2
!saved_model_cli show --tag_set serve --dir export/flowers_model2 --signature_def serving_default
!saved_model_cli show --tag_set serve --dir export/flowers_model2 --signature_def input_pass_through
```
## Deploying multi-signature model as REST API
```
%%bash
BUCKET="ai-analytics-solutions-mlvisionbook" # CHANGE
gsutil -m cp -r ./export/flowers_model2 gs://${BUCKET}/flowers_model2
%%bash
BUCKET="ai-analytics-solutions-mlvisionbook" # CHANGE
./vertex_deploy.sh \
--endpoint_name=multi \
--model_name=multi \
--model_location=gs://${BUCKET}/flowers_model2
```
## IMPORTANT: CHANGE THIS CELL
Note the endpoint ID and deployed model ID above. Set it in the cell below.
```
# CHANGE THESE TO REFLECT WHERE YOU DEPLOYED THE MODEL
import os
os.environ['ENDPOINT_ID'] = '130472447798411264' # CHANGE
os.environ['MODEL_ID'] = '1810350293179695104' # CHANGE
os.environ['PROJECT'] = 'ai-analytics-solutions' # CHANGE
os.environ['BUCKET'] = 'ai-analytics-solutions-mlvisionbook' # CHANGE
os.environ['REGION'] = 'us-central1' # CHANGE
%%writefile request.json
{
"instances": [
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg"
}
]
}
%%bash
gcloud ai endpoints predict ${ENDPOINT_ID} \
--region=${REGION} \
--json-request=request.json \
--format=json
%%writefile request.json
{
"signature_name": "input_pass_through",
"instances": [
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg"
}
]
}
%%bash
gcloud ai endpoints predict ${ENDPOINT_ID} \
--region=${REGION} \
--json-request=request.json \
--format=json
```
This is a bug. I've filed the bug report. Hopefully will be fixed soon.
```
# Invoke from Python.
import json
from oauth2client.client import GoogleCredentials
import requests
PROJECT = os.environ['PROJECT']
REGION = os.environ['REGION']
ENDPOINT_ID = os.environ['ENDPOINT_ID']
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = "https://{}-aiplatform.googleapis.com/v1/projects/{}/locations/{}/endpoints/{}:predict".format(
REGION, PROJECT, REGION, ENDPOINT_ID)
headers = {"Authorization": "Bearer " + token }
data = {
"signature_name": "input_pass_through", # currently bugged
"instances": [
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg"
}
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
```
## License
Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
github_jupyter
|
from IPython.display import Markdown as md
### change to reflect your notebook
_nb_loc = "09_deploying/09c_changesig.ipynb"
_nb_title = "Changing signatures of exported model"
### no need to change any of this
_nb_safeloc = _nb_loc.replace('/', '%2F')
md("""
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://console.cloud.google.com/ai-platform/notebooks/deploy-notebook?name={1}&url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fblob%2Fmaster%2F{2}&download_url=https%3A%2F%2Fgithub.com%2FGoogleCloudPlatform%2Fpractical-ml-vision-book%2Fraw%2Fmaster%2F{2}">
<img src="https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png"/> Run in AI Platform Notebook</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/GoogleCloudPlatform/practical-ml-vision-book/blob/master/{0}">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/{0}">
<img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
""".format(_nb_loc, _nb_title, _nb_safeloc))
import tensorflow as tf
print('TensorFlow version: ' + tf.version.VERSION)
print('Built with GPU support? ' + ('Yes!' if tf.test.is_built_with_cuda() else 'Noooo!'))
print('There are {} GPUs'.format(len(tf.config.experimental.list_physical_devices("GPU"))))
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
MODEL_LOCATION='gs://practical-ml-vision-book/flowers_5_trained'
!gsutil ls {MODEL_LOCATION}
!saved_model_cli show --tag_set serve --signature_def serving_default --dir {MODEL_LOCATION}
import os
import shutil
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_LOCATION)
@tf.function(input_signature=[tf.TensorSpec([None,], dtype=tf.string)])
def predict_flower_type(filenames):
old_fn = model.signatures['serving_default']
result = old_fn(filenames) # has flower_type_int etc.
result['filename'] = filenames
return result
shutil.rmtree('export', ignore_errors=True)
os.mkdir('export')
model.save('export/flowers_model',
signatures={
'serving_default': predict_flower_type
})
!saved_model_cli show --tag_set serve --signature_def serving_default --dir export/flowers_model
import tensorflow as tf
serving_fn = tf.keras.models.load_model('export/flowers_model').signatures['serving_default']
filenames = [
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg',
'gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg'
]
pred = serving_fn(tf.convert_to_tensor(filenames))
print(pred)
import os
import shutil
import tensorflow as tf
model = tf.keras.models.load_model(MODEL_LOCATION)
old_fn = model.signatures['serving_default']
@tf.function(input_signature=[tf.TensorSpec([None,], dtype=tf.string)])
def pass_through_input(filenames):
result = old_fn(filenames) # has flower_type_int etc.
result['filename'] = filenames
return result
shutil.rmtree('export', ignore_errors=True)
os.mkdir('export')
model.save('export/flowers_model2',
signatures={
'serving_default': old_fn,
'input_pass_through': pass_through_input
})
!saved_model_cli show --tag_set serve --dir export/flowers_model2
!saved_model_cli show --tag_set serve --dir export/flowers_model2 --signature_def serving_default
!saved_model_cli show --tag_set serve --dir export/flowers_model2 --signature_def input_pass_through
%%bash
BUCKET="ai-analytics-solutions-mlvisionbook" # CHANGE
gsutil -m cp -r ./export/flowers_model2 gs://${BUCKET}/flowers_model2
%%bash
BUCKET="ai-analytics-solutions-mlvisionbook" # CHANGE
./vertex_deploy.sh \
--endpoint_name=multi \
--model_name=multi \
--model_location=gs://${BUCKET}/flowers_model2
# CHANGE THESE TO REFLECT WHERE YOU DEPLOYED THE MODEL
import os
os.environ['ENDPOINT_ID'] = '130472447798411264' # CHANGE
os.environ['MODEL_ID'] = '1810350293179695104' # CHANGE
os.environ['PROJECT'] = 'ai-analytics-solutions' # CHANGE
os.environ['BUCKET'] = 'ai-analytics-solutions-mlvisionbook' # CHANGE
os.environ['REGION'] = 'us-central1' # CHANGE
%%writefile request.json
{
"instances": [
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg"
}
]
}
%%bash
gcloud ai endpoints predict ${ENDPOINT_ID} \
--region=${REGION} \
--json-request=request.json \
--format=json
%%writefile request.json
{
"signature_name": "input_pass_through",
"instances": [
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg"
}
]
}
%%bash
gcloud ai endpoints predict ${ENDPOINT_ID} \
--region=${REGION} \
--json-request=request.json \
--format=json
# Invoke from Python.
import json
from oauth2client.client import GoogleCredentials
import requests
PROJECT = os.environ['PROJECT']
REGION = os.environ['REGION']
ENDPOINT_ID = os.environ['ENDPOINT_ID']
token = GoogleCredentials.get_application_default().get_access_token().access_token
api = "https://{}-aiplatform.googleapis.com/v1/projects/{}/locations/{}/endpoints/{}:predict".format(
REGION, PROJECT, REGION, ENDPOINT_ID)
headers = {"Authorization": "Bearer " + token }
data = {
"signature_name": "input_pass_through", # currently bugged
"instances": [
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9818247_e2eac18894.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/dandelion/9853885425_4a82356f1d_m.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/daisy/9299302012_958c70564c_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8733586143_3139db6e9e_n.jpg"
},
{
"filenames": "gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/8713397358_0505cc0176_n.jpg"
}
]
}
response = requests.post(api, json=data, headers=headers)
print(response.content)
<a href="https://colab.research.google.com/github/unicamp-dl/IA025_2022S1/blob/main/ex09/Carlos_Ancasi/Aula_9_Exercicio_Carlos_Ancasi.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
nome = 'Carlos Leonardo Ancasi Hinostroza'
print(f'Meu nome é {nome}')
```
# Exercise: Language Model with self-attention
This exercise is similar to the one from Class 8, but now we will train a neural network with **two causal self-attention layers** to predict the next word of a text, given the previous words as input.
We will also work with variable-length sequences.
In the self-attention layer, do not forget to implement (a small standalone sketch of the causal-masking idea follows the advice list below):
- Position embeddings
- Linear projections (WQ, WK, WV, WO)
- Residual connections
- Feed-forward layer (2-layer MLP)
The dataset used in this exercise (BrWaC) is reasonably large, and you will need a GPU to run your experiments.
Some useful advice:
- **ATTENTION:** the dataset is quite large. Do not try to print it.
- While debugging, make your dataset very small so that debugging is faster and does not require a GPU. Only switch the GPU on once your training loop is already working.
- Do not leave this exercise for the last minute. It is labor-intensive.
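A tiny, self-contained sketch of the causal-masking idea (separate from the `LanguageModel` you will implement below): the mask is added to the attention scores *before* the softmax, so each position can only attend to itself and to earlier positions. All shapes and names here are illustrative only.
```
import torch
import torch.nn.functional as F

B, L, D = 2, 5, 8                                  # batch, sequence length, embedding dim
X = torch.randn(B, L, D)                           # stand-in for token + position embeddings
Wq = torch.nn.Linear(D, D, bias=False)
Wk = torch.nn.Linear(D, D, bias=False)
Wv = torch.nn.Linear(D, D, bias=False)

Q, K, V = Wq(X), Wk(X), Wv(X)
scores = Q @ K.transpose(1, 2) / D ** 0.5          # (B, L, L) scaled dot-product scores
mask = torch.full((L, L), float('-inf')).triu(1)   # -inf above the diagonal
probs = F.softmax(scores + mask, dim=-1)           # future positions receive weight 0
out = probs @ V                                    # (B, L, D)
print(out.shape, probs[0, 0])                      # the first position attends only to itself
```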
```
# we will use the transformers library to get access to the BERT tokenizer.
!pip install transformers
```
## Package imports
```
import collections
import itertools
import functools
import math
import random
import torch
import torch.nn as nn
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm_notebook
# Check which GPU we are using
!nvidia-smi
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
device = torch.device(dev)
print('Using {}'.format(device))
```
## MyDataset implementation
```
from typing import List
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
tokenizer.vocab
dummy_texts = 'Eu gosto de correr'
token_ids = tokenizer(dummy_texts, return_tensors=None, add_special_tokens=False)
token_ids
# print(token_ids)
from typing import List
def tokenize(text: str, tokenizer):
# Using tokenizer.batch_encode_plus is recommended because it is faster.
return tokenizer(text, return_tensors=None, add_special_tokens=False).input_ids
class MyDataset():
def __init__(self, texts: List[str], tokenizer, max_seq_length: int):
# Escreva aqui seu código.
self.x = []
self.y = []
x = [101]
y = []
x.extend([0]*(max_seq_length-1))
y.extend([0]*max_seq_length)
for texto in texts:
token = tokenize(texto, tokenizer)
for i in range(0, len(token), (max_seq_length - 1) ):
context_size = (max_seq_length - 1)
if i + max_seq_length - 1 > len(token):
context_size = len(token) % (max_seq_length - 1)
x_a = x[:]
x_a[1:context_size+1]=token[i:i+context_size]
y_a = y[:]
y_a[0:context_size] = token[i:i+context_size]
self.x.append(x_a)
self.y.append(y_a)
def __len__(self):
# Escreva aqui seu código.
return len(self.x)
def __getitem__(self, idx):
# Escreva aqui seu código.
return torch.LongTensor(self.x[idx], ), torch.LongTensor(self.y[idx])
```
## Testing whether the MyDataset implementation is correct
```
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
dummy_texts = ['Eu gosto de correr', 'Ela gosta muito de comer pizza']
dummy_dataset = MyDataset(texts=dummy_texts, tokenizer=tokenizer, max_seq_length=9)
dummy_loader = DataLoader(dummy_dataset, batch_size=6, shuffle=False)
assert len(dummy_dataset) == 2
print('Passou no assert de tamanho do dataset.')
first_batch_input, first_batch_target = next(iter(dummy_loader))
correct_first_batch_input = torch.LongTensor(
[[ 101, 3396, 10303, 125, 13239, 0, 0, 0, 0],
[ 101, 1660, 5971, 785, 125, 1847, 13779, 15616, 0]])
correct_first_batch_target = torch.LongTensor(
[[ 3396, 10303, 125, 13239, 0, 0, 0, 0, 0],
[ 1660, 5971, 785, 125, 1847, 13779, 15616, 0, 0]])
print(first_batch_input)
print(first_batch_target)
assert torch.equal(first_batch_input, correct_first_batch_input)
assert torch.equal(first_batch_target, correct_first_batch_target)
print('Passou no assert de dataset.')
```
# Loading the dataset
We will use a small sample of the [BrWaC](https://www.inf.ufrgs.br/pln/wiki/index.php?title=BrWaC) dataset to train and evaluate our language model.
```
!wget -nc https://storage.googleapis.com/unicamp-dl/ia025a_2022s1/aula9/sample-1gb.txt
# Load datasets
max_seq_length = 9
train_examples = 50000
valid_examples = 100
test_examples = 100
texts = open('sample-1gb.txt').readlines()
print(f'Read {len(texts)} lines.')
max_lines = train_examples + valid_examples + test_examples
print(f'Truncating to {max_lines} lines.')
texts = texts[:max_lines]
training_texts = texts[:-(valid_examples + test_examples)]
valid_texts = texts[-(valid_examples + test_examples):-test_examples]
test_texts = texts[-test_examples:]
training_dataset = MyDataset(texts=training_texts, tokenizer=tokenizer, max_seq_length=max_seq_length)
valid_dataset = MyDataset(texts=valid_texts, tokenizer=tokenizer, max_seq_length=max_seq_length)
test_dataset = MyDataset(texts=test_texts, tokenizer=tokenizer, max_seq_length=max_seq_length)
print(f'training examples: {len(training_dataset)}')
print(f'valid examples: {len(valid_dataset)}')
print(f'test examples: {len(test_dataset)}')
a = torch.tensor([[1,2],[3,4]])
b = (a==1)*a
b
class LanguageModel(torch.nn.Module):
def __init__(self, vocab_size: int, max_seq_length: int, dim: int, n_layers: int, pad_token_id: int):
"""
Implements a decoder-only self-attention language model.
Args:
vocab_size (int): Size of the input vocabulary.
max_seq_length (int): Size of the sequence to consider as context for prediction.
dim (int): Dimension of the embedding layer for each word in the context.
n_layers (int): number of self-attention layers.
pad_token_id (int): id of the pad token that will be ignored in the attention.
"""
# Escreva seu código aqui.
super().__init__()
self.vocab_size = vocab_size
self.max_seq_length = max_seq_length
self.dim = dim
self.n_layers = n_layers
self.pad_token_id = pad_token_id
# C()
self.C_w = nn.Embedding(vocab_size, dim)
# P()
self.P_w = nn.Embedding(max_seq_length, dim)
self.K_w = nn.Linear(dim, dim , bias=False)
self.Q_w = nn.Linear(dim, dim , bias=False)
self.V_w = nn.Linear(dim, dim , bias=False)
self.E_w = nn.Linear(dim, dim , bias=False)
hidden_size = 2*dim
self.linear1 = nn.Linear(dim, hidden_size)
self.relu1 = nn.ReLU()
self.linear2 = nn.Linear(hidden_size, vocab_size, bias=False)
self.softmax = nn.Softmax(dim=-1)
        val_negativo = -1000000
        # Additive causal mask: val_negativo above the diagonal blocks attention to future
        # positions. Registered as a buffer so it moves to the GPU together with the model.
        causal_mask = (torch.ones(max_seq_length, max_seq_length) * val_negativo).triu(diagonal=1)
        self.register_buffer('filtro_aten', causal_mask.unsqueeze(dim=0))
def forward(self, inputs):
"""
Args:
inputs is a LongTensor of shape (batch_size, max_seq_length)
Returns:
logits of shape (batch_size, max_seq_length, vocab_size)
"""
        # Note: this sketch stacks a single attention block; n_layers is kept only for the interface.
        C_emb = self.C_w(inputs)  # B,L,D token embeddings
        P_emb = self.P_w(torch.arange(self.max_seq_length, device=inputs.device)).unsqueeze(0)  # 1,L,D position embeddings
        X = C_emb + P_emb  # B,L,D
        Q = self.Q_w(X)  # B,L,D
        K = self.K_w(X)  # B,L,D
        V = self.V_w(X)  # B,L,D
        scores = torch.matmul(Q, torch.transpose(K, 1, 2)) / math.sqrt(self.dim)  # B,L,L
        scores = scores + self.filtro_aten  # causal mask applied before the softmax
        probs = self.softmax(scores)  # B,L,L
        att = torch.matmul(probs, V)  # B,L,D
        X = X + self.E_w(att)  # output projection (WO) plus residual connection
        hidden = self.relu1(self.linear1(X))  # feed-forward layer
        logits = self.linear2(hidden)  # B,L,vocab_size
        return logits
```
## Test the model with an example
```
model = LanguageModel(
vocab_size=tokenizer.vocab_size,
max_seq_length=max_seq_length,
dim=64,
n_layers=2,
pad_token_id=tokenizer.pad_token_id,
).to(device)
sample_input, _ = next(iter(DataLoader(training_dataset)))
sample_input = sample_input.to(device)
sample_output = model(sample_input)
print(f'sample_input.shape: {sample_input.shape}')
print(f'sample_output.shape: {sample_output.shape}')
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'Number of model parameters: {num_params}')
```
## Perplexity assert
```
random.seed(123)
np.random.seed(123)
torch.manual_seed(123)
def perplexity(logits, target, ignore_token_id: int):
"""
Computes the perplexity.
Args:
logits: a FloatTensor of shape (batch_size, seq_length, vocab_size)
target: a LongTensor of shape (batch_size, seq_length)
Returns:
A float corresponding to the perplexity
"""
logits = logits.reshape(-1, logits.shape[-1])
target = target.reshape(-1)
loss = nn.functional.cross_entropy(logits, target, reduction='mean', ignore_index=ignore_token_id)
return torch.exp(loss)
n_examples = 1000
train_input_ids, train_target_ids = next(iter(DataLoader(training_dataset, batch_size=n_examples)))
train_input_ids = train_input_ids.to(device)
train_target_ids = train_target_ids.to(device)
logits = model(train_input_ids)
my_perplexity = perplexity(logits=logits, target=train_target_ids, ignore_token_id=tokenizer.pad_token_id)
print(f'my perplexity: {int(my_perplexity)}')
print(f'correct initial perplexity: {tokenizer.vocab_size}')
assert math.isclose(my_perplexity, tokenizer.vocab_size, abs_tol=7000)
print('Passou o no assert da perplexidade')
```
## Training and Validation Loop
```
max_examples = 150_000_000
eval_every_steps = 10000
lr = 3e-4
model = LanguageModel(
vocab_size=tokenizer.vocab_size,
max_seq_length=max_seq_length,
dim=64,
n_layers=2,
pad_token_id=tokenizer.pad_token_id,
).to(device)
train_loader = DataLoader(training_dataset, batch_size=64, shuffle=True, drop_last=True)
validation_loader = DataLoader(valid_dataset, batch_size=64)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
def train_step(input_ids, target_ids):
model.train()
model.zero_grad()
logits = model(input_ids)
logits = logits.reshape(-1, logits.shape[-1])
target_ids = target_ids.reshape(-1)
loss = nn.functional.cross_entropy(logits, target_ids, ignore_index=model.pad_token_id)
loss.backward()
optimizer.step()
return loss.item()
def validation_step(input_ids, target_ids):
model.eval()
logits = model(input_ids)
logits = logits.reshape(-1, logits.shape[-1])
target_ids = target_ids.reshape(-1)
loss = nn.functional.cross_entropy(logits, target_ids, ignore_index=model.pad_token_id)
return loss.item()
train_losses = []
n_examples = 0
step = 0
while n_examples < max_examples:
for train_input_ids, train_target_ids in train_loader:
loss = train_step(train_input_ids.to(device), train_target_ids.to(device))
train_losses.append(loss)
if step % eval_every_steps == 0:
train_ppl = np.exp(np.average(train_losses))
with torch.no_grad():
valid_ppl = np.exp(np.average([
validation_step(val_input_ids.to(device), val_target_ids.to(device))
for val_input_ids, val_target_ids in validation_loader]))
print(f'{step} steps; {n_examples} examples so far; train ppl: {train_ppl:.2f}, valid ppl: {valid_ppl:.2f}')
train_losses = []
n_examples += len(train_input_ids) # Increment of batch size
step += 1
if n_examples >= max_examples:
break
```
## Final evaluation on the test dataset
Bonus: the model with the lowest perplexity on the test dataset earns 0.5 extra points on the final grade.
```
test_loader = DataLoader(test_dataset, batch_size=64)
with torch.no_grad():
test_ppl = np.exp(np.average([
validation_step(test_input_ids.to(device), test_target_ids.to(device))
for test_input_ids, test_target_ids in test_loader
]))
print(f'test perplexity: {test_ppl}')
```
## Test your model with a sentence
Pick a model-generated sentence that you find interesting.
```
prompt = 'Eu gosto de comer pizza pois me faz'
max_output_tokens = 20
model.eval()
for _ in range(max_output_tokens):
input_ids = tokenize(text=prompt, tokenizer=tokenizer)
    input_ids_truncated = input_ids[-max_seq_length:]  # We use only the last <max_seq_length> tokens as model input.
    logits = model(torch.LongTensor([input_ids_truncated]).to(device))
    logits = logits[:, -1, :]  # We use only the last token of the sequence.
    # With argmax, the model output at each step is the most probable token.
    # This is called greedy decoding.
    predicted_id = torch.argmax(logits).item()
    input_ids += [predicted_id]  # We concatenate the input with the token chosen at this step.
prompt = tokenizer.decode(input_ids)
print(prompt)
```
## Bonus 1
Whoever achieves the lowest perplexity on the test dataset earns 0.5 extra points on the final average.
## Bonus 2
What is the complexity (in big-O notation) of the text-generation function above?
Whoever answers the question above correctly and rewrites the function with lower complexity earns 0.5 extra points on the final average.
# **Bioinformatics with Jupyter Notebooks for WormBase:**
## **Analyses 8 - Literature Analyses**
Welcome to the eighth jupyter notebook in the WormBase tutorial series. Over this series of tutorials, we will write code in Python that allows us to retrieve and perform simple analyses with data available on the WormBase sites.
This tutorial will deal with obtaining different literature-related information such as the information that can be obtained using the Textpresso Central website.
Let's get started!
We will start by importing required libraries for the analysis. We use the Europe PMC API for obtaining this information!
```
import requests
import sys
import json
import urllib3
import xml.dom.minidom
from lxml import etree
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
```
Let us first explore the fields that are available in the Europe PMC API.
We first need to query the API for fetching the fields that can be used for data extraction with this API. We then print out the results.
```
request = requests.get('https://www.ebi.ac.uk/europepmc/webservices/rest/fields',
headers={ "Content-Type" : "application/json", "Accept" : ""})
if not request.ok:
request.raise_for_status()
sys.exit()
result = xml.dom.minidom.parseString(request.text)
result = result.toprettyxml()
print(result)
```
If you know the accession ID of a paper, it is very easy to download any supplementary material associated with it by using the supplementaryFiles endpoint of the API.
We generate the URL required for our query by entering the accession id of the paper in the id variable.
Then we download the queried results to our system into a '.zip' file.
```
id = 'PMC3027648'
request = requests.get('https://www.ebi.ac.uk/europepmc/webservices/rest/' + id + \
'/supplementaryFiles?includeInlineImage=true',
headers = {"Content-Type" : "application/zip", "Accept" : ""}, stream=True)
target_path='supplementaryFiles.zip'
handle = open(target_path, 'wb')
for chunk in request.iter_content(chunk_size=512):
if chunk:
handle.write(chunk)
handle.close()
```
It is extremely useful to query for papers that contain a certain keyword. For this we define a function (which you do not need to modify) that queries the keyword across the entire Europe PMC database.
```
def searchEuropePMCclient(query, format='XML'):
base_url = 'https://www.ebi.ac.uk/europepmc/webservices/rest/search?'
payload = {'query' : query, 'format' : format}
request = requests.get(base_url, params=payload)
if request.ok:
result = xml.dom.minidom.parseString(request.text)
result = result.toprettyxml()
print(result)
else:
print('Something has gone wrong!!')
```
Assign the keyword that you want to search for using the API to the keyword variable.
```
keyword = 'Caenorhabditis elegans'
searchEuropePMCclient(keyword)
```
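The search endpoint returns raw XML. As a hedged sketch, the paper titles can be pulled out of that response with the same `xml.dom.minidom` parser used above; the element name `title` is an assumption, so confirm it against the pretty-printed XML before relying on it.
```
base_url = 'https://www.ebi.ac.uk/europepmc/webservices/rest/search?'
response = requests.get(base_url, params={'query': keyword, 'format': 'XML'})
result_dom = xml.dom.minidom.parseString(response.text)
# Collect the text of every <title> element (assumed element name) in the response
titles = [node.firstChild.nodeValue
          for node in result_dom.getElementsByTagName('title')
          if node.firstChild is not None]
print(f'{len(titles)} titles on the first page of results:')
for title in titles[:5]:
    print('-', title)
```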
Another useful utility provided by the Europe PMC API is the possibility to query for the works of a certain author using either their name or their ORCID ID.
Assign the author's name or ORCID ID to the author_id variable.
```
author_id = '0000-0001-8314-8497'
```
We first generate the required URL for fetching the papers written by the author and then send the request.
```
request = requests.get('https://www.ebi.ac.uk/europepmc/webservices/rest/search?query=AUTHORID:' + author_id,
headers={ "Content-Type" : "application/json", "Accept" : ""})
if not request.ok:
request.raise_for_status()
sys.exit()
result = xml.dom.minidom.parseString(request.text)
result = result.toprettyxml()
print(result)
```
It is also possible to list the papers that have cited a certain publication by entering the source of the paper and its external id, which in most cases is its accession id.
Assign the source and external id of the paper to the variables source and external_id.
The source can be - AGR, CBA, CTX, ETH, HIR, MED, PAT, PMC, PPR
```
source = 'MED'
external_id = '30206121'
```
We then generate the required URL for fetching the papers that cite the queried paper and send the request.
```
request = requests.get('https://www.ebi.ac.uk/europepmc/webservices/rest/' + source + '/' + external_id + \
'/citations',
headers={ "Content-Type" : "application/json", "Accept" : ""})
if not request.ok:
request.raise_for_status()
sys.exit()
result = xml.dom.minidom.parseString(request.text)
result = result.toprettyxml()
print(result)
```
This is the end of the tutorial on replicating Textpresso results using the Europe PMC RESTful API to obtain literature analysis information. The data is up to date, quick to extract, and easy to handle.
This tutorial is also the end of the analysis series. In the next tutorial, we will implement and test some simple utilities for the data.
Acknowledgements:
- Textpresso Central (https://textpressocentral.org/tpc/home)
- EuropePMC API (http://europepmc.org/RestfulWebService)
# Lesson 4 Class Exercises: Pandas Part 2
With these class exercises we learn a few new things. When new knowledge is introduced you'll see the icon shown on the right:
<span style="float:right; margin-left:10px; clear:both;"></span>
## Get Started
Import the Numpy and Pandas packages
```
import numpy as np
import pandas as pd
```
## Exercise 1: Review of Pandas Part 1
### Task 1: Explore the data
Import the data from the [Lectures in Quantitative Economics](https://github.com/QuantEcon/lecture-source-py) repository regarding minimum wages in countries around the world in US Dollars. You can view the data [here](https://github.com/QuantEcon/lecture-source-py/blob/master/source/_static/lecture_specific/pandas_panel/realwage.csv) and you can access the data file here: https://raw.githubusercontent.com/QuantEcon/lecture-source-py/master/source/_static/lecture_specific/pandas_panel/realwage.csv. Then perform the following steps.
Import the data into a variable named `minwages` and print the first 5 lines of data to explore what is there.
```
minwages = pd.read_csv('https://raw.githubusercontent.com/QuantEcon/lecture-source-py/master/source/_static/lecture_specific/pandas_panel/realwage.csv',)
minwages.head(5)
```
Find the shape of the data.
```
minwages.shape
```
List the column names.
```
minwages.columns
```
Identify the data types. Do they match what you would expect?
```
minwages.dtypes
```
Identify columns with missing values.
```
minwages.isna().sum()
```
Identify if there are duplicated entries.
```
minwages.duplicated().sum()
```
How many unique values are there per column? Do these look reasonable given the data type and what you know about what is stored in each column?
```
minwages.nunique()
```
### Task 2: Explore More
Retrieve descriptive statistics for the data.
```
minwages.describe()
```
Identify all of the countries listed in the data.
```
minwages['Country'].unique()
```
Convert the time column to a datetime object.
```
minwages['Time'] = pd.to_datetime(minwages['Time'])
minwages.dtypes
```
List the time points that were used for data collection. How many years of data collection were there? What time of year were the data collected?
```
minwages['Time'].unique()
```
Because we only have one data point collected per year per country, simplify this by adding a new column with just the year. Print the first 5 rows to confirm the column was added.
```
minwages['Year'] = minwages['Time'].dt.year
minwages.head()
minwages['Year'].unique()
```
There are two pay periods. Retrieve them in a list of just the two strings
```
minwages['Pay period'].unique()
minwages['Series'].unique()
```
### Task 3: Clean the data
We have no duplicates in this data, so we do not need to consider removing those, but we do have missing values in the `value` column. Let's remove those. Check the dimensions afterwards to make sure the rows with missing values are gone.
```
minwages.dropna(inplace=True)
minwages.shape
```
If your dataframe has an "Unnamed: 0" column remove it, as it's not needed. Note: in the `pd.read_csv()` function you can use the `index_col` argument to set the column in the file that provides the index and that would prevent the "Unnamed: 0" column with this dataset.
```
#minwages.drop(['Unnamed: 0'], axis=1, inplace=True)
```
### Task 4: Indexing
Use boolean indexing to retrieve the rows of annual salary in United States
```
minwages[(minwages['Country'] == 'United States') &
(minwages['Pay period'] == 'Annual')]
```
Do we have enough data to calculate descriptive statistics for annual salary in the United States in 2016?
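A quick way to check (a sketch; it reuses the `Year` column added earlier) is to count how many matching rows exist:
```python
us_annual_2016 = minwages[(minwages['Country'] == 'United States') &
                          (minwages['Pay period'] == 'Annual') &
                          (minwages['Year'] == 2016)]
len(us_annual_2016)
```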
From here on out, let's only explore the rows that have a "series" value of "In 2015 constant prices at 2015 USD exchange rates"
```
minwages2 = minwages[minwages['Series'] == 'In 2015 constant prices at 2015 USD exchange rates']
minwages2.shape
```
Use `loc` to calculate descriptive statistics for the hourly salary in the United States and then again separately for Ireland. Hint: you will have to set row indexes. Hint: you should reset the index before using `loc`
Now do the same for Annual salary
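One possible approach for both pay periods (a sketch, not the only valid answer): reset to a numeric index, set `Country` as the row index, filter by pay period, and then use `loc`.
```python
indexed = minwages2.reset_index(drop=True).set_index('Country')

# Hourly wages
hourly = indexed[indexed['Pay period'] == 'Hourly']
print(hourly.loc['United States', 'value'].describe())
print(hourly.loc['Ireland', 'value'].describe())

# Annual salaries
annual = indexed[indexed['Pay period'] == 'Annual']
print(annual.loc['United States', 'value'].describe())
print(annual.loc['Ireland', 'value'].describe())
```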
## Exercise 2: Occurrences
First, reset the indexes back to numeric values. Print the first 10 lines to confirm.
Get the count of how many rows there are per year.
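A sketch of one way to do this, using the `Year` column created earlier:
```python
minwages2 = minwages2.reset_index(drop=True)
print(minwages2.head(10))

# Number of rows recorded per year
minwages2['Year'].value_counts().sort_index()
```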
## Exercise 3: Grouping
### Task 1: Aggregation
Calculate the average salary for each country across all years.
```
groups = minwages2.groupby(['Country', 'Pay period'])
groups.mean().head()
```
Calculate the average salary and hourly wage for each country across all years. Save the resulting dataframe containing the means into a new variable named `mwmean`.
<span style="float:right; margin-left:10px; clear:both;"></span>
Above we saw how to aggregate using built-in functions of the `DataFrameGroupBy` object. For example, we called the `mean` function directly. These handy functions help with writing succinct code. However, you can also use the `aggregate` function to do more! You can learn more on the [aggregate description page](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.aggregate.html)
With `aggregate` we can perform operations across rows and columns, and we can perform more than one operation at a time. Explore the online documentation for the function and see how you would calculate the mean, min, and max for each country and pay period type, as well as the total number of records per country and pay period:
```
groups.aggregate(['mean', 'std', 'count'])
```
Also you can use the aggregate on a single column of the grouped object. For example:
```python
mwgroup = minwages[['Country', 'Pay period', 'value']].groupby(['Country', 'Pay period'])
mwgroup['value'].aggregate(['mean'])
```
Redo the aggregate function in the previous cell but this time apply it to a single column.
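For instance, restricted to the `value` column of the existing `groups` object (a sketch):
```python
groups['value'].aggregate(['mean', 'min', 'max', 'count'])
```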
### Task 2: Slicing/Indexing
<span style="float:right; margin-left:10px; clear:both;"></span>
In the following code the resulting dataframe should contain only one data column: the mean values. It does, however, have two levels of indexes: Country and Pay period. For example:
```python
mwgroup = minwages[['Country', 'Pay period', 'value']].groupby(['Country', 'Pay period'])
mwmean = mwgroup.mean()
mwmean
```
Try it out:
```
mwgroup = minwages2[['Country', 'Pay period', 'value']].groupby(['Country', 'Pay period'])
mwmean = mwgroup.mean()
mwmean
```
Notice in the output above there are two levels of indexes. This is called MultiIndexing. In reality, there is only one data column and two index levels. So, you can do this:
```python
mwmean['value']
```
But you can't do this:
```python
mwmean['Pay period']
```
Why not? Try it:
```
mwmean['value']
mwmean['Pay period']
```
The reason we cannot execute `mwmean['Pay period']` is because `Pay period` is not a data column. It's an index. Let's learn how to use MultiIndexes to retrieve data. You can learn more about it on the [MultiIndex/advanced indexing page](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html#advanced-indexing-with-hierarchical-index)
First, let's take a look at the indexes using the `index` attribute.
```python
mwmean.index
```
Try it:
```
mwmean.index
```
Notice that each index is actually a tuple with two levels. The first is the country names and the second is the pay period. Remember, we can use the `loc` function to slice a dataframe using indexes. We can do so with a MultiIndexed dataframe as well. For example, to extract all elements with the index named 'Australia':
```python
mwmean.loc[('Australia')]
```
Try it yourself:
You can specify both indexes to pull out a single row. For example, to find the average hourly salary in Australia:
```python
mwmean.loc[('Australia','Hourly')]
```
Try it yourself:
```
mwmean.loc[('Australia','Hourly')]
```
Suppose you wanted to retrieve all of the mean "Hourly" wages. For MultiIndexes, there are multiple ways to slice it, some are not entirely intuitive or flexible enough. Perhaps the easiest is to use the `pd.IndexSlice` object. It allows you to specify an index format that is intuitive to the way you've already learned to slice. For example:
```python
idx = pd.IndexSlice
mwmean.loc[idx[:,'Hourly'],:]
```
In the code above, the `idx[:, 'Hourly']` portion is used in the "row" indexer position of the `loc` function. It indicates that we want all possible first-level indexes (specified with the `:`) and we want second-level indexes to be restricted to "Hourly".
Try it out yourself:
```
idx = pd.IndexSlice
rows = idx[:,'Hourly']
mwmean.loc[rows,:]
```
Using what you've learned above about slicing the MultiIndexed dataframe, find out which country has had the highest average annual salary.
You can move the indexes into the dataframe and reset the index to a traditional single-level numeric index by reseting the indexes:
```python
mwmean.reset_index()
```
Try it yourself:
```
mwmean2 = mwmean.reset_index()
mwmean2[mwmean2['Pay period'] == 'Hourly']
```
### Task 3: Filtering the original data.
<span style="float:right; margin-left:10px; clear:both;"></span>
Another way we might want to filter is to find records in the dataset that, after grouping, meet some criteria. For example, what if we wanted to find the records for all countries where the average annual salary is greater than $35K?
To do this, we can use the `filter` function of the `DataFrameGroupBy` object. The filter function must take a function as an argument (this is new and may seem weird).
```python
annualwages = minwages[minwages['Pay period'] == 'Annual']
annualwages.groupby(['Country']).filter(
lambda x : x['value'].mean() > 22000
)
```
Try it:
```
annualwages = minwages2[minwages2['Pay period'] == 'Annual']
annualwages.groupby(['Country']).filter(
lambda x : x['value'].mean() > 22000
)
```
### Task 4: Reset the index
If you do not want to use MultiIndexes and you prefer to return a MultiIndexed dataset back to a traditional 1-level index dataframe, you can use the `reset_index` function.
Try it out on the `mwmean` dataframe:
## Exercise 4: Task 6d from the practice notebook
Load the iris dataset.
In the Iris dataset (one possible approach is sketched after this list):
+ Create a new column with the label "region" in the iris data frame. This column will indicate geographic regions of the US where measurements were taken. Values should include: 'Southeast', 'Northeast', 'Midwest', 'Southwest', 'Northwest'. Use these randomly.
+ Use `groupby` to get a new data frame of means for each species in each region.
+ Add a `dev_stage` column by randomly selecting from the values "early" and "late".
+ Use `groupby` to get a new data frame of means for each species, in each region and each development stage.
+ Use the `count` function (just like you used the `mean` function) to identify how many rows in the table belong to each combination of species + region + developmental stage.
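A sketch of one way to work through this task, as referenced above. Loading the iris data from seaborn is an assumption made purely for convenience; substitute your own copy of the dataset if your course provides one.
```python
import numpy as np
import pandas as pd
import seaborn as sns  # assumption: used only to obtain a copy of the iris data

iris = sns.load_dataset('iris')

# Random region and developmental-stage labels
regions = ['Southeast', 'Northeast', 'Midwest', 'Southwest', 'Northwest']
iris['region'] = np.random.choice(regions, size=len(iris))
iris['dev_stage'] = np.random.choice(['early', 'late'], size=len(iris))

# Means per species and region
species_region_means = iris.groupby(['species', 'region']).mean(numeric_only=True)

# Means per species, region and developmental stage
full_means = iris.groupby(['species', 'region', 'dev_stage']).mean(numeric_only=True)

# Rows per species + region + developmental stage combination
iris.groupby(['species', 'region', 'dev_stage']).count()
```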
## Exercise 5: Kaggle Titanic Dataset
A dataset of Titanic passengers and their fates is provided by the online machine learning competition server [Kaggle](https://www.kaggle.com/). See the [Titanic project](https://www.kaggle.com/c/titanic) page for more details.
Let's practice all we have learned thus far to explore and perhaps clean this dataset. You have been provided with the dataset named `Titanic_train.csv`.
### Task 1: Explore the data
First import the data and print the first 10 lines.
```
titanic = pd.read_csv("../../data/Titanic_train.csv")
titanic.head(10)
```
Find the shape of the data.
```
titanic.shape
```
List the column names.
```
titanic.columns
```
Identify the data types. Do they match what you would expect?
```
titanic.dtypes
```
Identify columns with missing values.
```
titanic.isna().sum()
```
Identify if there are duplicated entries.
```
titanic.duplicated().sum()
```
How many unique values are there per column? Do these look reasonable given the data type and what you know about what is stored in each column?
```
titanic.nunique()
```
### Task 2: Clean the data
Do missing values need to be removed? If so, remove them.
Do duplicates need to be removed? If so remove them.
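A light-touch sketch of one cleaning strategy (whether to drop or impute is a judgment call): `Cabin` is mostly empty, while `Age` and `Embarked` have a smaller number of missing values.
```python
titanic_clean = titanic.drop(columns=['Cabin'])              # mostly empty column
titanic_clean = titanic_clean.dropna(subset=['Age', 'Embarked'])
print(titanic_clean.shape)
print(titanic_clean.duplicated().sum())                      # confirm there is nothing left to deduplicate
```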
### Task 3: Find Interesting Facts
Count the number of passengers that survived and died in each passenger class
```
titanic.groupby(['Survived', 'Pclass']).count()
```
Were men or women more likely to survive?
```
titanic.groupby(['Survived', 'Sex']).count()
```
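The counts above answer the question, but a survival *rate* per sex makes the comparison more direct; since `Survived` is 0/1, its mean is the survival rate.
```python
titanic.groupby('Sex')['Survived'].mean()
```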
What was the average, min and max ticket prices per passenger class?
Hint: look at the help page for the [agg](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.agg.html) function to help simplify this.
```
scounts = titanic.groupby(['Pclass']).agg({'mean', 'max', 'min'})
scounts['Fare']
```
Give descriptive statistics about the survival age.
```
titanic[(titanic['Survived'] == 1)]['Age'].dropna().describe()
```
# Translate `dzn` to `fzn`
### Check Versions of Tools
```
import os
import subprocess
my_env = os.environ.copy()
output = subprocess.check_output(f'''/home/{my_env['USER']}/minizinc/build/minizinc --version''',
shell=True, universal_newlines=True)
output
import subprocess
output = subprocess.check_output(f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools --version''',
shell=True, universal_newlines=True)
output
```
## Transform `dzn` to `fzn` Using `Mz1-noAbs.mzn`
Transform each desired `.dzn` data file to `.fzn` using the `Mz1-noAbs.mzn` MiniZinc model.
```
import os
dzn_files = []
dzn_path = f'''/home/{my_env['USER']}/data/dzn/'''
for filename in os.listdir(dzn_path):
if filename.endswith(".dzn"):
dzn_files.append(filename)
len(dzn_files)
import sys
fzn_path = f'''/home/{my_env['USER']}/data/fzn/or-tools/Mz1-noAbs/'''
minizinc_base_cmd = f'''/home/{my_env['USER']}/minizinc/build/minizinc \
-Werror \
--compile --solver org.minizinc.mzn-fzn \
/home/{my_env['USER']}/models/mzn/Mz1-noAbs.mzn '''
translate_count = 0
for dzn in dzn_files:
translate_count += 1
minizinc_transform_cmd = minizinc_base_cmd + dzn_path + dzn \
+ ' --output-to-file ' + fzn_path + dzn.replace('.', '-') + '.fzn'
print(f'''\r({translate_count}/{len(dzn_files)}) Translating {dzn_path + dzn} to {fzn_path + dzn.replace('.', '-')}.fzn''', end='')
sys.stdout.flush()
subprocess.check_output(minizinc_transform_cmd, shell=True,
universal_newlines=True)
```
## Transform `dzn` to `fzn` Using `Mz2-noAbs.mzn`
Transform each desired `.dzn` data file to `.fzn` using the `Mz2-noAbs.mzn` MiniZinc model.
```
import os
dzn_files = []
dzn_path = f'''/home/{my_env['USER']}/data/dzn/'''
for filename in os.listdir(dzn_path):
if filename.endswith(".dzn"):
dzn_files.append(filename)
len(dzn_files)
import sys
fzn_path = f'''/home/{my_env['USER']}/data/fzn/or-tools/Mz2-noAbs/'''
minizinc_base_cmd = f'''/home/{my_env['USER']}/minizinc/build/minizinc \
-Werror \
--compile --solver org.minizinc.mzn-fzn \
/home/{my_env['USER']}/models/mzn/Mz2-noAbs.mzn '''
translate_count = 0
for dzn in dzn_files:
translate_count += 1
minizinc_transform_cmd = minizinc_base_cmd + dzn_path + dzn \
+ ' --output-to-file ' + fzn_path + dzn.replace('.', '-') + '.fzn'
print(f'''\r({translate_count}/{len(dzn_files)}) Translating {dzn_path + dzn} to {fzn_path + dzn.replace('.', '-')}.fzn''', end='')
sys.stdout.flush()
subprocess.check_output(minizinc_transform_cmd, shell=True,
universal_newlines=True)
```
## Test Generated `fzn` Files Using `OR-Tools`
```
import subprocess
output_Mz1 = subprocess.check_output(
f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools ../data/fzn/or-tools/Mz1-noAbs/A004-dzn.fzn''',
shell=True, universal_newlines=True)
output_Mz2 = subprocess.check_output(
f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools ../data/fzn/or-tools/Mz2-noAbs/A004-dzn.fzn''',
shell=True, universal_newlines=True)
print(f'''Output Mz1: \n\n{output_Mz1}''')
print(f'''Output Mz2: \n\n{output_Mz2}''')
import subprocess
output_Mz1 = subprocess.check_output(
f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools ../data/fzn/or-tools/Mz1-noAbs/A012-dzn.fzn''',
shell=True, universal_newlines=True)
output_Mz2 = subprocess.check_output(
f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools ../data/fzn/or-tools/Mz2-noAbs/A012-dzn.fzn''',
shell=True, universal_newlines=True)
print(f'''Output Mz1: \n\n{output_Mz1}''')
print(f'''Output Mz2: \n\n{output_Mz2}''')
import subprocess
output_Mz1 = subprocess.check_output(
f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools ../data/fzn/or-tools/Mz1-noAbs/R008-dzn.fzn''',
shell=True, universal_newlines=True)
output_Mz2 = subprocess.check_output(
f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools ../data/fzn/or-tools/Mz2-noAbs/R008-dzn.fzn''',
shell=True, universal_newlines=True)
print(f'''Output Mz1: \n\n{output_Mz1}''')
print(f'''Output Mz2: \n\n{output_Mz2}''')
import subprocess
output_Mz1 = subprocess.check_output(
f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools ../data/fzn/or-tools/Mz1-noAbs/R028-dzn.fzn''',
shell=True, universal_newlines=True)
output_Mz2 = subprocess.check_output(
f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools ../data/fzn/or-tools/Mz2-noAbs/R028-dzn.fzn''',
shell=True, universal_newlines=True)
print(f'''Output Mz1: \n\n{output_Mz1}''')
print(f'''Output Mz2: \n\n{output_Mz2}''')
```
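Rather than testing a handful of hand-picked instances, the optional sketch below loops over every generated `.fzn` file for one model and collects the solver output. The paths follow the directories used above; adjust them if your layout differs.
```
import os
import subprocess

fzn_dir = f'''/home/{my_env['USER']}/data/fzn/or-tools/Mz1-noAbs/'''
solver = f'''/home/{my_env['USER']}/or-tools/bin/fzn-or-tools'''

results = {}
for fzn_file in sorted(os.listdir(fzn_dir)):
    if not fzn_file.endswith('.fzn'):
        continue
    # Solve each instance and keep the raw solver output keyed by file name
    results[fzn_file] = subprocess.check_output(f'{solver} {fzn_dir + fzn_file}',
                                                shell=True, universal_newlines=True)

print(f'Solved {len(results)} instances with OR-Tools.')
```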
# VacationPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import gmaps
import os
# Import API key
from api_keys import g_key
```
### Store Part I results into DataFrame
* Load the csv exported in Part I to a DataFrame
```
datafile = "city_data.csv"
datafile_df = pd.read_csv(datafile)
datafile_df.head()
```
### Humidity Heatmap
* Configure gmaps.
* Use the Lat and Lng as locations and Humidity as the weight.
* Add Heatmap layer to map.
```
# Access maps with unique API key
gmaps.configure(api_key=g_key)
# Store latitude and longitude in locations
locations = datafile_df[["Lat", "Lng"]]
# Convert humidity values to float
humidity = datafile_df["Humidity"].astype(float)
fig = gmaps.figure()
# Create heat layer
heat_layer = gmaps.heatmap_layer(locations, weights=humidity)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
```
### Create new DataFrame fitting weather criteria
* Narrow down the cities to fit weather conditions.
* Drop any rows with null values (made explicit in the note after the next cell).
```
weather_df = pd.DataFrame(datafile_df, columns=["City", "Country","Max Temp", "Wind Speed", "Cloudiness", "Lat", "Lng"])
weather_df = weather_df.rename(columns={"Max Temp":"MaxTemp", "Wind Speed":"WindSpeed"})
weather_df = weather_df[(weather_df.MaxTemp <= 80) & (weather_df.MaxTemp >= 70)
& (weather_df.WindSpeed >= 10) & (weather_df.Cloudiness == 0)]
weather_df.head(10)
```
### Hotel Map
* Store into variable named `hotel_df`.
* Add a "Hotel Name" column to the DataFrame.
* Set parameters to search for hotels within 5000 meters.
* Hit the Google Places API for each city's coordinates.
* Store the first Hotel result into the DataFrame.
* Plot markers on top of the heatmap.
```
hotel_df = weather_df.copy()
hotel_df["Hotel Name"] = ""
hotel_df["Hotel_lat"] = ""
hotel_df["Hotel_lng"] = ""
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params = {
"radius": 5000,
"types": "hotel",
"key": g_key
}
# use iterrows to iterate through the pandas DataFrame
for index, row in hotel_df.iterrows():
    # get city latitude and longitude from the DataFrame
lat = row['Lat']
lng = row['Lng']
# add the Lat and Lng value to the parameter list
params['location'] = f'{lat},{lng}'
# assemble url and make API request
response = requests.get(base_url, params=params).json()
# extract results
results = response['results']
try:
hotel_df.loc[index, 'Hotel Name'] = results[0]['name']
hotel_df.loc[index, 'Hotel_lat'] = results[0]["geometry"]["location"]["lat"]
hotel_df.loc[index, 'Hotel_lng'] = results[0]["geometry"]["location"]["lng"]
except (KeyError, IndexError):
print("Missing field/result... skipping.")
hotel_df.head()
hotel_df = hotel_df.replace("", np.nan)
hotel_df = hotel_df.dropna()
hotel_df.head()
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Hotel_lat", "Hotel_lng"]]
# Add marker layer on top of the heat map
markers = gmaps.marker_layer(locations, info_box_content = hotel_info)
# Display figure
fig.add_layer(markers)
fig
```
```
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from utils import PlotParams
plotter = PlotParams()
plotter.set_params()
RES_DIR = os.path.join(os.pardir, 'results_new')
SAVE_DIR = os.path.join(os.pardir, 'figs_new')
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
df = pd.read_csv('figure1_info.csv')
df_test = pd.read_csv('figure1_info_test.csv')
```
### MNIST
```
seeds = [0, 1, 2, 3, 4]
distortions = []
final_alphas = []
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'mnist_tgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
distortions.append(train_losses.loc[train_losses['Loss'] == 'recon_loss']['Value'].iloc[-1])
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
train_losses = pd.read_csv(os.path.join(RES_DIR, 'mnist_kl_b-1.0', 'train_losses.log'))
kl_recon = train_losses.loc[train_losses['Loss'] == 'recon_loss']['Value'].iloc[-1]
plt.axhline(kl_recon, color='k', ls='--', label='ELBO')
df_subset = df.loc[(df['Dataset'] == 'mnist') & (df['Divergence'] == 'GJS') & (df['Epochs'] == 100)]
plt.plot(df_subset['alpha'], df_subset['Reconstruction Loss'], 'bo--', markerfacecolor=(1, 1, 1, 0.5), label=r'JS$^{G_{\alpha}}$')
plt.scatter(final_alphas, distortions, color='b', marker='*', s=200, zorder=10, label=r't-JS$^{G_{\alpha}}$')
distortions = []
final_alphas = []
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'mnist_tdgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
distortions.append(train_losses.loc[train_losses['Loss'] == 'recon_loss']['Value'].iloc[-1])
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
df_subset = df.loc[(df['Dataset'] == 'mnist') & (df['Divergence'] == 'dGJS') & (df['Epochs'] == 100)]
plt.plot(df_subset['alpha'], df_subset['Reconstruction Loss'], 'ro--', markerfacecolor=(1, 1, 1, 0.5), label=r'JS$^{G_{\alpha}}_{*}$')
plt.scatter(final_alphas, distortions, color='r', marker='*', s=200, zorder=10, label=r't-JS$^{G_{\alpha}}_{*}$')
plt.xlim(0, 1)
plt.xlabel(r'$\alpha$')
plt.ylabel('Reconstruction loss')
plt.legend()
plt.grid()
plt.tight_layout()
plt.show()
seeds = [0, 1, 2, 3, 4]
distortions = []
final_alphas = []
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'mnist_tgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
with open(os.path.join(sub_dir, 'test_eval.log')) as f:
test_losses = json.load(f)
distortions.append(test_losses['recon_loss'])
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
# train_losses = pd.read_csv(os.path.join(RES_DIR, 'mnist_kl_b-1.0', 'train_losses.log'))
with open(os.path.join(RES_DIR, 'mnist_kl_b-1.0', 'test_eval.log')) as f:
test_losses = json.load(f)
# kl_recon = train_losses.loc[train_losses['Loss'] == 'recon_loss']['Value'].iloc[-1]
plt.axhline(test_losses['recon_loss'], color='k', ls='-', label='ELBO')
df_subset = df_test.loc[(df_test['Dataset'] == 'mnist') & (df_test['Divergence'] == 'GJS') & (df_test['Epochs'] == 100)]
plt.plot(df_subset['alpha'], df_subset['Reconstruction Loss'], 'bo-', markerfacecolor=(1, 1, 1, 0.5), label=r'JS$^{G_{\alpha}}$')
plt.scatter(final_alphas, distortions, color='b', marker='*', s=200, zorder=10, label=r't-JS$^{G_{\alpha}}$')
distortions = []
final_alphas = []
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'mnist_tdgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
with open(os.path.join(sub_dir, 'test_eval.log')) as f:
test_losses = json.load(f)
distortions.append(test_losses['recon_loss'])
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
df_subset = df_test.loc[(df_test['Dataset'] == 'mnist') & (df_test['Divergence'] == 'dGJS') & (df_test['Epochs'] == 100)]
plt.plot(df_subset['alpha'], df_subset['Reconstruction Loss'], 'ro-', markerfacecolor=(1, 1, 1, 0.5), label=r'JS$^{G_{\alpha}}_{*}$')
plt.scatter(final_alphas, distortions, color='r', marker='*', s=200, zorder=10, label=r't-JS$^{G_{\alpha}}_{*}$')
plt.xlim(0, 1)
plt.xlabel(r'$\alpha$')
plt.ylabel('Reconstruction loss')
plt.legend()
plt.grid()
plt.tight_layout()
plt.savefig(os.path.join(SAVE_DIR, 'fig1_mnist.pdf'))
plt.show()
```
### Fashion
```
seeds = [0, 1, 2, 3, 4]
distortions = []
final_alphas = []
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'fashion_tgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
distortions.append(train_losses.loc[train_losses['Loss'] == 'recon_loss']['Value'].iloc[-1])
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
train_losses = pd.read_csv(os.path.join(RES_DIR, 'fashion_kl_b-1.0', 'train_losses.log'))
kl_recon = train_losses.loc[train_losses['Loss'] == 'recon_loss']['Value'].iloc[-1]
plt.axhline(kl_recon, color='k', ls='--', label='ELBO')
plt.scatter(final_alphas, distortions, color='b', marker='*', s=200, zorder=10, label=r't-JS$^{G_{\alpha}}$')
df_subset = df.loc[(df['Dataset'] == 'fashion') & (df['Divergence'] == 'GJS') & (df['Epochs'] == 100)]
plt.plot(df_subset['alpha'], df_subset['Reconstruction Loss'], 'bo--', markerfacecolor=(1, 1, 1, 0.5), label=r'JS$^{G_{\alpha}}$')
distortions = []
final_alphas = []
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'fashion_tdgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
distortions.append(train_losses.loc[train_losses['Loss'] == 'recon_loss']['Value'].iloc[-1])
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
plt.scatter(final_alphas, distortions, color='r', marker='*', s=200, zorder=10, label=r't-JS$^{G_{\alpha}}_{*}$')
df_subset = df.loc[(df['Dataset'] == 'fashion') & (df['Divergence'] == 'dGJS') & (df['Epochs'] == 100)]
plt.plot(df_subset['alpha'], df_subset['Reconstruction Loss'], 'ro--', markerfacecolor=(1, 1, 1, 0.5), label=r'JS$^{G_{\alpha}}_{*}$')
plt.xlim(0, 1)
plt.xlabel(r'$\alpha$')
plt.ylabel('Reconstruction loss')
plt.legend()
plt.grid()
plt.tight_layout()
# plt.savefig(os.path.join(SAVE_DIR, 'fig1_fashion.pdf'))
plt.show()
seeds = [0, 1, 2, 3, 4]
distortions = []
final_alphas = []
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'fashion_tgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
with open(os.path.join(sub_dir, 'test_eval.log')) as f:
test_losses = json.load(f)
distortions.append(test_losses['recon_loss'])
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
with open(os.path.join(RES_DIR, 'fashion_kl_b-1.0', 'test_eval.log')) as f:
test_losses = json.load(f)
plt.axhline(test_losses['recon_loss'], color='k', ls='-', label='ELBO')
df_subset = df_test.loc[(df_test['Dataset'] == 'fashion') & (df_test['Divergence'] == 'GJS') & (df_test['Epochs'] == 100)]
plt.plot(df_subset['alpha'], df_subset['Reconstruction Loss'], 'bo-', markerfacecolor=(1, 1, 1, 0.5), label=r'JS$^{G_{\alpha}}$')
plt.scatter(final_alphas, distortions, color='b', marker='*', s=200, zorder=10, label=r't-JS$^{G_{\alpha}}$')
distortions = []
final_alphas = []
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'fashion_tdgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
with open(os.path.join(sub_dir, 'test_eval.log')) as f:
test_losses = json.load(f)
distortions.append(test_losses['recon_loss'])
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
df_subset = df_test.loc[(df_test['Dataset'] == 'fashion') & (df_test['Divergence'] == 'dGJS') & (df_test['Epochs'] == 100)]
plt.plot(df_subset['alpha'], df_subset['Reconstruction Loss'], 'ro-', markerfacecolor=(1, 1, 1, 0.5), label=r'JS$^{G_{\alpha}}_{*}$')
plt.scatter(final_alphas, distortions, color='r', marker='*', s=200, zorder=10, label=r't-JS$^{G_{\alpha}}_{*}$')
plt.xlim(0, 1)
plt.xlabel(r'$\alpha$')
plt.ylabel('Reconstruction loss')
plt.legend()
plt.grid()
plt.tight_layout()
plt.savefig(os.path.join(SAVE_DIR, 'fig1_fashion.pdf'))
plt.show()
```
### Convergence
```
datasets = ['mnist', 'fashion', 'dsprites', 'chairs', 'celeba']
for dset in datasets:
final_alphas = []
for seed in [0, 1, 2, 3]:
sub_dir = os.path.join(RES_DIR, f'{dset}_tdgjs_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
final_alphas.append(train_losses.loc[train_losses['Loss'] == 'alpha']['Value'].iloc[-1])
final_alphas = np.array(final_alphas)
print(dset, final_alphas.mean(), final_alphas.std())
seeds = [0, 1, 2, 3, 4]
for div in ['tGJS', 'tdGJS']:
for dset in ['mnist', 'fashion']:
for seed in seeds:
sub_dir = os.path.join(RES_DIR, f'{dset}_{div}_s-{seed}')
train_losses = pd.read_csv(os.path.join(sub_dir, 'train_losses.log'))
distortions = train_losses.loc[train_losses['Loss'] == 'recon_loss']['Value']
alphas = train_losses.loc[train_losses['Loss'] == 'alpha']['Value']
plt.plot(alphas, distortions, 'k--', alpha=0.2)
plt.scatter(alphas, distortions, c=np.arange(1, 101), ls='-', marker='*', s=200)
plt.xlim(0, 1)
plt.xlabel(r'$\alpha$')
plt.ylabel('Reconstruction loss')
cbar = plt.colorbar()
cbar.set_label('Epoch', rotation=270, labelpad=10)
# plt.legend([f'{dset} {div}'])
plt.tight_layout()
plt.grid()
plt.savefig(os.path.join(SAVE_DIR, f'seed_train_{dset}_{div}.pdf'))
plt.show()
```
# Data Analysis
```
"""
Visualize and analyze data.
@author: Juan Felipe Latorre Gil - [email protected]
"""
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
from ast import literal_eval
import matplotlib.pyplot as plt
plt.style.use(['ggplot','seaborn-paper'])
```
### Load Data
```
path_low = './results/df_low.csv'
path_med = './results/df_med.csv'
path_high = './results/df_high.csv'
df_low = pd.read_csv(path_low, index_col= 'Unnamed: 0')
df_med = pd.read_csv(path_med, index_col= 'Unnamed: 0')
df_high = pd.read_csv(path_high, index_col= 'Unnamed: 0')
data = {'Low': df_low, 'Med': df_med, 'High': df_high}
df_data = pd.DataFrame()
df_data = pd.concat(data)
df_data
df_log = pd.DataFrame()
df_SVC = pd.DataFrame()
df_RF = pd.DataFrame()
df_MLP = pd.DataFrame()
deci=3
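# The metric lists were saved to CSV as strings, so unpack them with literal_eval and
# build one long-format DataFrame per model, stacking the Low/Med/High resolutions.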
for i in (['LR','SVC', 'RF','MLP']):
df_unido = pd.DataFrame()
df_low = pd.DataFrame()
df_low['Thresholds'] = literal_eval(df_data[i].Low.Thresholds)
df_low['Precisions'] = literal_eval(df_data[i].Low.Precisions)
df_low['Recalls'] = literal_eval(df_data[i].Low.Recalls)
df_low['Resolution'] = 'Low'
df_low['False Positives Rate'] = pd.Series(literal_eval(df_data[i].Low.False_Positives_Rate))
df_low['True Positive Rate'] = pd.Series(literal_eval(df_data[i].Low.True_Positive_Rate))
df_low['Processing Time'] = df_data[i].Low.Processing_Time
df_low['ROC AUC'] = df_data[i].Low.ROC_AUC
df_low['PR AUC'] = df_data[i].Low.PR_AUC
df_med = pd.DataFrame()
df_med['Thresholds'] = literal_eval(df_data[i].Med.Thresholds)
df_med['Precisions'] = literal_eval(df_data[i].Med.Precisions)
df_med['Recalls'] = literal_eval(df_data[i].Med.Recalls)
df_med['Resolution'] = 'Med'
df_med['False Positives Rate'] = pd.Series(literal_eval(df_data[i].Med.False_Positives_Rate))
df_med['True Positive Rate'] = pd.Series(literal_eval(df_data[i].Med.True_Positive_Rate))
df_med['Processing Time'] = df_data[i].Med.Processing_Time
df_med['ROC AUC'] = df_data[i].Med.ROC_AUC
df_med['PR AUC'] = df_data[i].Med.PR_AUC
df_unido = df_low.append(df_med, ignore_index=True)
df_high = pd.DataFrame()
df_high['Thresholds'] = literal_eval(df_data[i].High.Thresholds)
df_high['Precisions'] = literal_eval(df_data[i].High.Precisions)
df_high['Recalls'] = literal_eval(df_data[i].High.Recalls)
df_high['Resolution'] = 'High'
df_high['False Positives Rate'] = pd.Series(literal_eval(df_data[i].High.False_Positives_Rate))
df_high['True Positive Rate'] = pd.Series(literal_eval(df_data[i].High.True_Positive_Rate))
df_high['Processing Time'] = df_data[i].High.Processing_Time
df_high['ROC AUC'] = df_data[i].High.ROC_AUC
df_high['PR AUC'] = df_data[i].High.PR_AUC
df_unido = df_unido.append(df_high, ignore_index=True)
df_unido['Model'] = i
if i == 'LR':
df_log = df_unido
elif i == 'SVC':
df_SVC = df_unido
elif i == 'RF':
df_RF = df_unido
elif i == 'MLP':
df_MLP = df_unido
df_data_2 = df_log.append(df_SVC, ignore_index=True)
df_data_2 = df_data_2.append(df_RF, ignore_index=True)
df_data_2 = df_data_2.append(df_MLP, ignore_index=True)
df_data_2['PR AUC'] = df_data_2['PR AUC'].astype(float)
df_data_2['Processing Time'] = df_data_2['Processing Time'].astype(float)
df_data_2['ROC AUC'] = df_data_2['ROC AUC'].astype(float)
df_data_2_p = df_data_2[['Thresholds','Precisions','Resolution','Model', 'Processing Time']]
df_data_2_p.rename(columns={'Precisions':'Score'}, inplace=True)
df_data_2_p['Score Type'] = 'Precision'
df_data_2_r = df_data_2[['Thresholds','Recalls','Resolution','Model', 'Processing Time']]
df_data_2_r.rename(columns={'Recalls':'Score'}, inplace=True)
df_data_2_r['Score Type'] = 'Recall'
df_data_3 = df_data_2_p.append(df_data_2_r, ignore_index=True)
```
# Data Visualization
## Area Under the Precision-Recall Curve Heat Map
```
size = (8, 7)
a4_dims = size
palette_3 = sns.color_palette("mako_r",3)
flatui = [ "#95a5a6", "#2ecc71", "#34495e", "#e74c3c"]
palette_4 = sns.color_palette(flatui)
#sns.palplot(sns.color_palette(flatui))
df_plot = df_data_2.pivot_table(index='Model',
columns='Resolution',
values='PR AUC')
reorderlist = ['MLP', 'RF', 'SVC', 'LR']
df_plot = df_plot.reindex(reorderlist)
custom_dict = {'Low':0,'Med':1,'High':2}
df_plot = pd.DataFrame(df_plot, columns=sorted(custom_dict, key=custom_dict.get))
ax = sns.heatmap(df_plot, annot=True, fmt='.3g', cmap="YlGnBu",
cbar_kws={'label': 'Area Under the Precision-Recall Curve'})
ax.set(xlabel='Resolution', ylabel='Model')
ax.figure.set_size_inches(size)
plt.savefig('./results/graphics/HM_PR.pdf', format='pdf', bbox_inches='tight')
```
## Processing Time Training Heat Map
```
df_plot = df_data_2.pivot_table(index='Model',
columns='Resolution',
values='Processing Time')
reorderlist = ['MLP', 'RF', 'SVC', 'LR']
df_plot = df_plot.reindex(reorderlist)
custom_dict = {'Low':0,'Med':1,'High':2}
df_plot = pd.DataFrame(df_plot, columns=sorted(custom_dict, key=custom_dict.get))
ax = sns.heatmap(df_plot, annot=True, fmt='.3g', cmap="YlGnBu",
cbar_kws={'label': 'Processing Time Training ($s$)'})
ax.set(xlabel='Resolution', ylabel='Model')
ax.figure.set_size_inches(size)
plt.savefig('./results/graphics/HM_PT.pdf', format='pdf', bbox_inches='tight')
```
# Thresholds vs Score
## Logistic Regression
```
df_plot = df_data_3[df_data_3['Model'].isin(['LR'])]
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Thresholds", y="Score", hue="Resolution", style="Score Type",
palette=palette_3, legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
ax.set_xlim(-2.5,3.5)
plt.savefig('./results/graphics/ST_LR.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## Support Vector Machine
```
df_plot = df_data_3[df_data_3['Model'].isin(['SVC'])]
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Thresholds", y="Score", hue="Resolution", style="Score Type", palette=palette_3,
legend="full", data=df_plot)
ax.set_xlim(-1,4)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
plt.savefig('./results/graphics/ST_SVC.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## Random Forest
```
df_plot = df_data_3[df_data_3['Model'].isin(['RF'])]
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Thresholds", y="Score", hue="Resolution", style="Score Type", palette=palette_3,
legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
plt.savefig('./results/graphics/ST_RF.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## MultiLayer Perceptron
```
df_plot = df_data_3[df_data_3['Model'].isin(['MLP'])]
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Thresholds", y="Score", hue="Resolution", style="Score Type", palette=palette_3,
legend="full", data= df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
plt.savefig('./results/graphics/ST_MLP.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
# Precision vs Recall
## All Models and all Resolutions
```
df_plot = df_data_2
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Recalls", y="Precisions", hue="Model", style="Resolution",
palette=palette_4, legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(0.7,1.01)
plt.savefig('./results/graphics/PR.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## Logistic Regression
```
df_plot = df_data_2[df_data_2['Model']=='LR']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Recalls", y="Precisions", hue="Resolution",
palette=palette_3, legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(0.4,1.05)
plt.savefig('./results/graphics/PR_LR.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## Support Vector Machine
```
df_plot = df_data_2[df_data_2['Model']=='SVC']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Recalls", y="Precisions", hue="Resolution", palette=palette_3,
legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(0.2,1.05)
plt.savefig('./results/graphics/PR_SVC.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## Random Forest
```
df_plot = df_data_2[df_data_2['Model']=='RF']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Recalls", y="Precisions", hue="Resolution", palette=palette_3,
legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(0.4,1.05)
plt.savefig('./results/graphics/PR_RF.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## MultiLayer Perceptron
```
df_plot = df_data_2[df_data_2['Model']=='MLP']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Recalls", y="Precisions", hue="Resolution", palette=palette_3,
legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(0.7,1.02)
plt.savefig('./results/graphics/PR_MLP.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
# ROC Curve
## All Models and all Resolutions
```
palette = sns.color_palette(flatui)
df_plot = df_data_2
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="False Positives Rate", y="True Positive Rate", hue="Model", palette=palette_4,
style="Resolution", legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(-0.02,0.3)
ax.set_ylim(0.5,1.02)
plt.plot([0,1],[0,1], 'k--')
plt.savefig('./results/graphics/ROC.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## Logistic Regression
```
palette = sns.color_palette("mako_r",3)
df_plot = df_data_2[df_data_2['Model']=='LR']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="False Positives Rate", y="True Positive Rate", hue="Resolution", palette=palette_3,
legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
#ax.set_xlim(0.7,1.02)
plt.plot([0,1],[0,1], 'k--')
plt.savefig('./results/graphics/ROC_LR.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## Support Vector Machine
```
df_plot = df_data_2[df_data_2['Model']=='SVC']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="False Positives Rate", y="True Positive Rate", hue="Resolution", palette=palette_3,
legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
#ax.set_xlim(0.7,1.02)
plt.plot([0,1],[0,1], 'k--')
plt.savefig('./results/graphics/ROC_SVC.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## Random Forest
```
df_plot = df_data_2[df_data_2['Model']=='RF']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="False Positives Rate", y="True Positive Rate", hue="Resolution", palette=palette_3,
legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
#ax.set_xlim(0.7,1.02)
plt.plot([0,1],[0,1], 'k--')
plt.savefig('./results/graphics/ROC_RF.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## MultiLayer Perceptron
```
df_plot = df_data_2[df_data_2['Model']=='MLP']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="False Positives Rate", y="True Positive Rate", hue="Resolution", palette=palette_3,
legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
#ax.set_xlim(0.7,1.02)
plt.plot([0,1],[0,1], 'k--')
plt.savefig('./results/graphics/ROC_MLP.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
# Precision and Recall vs Thresholds
## All Models Low Resolution
```
palette = sns.color_palette("mako_r",4)
df_plot = df_data_3[df_data_3['Resolution']=='Low']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Thresholds", y="Score", hue="Model", style="Score Type", palette=palette_4,
data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
ax.set_xlim(-1,4)
plt.savefig('./results/graphics/ST_LOW.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## All Models Medium Resolution
```
df_plot = df_data_3[df_data_3['Resolution']=='Med']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Thresholds", y="Score", hue="Model", style="Score Type", palette=palette_4,
data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
ax.set_xlim(-1,4)
plt.savefig('./results/graphics/ST_MED.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## All Models High Resolution
```
df_plot = df_data_3[df_data_3['Resolution']=='High']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Thresholds", y="Score", hue="Model", style="Score Type", palette=palette_4,
data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
ax.set_xlim(-1,4)
plt.savefig('./results/graphics/ST_HIGH.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
# Precision vs Recall
## All Models Low Resolution
```
df_plot = df_data_2[df_data_2['Resolution']=='Low']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Recalls", y="Precisions", hue="Model", palette=palette_4,
legend="full", sizes=(1,4), data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(0.4,1.02)
plt.savefig('./results/graphics/PR_LOW.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## All Models Medium Resolution
```
df_plot = df_data_2[df_data_2['Resolution']=='Med']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Recalls", y="Precisions", hue="Model", palette=palette_4,
legend="full", sizes=(1,4), data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(0.3,1.02)
plt.savefig('./results/graphics/PR_MED.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## All Models High Resolution
```
df_plot = df_data_2[df_data_2['Resolution']=='High']
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="Recalls", y="Precisions", hue="Model", palette=palette_4,
legend="full", sizes=(1,4), data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
ax.set_xlim(0.4,1.02)
plt.savefig('./results/graphics/PR_HIGH.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
# ROC Curve and Area Under Curve
## All Models Low Resolution
```
df_plot = df_data_2[df_data_2['Resolution']=='Low']
df_plot['ROC AUC'] = np.round_(df_plot['ROC AUC'],decimals=3)
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="False Positives Rate", y="True Positive Rate", hue="Model", palette=palette_4,
size="ROC AUC", legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
#ax.set_xlim(0.7,1.02)
plt.plot([0,1],[0,1], 'k--')
plt.savefig('./results/graphics/ROC_LOW.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## All Models Medium Resolution
```
df_plot = df_data_2[df_data_2['Resolution']=='Med']
df_plot['ROC AUC'] = np.round_(df_plot['ROC AUC'],decimals=3)
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="False Positives Rate", y="True Positive Rate", hue="Model", palette=palette_4,
size="ROC AUC", legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
#ax.set_xlim(0.7,1.02)
plt.plot([0,1],[0,1], 'k--')
plt.savefig('./results/graphics/ROC_MED.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
## All Models High Resolution
```
df_plot = df_data_2[df_data_2['Resolution']=='High']
df_plot['ROC AUC'] = np.round_(df_plot['ROC AUC'],decimals=3)
fig, ax = plt.subplots(figsize=a4_dims)
ax = sns.lineplot(x="False Positives Rate", y="True Positive Rate", hue="Model", palette=palette_4,
size="ROC AUC", legend="full", data=df_plot)
#plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.45))
#ax.set_xlim(0.7,1.02)
plt.plot([0,1],[0,1], 'k--')
plt.savefig('./results/graphics/ROC_HIGH.pdf', format='pdf', bbox_inches='tight')
plt.show()
```
Copyright Jana Schaich Borg/Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
# MySQL Exercise 11: Queries that Test Relationships Between Test Completion and Dog Characteristics
In this lesson, we are going to integrate all the SQL syntax we've learned so far to start addressing questions in our Dognition Analysis Plan.  I summarized the reasons why having an analysis plan is so important in the "Start with an Analysis Plan" video accompanying this week's materials.  Analysis plans ensure that you will address questions that are relevant to your business objectives as quickly and efficiently as possible.  The quickest way to narrow in on the factors in your analysis plan that are likely to create new insights is to combine simple SQL calculations with visualization programs, like Tableau, to identify which factors under consideration have the strongest effects on the business metric you are tasked with improving.  You can then design more nuanced statistical models in other software, such as R, based on the factors you have confirmed are likely to be important for understanding and changing your business metric.
<img src="https://duke.box.com/shared/static/davndrvd4jb1awwuq6sd1rgt0ck4o8nm.jpg" width=400 alt="SELECT FROM WHERE ORDER BY" />
I describe a method for designing analysis plans in the Data Visualization and Communication with Tableau course earlier in this Specialization. I call that method Structured Pyramid Analysis Plans, or "sPAPs". I have provided a skeleton of an sPAP for the Dognition data set with the materials for this course that I will use as a road map for the queries we will design and practice in the next two lessons. To orient you, the SMART goal of the analysis project is at the top of the pyramid. This is a specific, measurable, attainable, relevant, and time-bound version of the general project objective, which is to make a recommendation to Dognition about what they could do to increase the number of tests customers complete. The variables you will use to assess the goal should be filled out right under where the SMART goal is written. Then under those variables, you will see ever-widening layers of categories and sub-categories of issues that will be important to analyze in order to achieve your SMART goal.
In this lesson, we will write queries to address the issues in the left-most branch of the sPAP. These issues all relate to "Features of Dogs" that could potentially influence the number of tests the dogs will ultimately complete. We will spend a lot of time discussing and practicing how to translate analysis questions described in words into queries written in SQL syntax.
To begin, load the sql library and database, and make the Dognition database your default database:
```
%load_ext sql
%sql mysql://studentuser:studentpw@localhost/dognitiondb
%sql USE dognitiondb
```
<img src="https://duke.box.com/shared/static/p2eucjdttai08eeo7davbpfgqi3zrew0.jpg" width=600 alt="SELECT FROM WHERE" />
## 1. Assess whether Dognition personality dimensions are related to the number of tests completed
The first variable in the Dognition sPAP we want to investigate is Dognition personality dimensions. Recall from the "Meet Your Dognition Data" video and the written description of the Dognition Data Set included with the Week 2 materials that Dognition personality dimensions represent distinct combinations of characteristics assessed by the Dognition tests. It is certainly plausible that certain personalities of dogs might be more or less likely to complete tests. For example, "einstein" dogs might be particularly likely to complete a lot of tests.
To test the relationship between Dognition personality dimensions and test completion totals, we need a query that will output a summary of the number of tests completed by dogs that have each of the Dognition personality dimensions. The features you will need to include in your query are foreshadowed by key words in this sentence. First, the fact that you need a summary of the number of tests completed suggests you will need an aggregation function. Next, the fact that you want a different summary for each personality dimension suggests that you will need a GROUP BY clause. Third, the fact that you need a "summary of the number of tests completed" rather than just a "summary of the tests completed" suggests that you might have to have multiple stages of aggregation, which in turn might mean that you will need to use a subquery.
Let's build the query step by step.
**Question 1: To get a feeling for what kind of values exist in the Dognition personality dimension column, write a query that will output all of the distinct values in the dimension column. Use your relational schema or the course materials to determine what table the dimension column is in. Your output should have 11 rows.**
```
%%sql
SELECT DISTINCT dimension
FROM dogs;
```
The results of the query above illustrate there are NULL values (indicated by the output value "none") in the dimension column. Keep that in mind in case it is relevant to future queries.
We want a summary of the total number of tests completed by dogs with each personality dimension. In order to calculate those summaries, we first need to calculate the total number of tests completed by each dog. We can achieve this using a subquery. The subquery will require data from both the dogs and the complete_tests table, so the subquery will need to include a join. We are only interested in dogs who have completed tests, so an inner join is appropriate in this case.
**Question 2: Use the equijoin syntax (described in MySQL Exercise 8) to write a query that will output the Dognition personality dimension and total number of tests completed by each unique DogID. This query will be used as an inner subquery in the next question. LIMIT your output to 100 rows for troubleshooting purposes.**
```
%%sql
SELECT DISTINCT d.dog_guid,d.dimension,
COUNT(c.created_at)
FROM dogs d, complete_tests c
WHERE d.dog_guid=c.dog_guid
GROUP BY d.dog_guid
LIMIT 100;
```
**Question 3: Re-write the query in Question 2 using traditional join syntax (described in MySQL Exercise 8).**
```
%%sql
SELECT DISTINCT d.dog_guid,d.dimension,
COUNT(c.created_at) AS complete_tests
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
GROUP BY d.dog_guid
LIMIT 100;
```
Now we need to summarize the total number of tests completed by each unique DogID within each Dognition personality dimension. To do this we will need to choose an appropriate aggregation function for the count column of the query we just wrote.
**Question 4: To start, write a query that will output the average number of tests completed by unique dogs in each Dognition personality dimension. Choose either the query in Question 2 or 3 to serve as an inner query in your main query. If you have trouble, make sure you use the appropriate aliases in your GROUP BY and SELECT statements.**
```
%%sql
SELECT sub.dimension, AVG(complete_tests)
FROM (SELECT DISTINCT d.dog_guid,d.dimension,
COUNT(c.created_at) AS complete_tests
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
GROUP BY d.dog_guid) AS sub
GROUP BY sub.dimension;
```
You should retrieve an output of 11 rows with one of the dimensions labeled "None" and another labeled "" (nothing is between the quotation marks).
**Question 5: How many unique DogIDs are summarized in the Dognition dimensions labeled "None" or ""? (You should retrieve values of 13,705 and 71)**
```
%%sql
SELECT COUNT(sub.dog_guid),
CASE
WHEN sub.dimension IS NULL THEN 'Null'
WHEN sub.dimension="" THEN 'StrNull'
ELSE 'Else'
END AS CaseSum
FROM (SELECT DISTINCT d.dog_guid,d.dimension,
COUNT(c.created_at) AS complete_tests
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
GROUP BY d.dog_guid) AS sub
GROUP BY CaseSum;
```
It makes sense there would be many dogs with NULL values in the dimension column, because we learned from Dognition that personality dimensions can only be assigned after the initial "Dognition Assessment" is completed, which consists of the first 20 Dognition tests. If dogs did not complete the first 20 tests, they would retain a NULL value in the dimension column.
The non-NULL empty string values are more curious. It is not clear where those values would come from.
**Question 6: To determine whether there are any features that are common to all dogs that have non-NULL empty strings in the dimension column, write a query that outputs the breed, weight, value in the "exclude" column, first or minimum time stamp in the complete_tests table, last or maximum time stamp in the complete_tests table, and total number of tests completed by each unique DogID that has a non-NULL empty string in the dimension column.**
```
%%sql
SELECT d.dog_guid,d.breed,d.weight,d.exclude,
MIN(c.created_at),MAX(c.created_at),COUNT(c.created_at)
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE d.dimension=""
GROUP BY d.dog_guid;
```
A quick inspection of the output from the last query illustrates that almost all of the entries that have non-NULL empty strings in the dimension column also have "exclude" flags of 1, meaning that the entries are meant to be excluded due to factors monitored by the Dognition team. This provides a good argument for excluding the entire category of entries that have non-NULL empty strings in the dimension column from our analyses.
**Question 7: Rewrite the query in Question 4 to exclude DogIDs with (1) non-NULL empty strings in the dimension column, (2) NULL values in the dimension column, and (3) values of "1" in the exclude column. NOTES AND HINTS: You cannot use a clause that says d.exclude does not equal 1 to remove rows that have exclude flags, because Dognition clarified that both NULL values and 0 values in the "exclude" column are valid data. A clause that says you should only include values that are not equal to 1 would remove the rows that have NULL values in the exclude column, because NULL values are never included in equals statements (as we learned in the join lessons). In addition, although it should not matter for this query, practice including parentheses with your OR and AND statements that accurately reflect the logic you intend. Your results should return 402 DogIDs in the ace dimension and 626 dogs in the charmer dimension.**
```
%%sql
SELECT sub.dimension, COUNT(sub.dog_guid), AVG(complete_tests)
FROM (SELECT DISTINCT d.dog_guid,d.dimension,
COUNT(c.created_at) AS complete_tests
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE d.dimension IS NOT NULL AND
d.dimension <> "" AND (d.exclude IS NULL OR
d.exclude=0)
GROUP BY d.dog_guid) AS sub
GROUP BY sub.dimension;
```
The results of Question 7 suggest there are no appreciable differences in the number of tests completed by dogs with different Dognition personality dimensions. Although these analyses are not definitive on their own, these results suggest that focusing on Dognition personality dimensions is unlikely to lead to significant insights about how to improve Dognition completion rates.
## 2. Assess whether dog breeds are related to the number of tests completed
The next variable in the Dognition sPAP we want to investigate is Dog Breed. We will run one analysis with Breed Group and one analysis with Breed Type.
First, determine how many distinct breed groups there are.
**Questions 8: Write a query that will output all of the distinct values in the breed_group field.**
```
%%sql
SELECT DISTINCT d.breed_group
FROM dogs d;
```
You can see that there are NULL values in the breed_group field. Let's examine the properties of these entries with NULL values to determine whether they should be excluded from our analysis.
**Question 9: Write a query that outputs the breed, weight, value in the "exclude" column, first or minimum time stamp in the complete_tests table, last or maximum time stamp in the complete_tests table, and total number of tests completed by each unique DogID that has a NULL value in the breed_group column.**
```
%%sql
SELECT d.breed,d.weight,d.exclude,
MIN(c.created_at),MAX(c.created_at),COUNT(c.created_at)
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE d.breed_group IS NULL
GROUP BY d.dog_guid;
```
There are a lot of these entries and no obvious feature is common to all of them, so at present we do not have a good reason to exclude them from our analysis. Therefore, let's move on to Question 10.
**Question 10: Adapt the query in Question 7 to examine the relationship between breed_group and number of tests completed. Exclude DogIDs with values of "1" in the exclude column. Your results should return 1774 DogIDs in the Herding breed group.**
```
%%sql
SELECT sub.breed_group, COUNT( DISTINCT sub.dog_guid), AVG(complete_tests)
FROM (SELECT d.dog_guid,d.breed_group,
COUNT(c.created_at) AS complete_tests
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE d.exclude IS NULL OR d.exclude=0
GROUP BY d.dog_guid) AS sub
GROUP BY sub.breed_group;
```
The results show there are non-NULL empty-string entries in the breed_group column again. Ignoring them for now, the Herding and Sporting breed_groups complete the most tests, while the Toy breed group completes the fewest tests. This suggests that one avenue an analyst might want to explore further is whether it is worth targeting marketing or certain types of Dognition tests to owners of dogs in the Herding and Sporting breed_groups. Later in this lesson we will discuss whether using a median instead of an average to summarize the number of completed tests might affect this potential course of action.
**Question 11: Adapt the query in Question 10 to only report results for Sporting, Hound, Herding, and Working breed_groups using an IN clause.**
```
%%sql
SELECT sub.breed_group, COUNT( DISTINCT sub.dog_guid), AVG(complete_tests)
FROM (SELECT d.dog_guid,d.breed_group,
COUNT(c.created_at) AS complete_tests
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE (d.exclude IS NULL OR d.exclude=0)
AND d.breed_group IN ('Sporting','Hound','Herding','Working')
GROUP BY d.dog_guid) AS sub
GROUP BY sub.breed_group;
```
Next, let's examine the relationship between breed_type and number of completed tests.
**Questions 12: Begin by writing a query that will output all of the distinct values in the breed_type field.**
```
%%sql
SELECT DISTINCT d.breed_type
FROM dogs d
```
**Question 13: Adapt the query in Question 7 to examine the relationship between breed_type and number of tests completed. Exclude DogIDs with values of "1" in the exclude column. Your results should return 8865 DogIDs in the Pure Breed group.**
```
%%sql
SELECT sub.breed_type, COUNT( DISTINCT sub.dog_guid),
sub.complete_tests,AVG(sub.complete_tests)
FROM (SELECT d.dog_guid,d.breed_type,
COUNT(c.created_at) AS complete_tests
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE d.exclude IS NULL OR d.exclude=0
GROUP BY d.dog_guid) AS sub
GROUP BY sub.breed_type;
```
There does not appear to be an appreciable difference between number of tests completed by dogs of different breed types.
## 3. Assess whether dog breeds and neutering are related to the number of tests completed
To explore the results we found above a little further, let's run some queries that relabel the breed_types according to "Pure_Breed" and "Not_Pure_Breed".
**Question 14: For each unique DogID, output its dog_guid, breed_type, number of completed tests, and use a CASE statement to include an extra column with a string that reads "Pure_Breed" whenever breed_type equals 'Pure Breed' and "Not_Pure_Breed" whenever breed_type equals anything else. LIMIT your output to 50 rows for troubleshooting.**
```
%%sql
SELECT d.dog_guid,d.breed_type,COUNT(c.created_at),
CASE d.breed_type
WHEN 'Pure Breed' THEN 'Pure_Breed'
ELSE 'Not_Pure_Breed'
END AS CasePure
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
GROUP BY d.dog_guid
LIMIT 50;
```
**Question 15: Adapt your queries from Questions 7 and 14 to examine the relationship between breed_type and number of tests completed by Pure_Breed dogs and non_Pure_Breed dogs. Your results should return 8336 DogIDs in the Not_Pure_Breed group.**
```
%%sql
SELECT sub.groupie, COUNT(DISTINCT sub.dog_guid) AS NumDogID,
AVG(TestsCompleted) AS AvgTestsCompleted
FROM (SELECT d.dog_guid,d.breed_type,
IF(d.breed_type='Pure Breed','Pure_Breed','Not_Pure_Breed')
AS groupie, COUNT(c.created_at) AS TestsCompleted
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE d.exclude IS NULL OR d.exclude=0
GROUP BY d.dog_guid) AS sub
GROUP BY sub.groupie;
```
**Question 16: Adapt your query from Question 15 to examine the relationship between breed_type, whether or not a dog was neutered (indicated in the dog_fixed field), and number of tests completed by Pure_Breed dogs and non_Pure_Breed dogs. There are DogIDs with null values in the dog_fixed column, so your results should have 6 rows, and the average number of tests completed by non-pure-breeds who are neutered is 10.5681.**
```
%%sql
SELECT sub.groupie, sub.dog_fixed, COUNT(DISTINCT sub.dog_guid) AS DogCount,
AVG(sub.TestsCompleted) AS AvgTestsCompleted
FROM (SELECT d.dog_guid,d.dog_fixed,d.breed_type,
IF(d.breed_type='Pure Breed','Pure_Breed','Not_Pure_Breed')
AS groupie, COUNT(c.created_at) AS TestsCompleted
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE d.exclude IS NULL OR d.exclude=0
GROUP BY d.dog_guid) AS sub
GROUP BY sub.groupie, sub.dog_fixed;
```
These results suggest that although a dog's breed_type doesn't seem to have a strong relationship with how many tests a dog completed, neutered dogs, on average, seem to finish 1-2 more tests than non-neutered dogs. It may be fruitful to explore further whether this effect is consistent across different segments of dogs broken up according to other variables. If the effects are consistent, the next step would be to seek evidence that could clarify whether neutered dogs are finishing more tests due to traits that arise when a dog is neutered, or instead, whether owners who are more likely to neuter their dogs have traits that make it more likely they will want to complete more tests.
## 4. Other dog features that might be related to the number of tests completed, and a note about using averages as summary metrics
Two other dog features included in our sPAP were speed of game completion and previous behavioral training. Examining the relationship between the speed of game completion and number of games completed is best achieved through creating a scatter plot with a best fit line and/or running a statistical regression analysis. It is possible to achieve the statistical regression analysis through very advanced SQL queries, but the strategy that would be required is outside the scope of this course. Therefore, I would recommend exporting relevant data to a program like Tableau, R, or Matlab in order to assess the relationship between the speed of game completion and number of games completed.
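For instance, once the relevant columns have been exported from MySQL, a few lines of Python can produce the scatter plot and fitted line. The sketch below is only illustrative: the file name and column names are hypothetical placeholders for whatever you choose to export.
```
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import linregress

# Hypothetical export: one row per DogID with its average game duration (minutes)
# and the total number of tests it completed.
df = pd.read_csv("dog_speed_vs_completion.csv")

# Simple least-squares fit to get a first impression of the relationship
fit = linregress(df["avg_game_duration"], df["tests_completed"])
print(f"slope={fit.slope:.3f}, r={fit.rvalue:.3f}, p={fit.pvalue:.3g}")

# Scatter plot with the best-fit line overlaid
plt.scatter(df["avg_game_duration"], df["tests_completed"], s=5, alpha=0.3)
xs = df["avg_game_duration"].sort_values()
plt.plot(xs, fit.intercept + fit.slope * xs, color="red")
plt.xlabel("Average game duration (minutes)")
plt.ylabel("Total tests completed")
plt.show()
```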
Unfortunately, there is no field available in the Dognition data that is relevant to a dog's previous behavioral training, so more data would need to be collected to examine whether previous behavioral training is related to the number of Dognition tests completed.
One last issue I would like to address in this lesson is the issue of whether an average is a good summary to use to represent the values of a certain group. Average calculations are very sensitive to extreme values, or outliers, in the data. This video provides a nice demonstration of how sensitive averages can be:
http://www.statisticslectures.com/topics/outliereffects/
Ideally, you would summarize the data in a group using a median calculation when you either don't know the distribution of values in your data or you already know that outliers are present (the definition of median is covered in the video above). Unfortunately, medians are more computationally intensive than averages, and there is no pre-made function that allows you to calculate medians using SQL. If you wanted to calculate the median, you would need to use an advanced strategy such as the ones described here:
https://www.periscopedata.com/blog/medians-in-sql.html
Despite the fact there is no simple way to calculate medians using SQL, there is a way to get a hint about whether average values are likely to be wildly misleading. As described in the first video (http://www.statisticslectures.com/topics/outliereffects/), strong outliers lead to large standard deviation values. Fortunately, we *CAN* calculate standard deviations in SQL easily using the STDDEV function. Therefore, it is good practice to include standard deviation columns with your outputs so that you have an idea whether the average values outputted by your queries are trustworthy. Whenever standard deviations are a significant portion of the average values of a field, and certainly when standard deviations are larger than the average values of a field, it's a good idea to export your data to a program that can handle more sophisticated statistical analyses before you interpret any results too strongly.
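If you do export a column of values, a quick check in a scripting language will also tell you how far apart the mean and median are before you commit to an interpretation. A minimal pandas sketch, with a hypothetical file and column name standing in for your actual export:
```
import pandas as pd

# Hypothetical export of per-test durations (one row per completed test)
durations = pd.read_csv("exam_durations.csv")["duration_minutes"]

print("mean  :", durations.mean())
print("median:", durations.median())
print("std   :", durations.std())
# If the standard deviation rivals the mean, or the mean and median diverge
# strongly, treat the averages reported by your SQL queries with caution.
```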
Let's practice including standard deviations in our queries and interpreting their values.
**Question 17: Adapt your query from Question 7 to include a column with the standard deviation for the number of tests completed by each Dognition personality dimension.**
```
%%sql
SELECT sub.dimension, COUNT(sub.dog_guid), AVG(complete_tests),
STDDEV(complete_tests) AS stdev
FROM (SELECT DISTINCT d.dog_guid,d.dimension,
COUNT(c.created_at) AS complete_tests
FROM dogs d JOIN complete_tests c
ON d.dog_guid=c.dog_guid
WHERE d.dimension IS NOT NULL AND
d.dimension <> "" AND (d.exclude IS NULL OR
d.exclude=0)
GROUP BY d.dog_guid) AS sub
GROUP BY sub.dimension;
```
The standard deviations are all around 20-25% of the average values of each personality dimension, and they are not appreciably different across the personality dimensions, so the average values are likely fairly trustworthy. Let's try calculating the standard deviation of a different measurement.
**Question 18: Write a query that calculates the average amount of time it took each dog breed_type to complete all of the tests in the exam_answers table. Exclude negative durations from the calculation, and include a column that calculates the standard deviation of durations for each breed_type group:**
```
%%sql
SELECT d.breed_type, AVG(TIMESTAMPDIFF(minute,e.start_time,e.end_time))
AS AveTime, STDDEV(TIMESTAMPDIFF(minute,e.start_time,e.end_time))
AS StdevTime FROM dogs d JOIN exam_answers e
ON d.dog_guid=e.dog_guid WHERE
TIMESTAMPDIFF(minute,e.start_time,e.end_time) > 0
GROUP BY d.breed_type;
```
This time many of the standard deviations have larger magnitudes than the average duration values. This suggests there are outliers in the data that are significantly impacting the reported average values, so the average values are not likely trustworthy. These data should be exported to another program for more sophisticated statistical analysis.
**In the next lesson, we will write queries that assess the relationship between testing circumstances and the number of tests completed. Until then, feel free to practice any additional queries you would like to below!**
```
import sys
import os
import numpy as np
from sklearn.model_selection import KFold
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import GradientBoostingClassifier
sys.path.append(r"D:\Study\Python\Sleep-Analysis-with-accelerometer")  # raw strings avoid invalid backslash escapes in Windows paths
import prepare_data
sorce_path = os.path.join(r"D:\Study\Python\Sleep-Analysis-with-accelerometer",
                          r"ICHI14_dataset\data")
data_path = os.path.join(r"D:\Study\Python\Sleep-Analysis-with-accelerometer",
                         "statistic_features", "stat_features_standardized.csv")
patient_list = ['002','003','005','007','08a','08b','09a','09b', '10a','011','013','014','15a','15b','016',
'017','018','019','020','021','022','023','025','026','027','028','029','030','031','032',
'033','034','035','036','037','038','040','042','043','044','045','047','048','049','051']
statistics_list = ["std_x", "std_y", "std_z"]
```
#### Saving the statistic features.
#### With scaler=True, the accelerometer data are first standardized over the whole dataset, and the statistic features are then calculated for each window.
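For reference, here is a minimal sketch of what the per-window statistic features amount to. The actual computation lives in `prepare_data.save_statistic_features`, so the array shape and window length below are assumptions used only for illustration.
```
import numpy as np

def window_std_features(acc, window_len=60):
    """Split a standardized (n_samples, 3) accelerometer array into
    non-overlapping windows and return the per-axis std of each window."""
    n_windows = len(acc) // window_len
    acc = acc[:n_windows * window_len].reshape(n_windows, window_len, 3)
    return acc.std(axis=1)   # shape (n_windows, 3): std_x, std_y, std_z

# Toy usage with random data standing in for one patient's recording:
print(window_std_features(np.random.randn(600, 3)).shape)   # (10, 3)
```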
```
#prepare_data.save_statistic_features(patient_list, sorce_path=sorce_path, save_path=data_path,
# window_len=60, n_sleep_stages=1, scaler=True)
kf = KFold(n_splits=5, random_state=5, shuffle=True) # Define the split - into 5 folds #5
kf.get_n_splits(patient_list)
for train_index, test_index in kf.split(patient_list):
#train_patient_list = [patient_list[i] for i in train_index]
test_patient_list = [patient_list[i] for i in test_index]
print(test_patient_list)
n_others_windows = 30
```
## 1. Logistic regression
### 1.1 Statistic features per window: STD for each of the 3 axes
```
statistics_list_std = ["std_x", "std_y", "std_z"]
%%time
accuracy_list = []
f1_score_list = []
for train_index, test_index in kf.split(patient_list):
train_patient_list = [patient_list[i] for i in train_index]
test_patient_list = [patient_list[i] for i in test_index]
X_train, y_train = prepare_data.load_stat_features_others_windows(train_patient_list,
data_path=data_path,
n_others_windows=n_others_windows,
statistics_list=statistics_list_std)
X_test, y_test = prepare_data.load_stat_features_others_windows(test_patient_list,
data_path=data_path,
n_others_windows=n_others_windows,
statistics_list=statistics_list_std)
weights = np.ones(y_train.shape)
weights[y_train==1] = 1.8
model1 = LogisticRegression()
model1.fit(X_train, y_train, sample_weight=weights)
print("\nTrain set result: ")
y_predict = model1.predict(X_train)
accuracy_train = metrics.accuracy_score(y_train, y_predict)
f1_train = metrics.f1_score(y_train, y_predict)
print("Accuracy on train set: ", accuracy_train)
print("F1-score on train set: ", f1_train)
print("\nTest set result: ")
y_predict = model1.predict(X_test)
accuracy = metrics.accuracy_score(y_test, y_predict)
f1_test = metrics.f1_score(y_test, y_predict)
accuracy_list.append(accuracy)
f1_score_list.append(f1_test)
print("Accuracy on test set: ", accuracy)
print("F1-score on test set: ", f1_test)
print(metrics.classification_report(y_test, y_predict, target_names=["sleep", "awake"]))
print("Confussion matrix: \n", metrics.confusion_matrix(y_test, y_predict))
print("\n-------------------------------------------------------")
print("\nMean accuracy =", np.mean(accuracy_list))
print("\nMean f1-score =", np.mean(f1_score_list))
```
#### 1.1 Results:
- weight for awake class = 1: 30 windows: acc = 0.7487, f1-score = 0.6532
  Here we detect the sleep class more often than the awake class, so the algorithm slightly overestimates sleep. One reason may be that the dataset contains slightly more windows of the sleep class (see the weight sketch below).
- weight for awake class = 1.5: 30 windows: acc = 0.7415, f1-score = 0.6794
- weight for awake class = 1.7: 30 windows: acc = 0.7342, f1-score = 0.6836
- weight for awake class = 1.8: 30 windows: acc = 0.7296, f1-score = 0.6845; 32 windows: acc = 0.7307, f1-score = 0.6840
- weight for awake class = 1.9: 30 windows: acc = 0.7221, f1-score = 0.6826
- weight for awake class = 2: 30 windows: acc = 0.7147, f1-score = 0.6810

To conclude, it seems reasonable to use a weight of 1.8 for the awake class, as it gives the best f1-score.
#### 1.1.2 Results for only axis X:
- weight for awake class = 1: 32 windows: acc = 0.7263, f1-score = 0.6219
- weight for awake class = 1.5: 32 windows: acc = 0.7150, f1-score = 0.6514
- weight for awake class = 1.7: 32 windows: acc = 0.7082, f1-score = 0.6602
- weight for awake class = 1.8: 30 windows: acc = 0.7038, f1-score = 0.6651; 32 windows: acc = 0.7047, f1-score = 0.6642
- weight for awake class = 1.9: 32 windows: acc = 0.6976, f1-score = 0.6648
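As a side note on choosing the awake-class weight: before any hand-tuning, a natural starting point is the value that simply balances the two classes by their frequencies in the training windows. A minimal sketch (assuming `y_train` is the label vector produced by the loader above, with 1 = awake):
```
import numpy as np

def awake_weight_from_frequencies(y_train):
    """Weight that gives the awake samples the same total weight as the sleep samples."""
    return np.sum(y_train == 0) / np.sum(y_train == 1)

# Toy example: with 60% sleep windows and 40% awake windows the balancing weight is 1.5;
# the hand-tuned value of 1.8 weights the awake class a bit more than pure balancing.
y_toy = np.array([0] * 60 + [1] * 40)
print(awake_weight_from_frequencies(y_toy))   # 1.5
```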
### 1.2 Statistic features per window: PTP (peak-to-peak) for each of the 3 axes
```
statistics_list_ptp = ["ptp_x", "ptp_y", "ptp_z"]
%%time
accuracy_list = []
f1_score_list = []
for train_index, test_index in kf.split(patient_list):
train_patient_list = [patient_list[i] for i in train_index]
test_patient_list = [patient_list[i] for i in test_index]
X_train, y_train = prepare_data.load_stat_features_others_windows(train_patient_list,
data_path=data_path,
n_others_windows=n_others_windows,
statistics_list=statistics_list_ptp)
X_test, y_test = prepare_data.load_stat_features_others_windows(test_patient_list,
data_path=data_path,
n_others_windows=n_others_windows,
statistics_list=statistics_list_ptp)
weights = np.ones(y_train.shape)
weights[y_train==1] = 1.8
model1 = LogisticRegression()
model1.fit(X_train, y_train, sample_weight=weights)
print("\nTrain set result: ")
y_predict = model1.predict(X_train)
accuracy_train = metrics.accuracy_score(y_train, y_predict)
f1_train = metrics.f1_score(y_train, y_predict)
print("Accuracy on train set: ", accuracy_train)
print("F1-score on train set: ", f1_train)
print("\nTest set result: ")
y_predict = model1.predict(X_test)
accuracy = metrics.accuracy_score(y_test, y_predict)
f1_test = metrics.f1_score(y_test, y_predict)
accuracy_list.append(accuracy)
f1_score_list.append(f1_test)
print("Accuracy on test set: ", accuracy)
print("F1-score on test set: ", f1_test)
print(metrics.classification_report(y_test, y_predict, target_names=["sleep", "awake"]))
print("Confussion matrix: \n", metrics.confusion_matrix(y_test, y_predict))
print("\n-------------------------------------------------------")
print("\nMean accuracy =", np.mean(accuracy_list))
print("\nMean f1-score =", np.mean(f1_score_list))
```
#### 1.2 Results:
- weight for awake class = 1: 30 windows: acc = 0.7442, f1-score = 0.6516
- weight for awake class = 1.5: 30 windows: acc = 0.7376, f1-score = 0.6767
- weight for awake class = 1.7: 30 windows: acc = 0.7317, f1-score = 0.6814
- weight for awake class = 1.8: 30 windows: acc = 0.7285, f1-score = 0.6830
- weight for awake class = 1.9: 30 windows: acc = 0.7234, f1-score = 0.6833
- weight for awake class = 2: 30 windows: acc = 0.7178, f1-score = 0.6831

To conclude, it seems reasonable to use a weight of 1.8 for the awake class, as it gives a good f1-score and accuracy.
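Since the cross-validation loop above is repeated almost verbatim for every feature set and classifier in this notebook, one option is to wrap it in a small helper. The sketch below is only a refactoring idea: it assumes the objects defined earlier in the notebook (`kf`, `patient_list`, `data_path`, `n_others_windows`, `prepare_data`) and returns the mean test scores.
```
import numpy as np
from sklearn import metrics
from sklearn.base import clone

def evaluate(model, statistics_list, awake_weight=1.8):
    accs, f1s = [], []
    for train_index, test_index in kf.split(patient_list):
        train_p = [patient_list[i] for i in train_index]
        test_p = [patient_list[i] for i in test_index]
        X_train, y_train = prepare_data.load_stat_features_others_windows(
            train_p, data_path=data_path,
            n_others_windows=n_others_windows, statistics_list=statistics_list)
        X_test, y_test = prepare_data.load_stat_features_others_windows(
            test_p, data_path=data_path,
            n_others_windows=n_others_windows, statistics_list=statistics_list)
        w = np.ones(y_train.shape)
        w[y_train == 1] = awake_weight
        m = clone(model)                      # fresh copy of the estimator per fold
        m.fit(X_train, y_train, sample_weight=w)
        y_pred = m.predict(X_test)
        accs.append(metrics.accuracy_score(y_test, y_pred))
        f1s.append(metrics.f1_score(y_test, y_pred))
    return np.mean(accs), np.mean(f1s)

# Usage, e.g.:
# print(evaluate(LogisticRegression(), ["std_x", "std_y", "std_z"]))
# print(evaluate(LogisticRegression(), ["ptp_x", "ptp_y", "ptp_z"]))
```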
## 2. Gradient Boosting Classifier
### 2.1 Statistic features per window: STD for each of the 3 axes
```
statistics_list_std = ["std_x"]  # note: not passed to the loader calls below
%%time
accuracy_list = []
f1_score_list = []
for train_index, test_index in kf.split(patient_list):
train_patient_list = [patient_list[i] for i in train_index]
test_patient_list = [patient_list[i] for i in test_index]
X_train, y_train = prepare_data.load_stat_features_others_windows(train_patient_list,
data_path=data_path,
n_others_windows=n_others_windows)
X_test, y_test = prepare_data.load_stat_features_others_windows(test_patient_list,
data_path=data_path,
n_others_windows=n_others_windows)
weights = np.ones(y_train.shape)
weights[y_train==1] = 1.5
model1 = GradientBoostingClassifier(n_estimators=40, max_depth=4)
model1.fit(X_train, y_train, sample_weight=weights)
print("\nTrain set result: ")
y_predict = model1.predict(X_train)
accuracy_train = metrics.accuracy_score(y_train, y_predict)
f1_train = metrics.f1_score(y_train, y_predict)
print("Accuracy on train set: ", accuracy_train)
print("F1-score on train set: ", f1_train)
print("\nTest set result: ")
y_predict = model1.predict(X_test)
accuracy = metrics.accuracy_score(y_test, y_predict)
f1_test = metrics.f1_score(y_test, y_predict)
accuracy_list.append(accuracy)
f1_score_list.append(f1_test)
print("Accuracy on test set: ", accuracy)
print("F1-score on test set: ", f1_test)
print(metrics.classification_report(y_test, y_predict, target_names=["sleep", "awake"]))
print("Confussion matrix: \n", metrics.confusion_matrix(y_test, y_predict))
print("\n-------------------------------------------------------")
print("\nMean accuracy =", np.mean(accuracy_list))
print("\nMean f1-score =", np.mean(f1_score_list))
```
#### 2.1 Results:
##### Parameters: n_estimators = 40, max_depth = 4
- weight for awake class = 1: 30 windows: acc = 0.7399, f1-score = 0.6500
- weight for awake class = 1.2: 30 windows: acc = 0.7368, f1-score = 0.6616
- weight for awake class = 1.3: 30 windows: acc = 0.7332, f1-score = 0.6652
- weight for awake class = 1.5: 30 windows: acc = 0.7255, f1-score = 0.6711
- weight for awake class = 1.7: 30 windows: acc = 0.7115, f1-score = 0.6685
- weight for awake class = 1.8: 30 windows: acc = 0.7092, f1-score = 0.6731 - best
- weight for awake class = 1.9: 30 windows: acc = 0.7022, f1-score = 0.6727

##### Parameters: n_estimators = 20, max_depth = 4
- weight for awake class = 1.2: 30 windows: acc = 0.7338, f1-score = 0.6530
- weight for awake class = 1.5: 30 windows: acc = 0.7206, f1-score = 0.6648
- weight for awake class = 1.7: 30 windows: acc = 0.7065, f1-score = 0.6681
- weight for awake class = 1.8: 30 windows: acc = 0.7019, f1-score = 0.6724
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
from sklearn.metrics import mean_squared_error
```
## RMSE computed only with ingredients that are in the recipe
```
test_predictions = pickle.load(open("test_predictions_hidden1.pkl", "rb"))
hidden_rankings = pickle.load(open("hidden_rankings_hidden1.pkl", "rb"))
results_data = np.zeros((len(test_predictions),2))
print(len(test_predictions))
for i in range(len(test_predictions)):
current_pred = test_predictions[i]
results_data[i, 0] = current_pred.est
results_data[i, 1] = current_pred.r_ui
test_rmse = mean_squared_error(results_data[:, 1], results_data[:, 0], squared = False)
print(test_rmse)
```
## RMSE on all the test ingredients (with 1 hidden ingredient)
```
test_predictions = pickle.load(open("rec_predictions_hidden1.pkl", "rb"))
hidden_rankings = pickle.load(open("rec_rankings_hidden1.pkl", "rb"))
print(len(test_predictions))
results_data = np.zeros((len(test_predictions),2))
print(len(test_predictions))
for i in range(len(test_predictions)):
current_pred = test_predictions[i]
results_data[i, 0] = current_pred.est
results_data[i, 1] = current_pred.r_ui
test_rmse = mean_squared_error(results_data[:, 1], results_data[:, 0], squared = False)
print(test_rmse)
```
## Recommendations
```
recommendations = {}
for pred in test_predictions:
current_rec = recommendations.get(pred.uid, ())
if len(current_rec) > 0:
max_rating = current_rec[1]
if pred.est > max_rating:
recommendations[pred.uid] = (pred.iid, pred.est)
else:
recommendations[pred.uid] = (pred.iid, pred.est)
accuracy = 0
correct_examples = []
for recipe in recommendations:
    # hidden_rankings (loaded above) holds the held-out ingredient for each test recipe
    true_ingredient = hidden_rankings.loc[hidden_rankings['recipe_id'] == recipe]['ingredient']
    predicted_ingredient = recommendations[recipe][0]
    if true_ingredient.item() == predicted_ingredient:
        correct_examples.append(recipe)
        accuracy += 1
print("Correct ingredients: ")
print(accuracy)
print("Test recipes: ")
print(len(recommendations))
print("Accuracy: ")
print(accuracy / len(recommendations))
```
## Correct Examples
```
full_testset = pickle.load(open("full_test_hidden1.pkl", "rb"))
full_testset.head()
active_ingredients = full_testset.loc[full_testset['rating'] == 1]
for recipe in correct_examples:
print("Recipe: ")
print(recipe)
ingredients = active_ingredients.loc[active_ingredients['recipe_id'] == recipe]
print("Ingredients: ")
print(ingredients['ingredient'].to_string(index = False))
predicted_ingredient = recommendations[recipe]
print("Recommended ingredient: ")
print(predicted_ingredient[0])
```
## Incorrect Examples
```
counter = 0
for recipe in recommendations:
    if recipe in correct_examples:
        continue   # keep only the incorrectly predicted recipes
    if counter > 25:
        break
    print("Recipe: ")
    print(recipe)
    ingredients = active_ingredients.loc[active_ingredients['recipe_id'] == recipe]
    print("Ingredients: ")
    print(ingredients['ingredient'].to_string(index = False))
    predicted_ingredient = recommendations[recipe]
    print("Recommended ingredient: ")
    print(predicted_ingredient[0])
    print("True ingredient: ")
    true_ingredient = hidden_rankings.loc[hidden_rankings['recipe_id'] == recipe]['ingredient']
    print(true_ingredient.item())
    counter += 1
```
```
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import pandas as pd
# The SIR model differential equations.
def deriv(y, t, N, gamma,beta1,beta2,t_tresh=22):
S,I,R = y
if t<=t_tresh:
B=beta1
elif t>t_tresh and t<=1000:
B=beta1*np.exp(-(t-t_tresh)/beta2)
elif t>1000:
B=0.2*np.exp(-(t-1000)/beta2)
dSdt = -(B*I/N)*S
dIdt = (B*S/N)*I - gamma*I
dRdt = gamma*I
return dSdt, dIdt, dRdt
def time_evo(N,beta1,beta2,gamma,death_rate,t_tresh=22,I0=1,R0=0,t=np.arange(0,365)):
# Definition of the initial conditions
    # I0 and R0 denote the number of initially infected people (I0)
    # and the number of people that have recovered and are immunized (R0)
    # t is the time grid
S0=N-I0-R0 # number of people that can still contract the virus
# Initial conditions vector
y0 = S0, I0, R0
# Integrate the SIR equations over the time grid, t.
ret = odeint(deriv, y0, t, args=(N,gamma,beta1,beta2,t_tresh))
S, I, R = np.transpose(ret)
return (t,S,I,(1-death_rate/100)*R,R*death_rate/100)
vector_regions = ['nord', 'centro', 'sud', 'isole']#,'italia','nolombardia','lombardia']
for r in range(len(vector_regions)):
fit_region = vector_regions[r]
if fit_region =='nord':
region = ['Lombardia','Veneto','Emilia-Romagna','Liguria','Piemonte','Valle d\'Aosta','P.A. Trento','P.A. Bolzano','Friuli Venezia Giulia']
n_regions = len(region)
elif fit_region =='centro':
region = ['Toscana','Marche','Umbria','Lazio','Abruzzo','Molise']
n_regions = len(region)
elif fit_region =='sud':
region = ['Puglia','Calabria','Basilicata','Campania']
n_regions = len(region)
elif fit_region =='isole':
region = ['Sicilia','Sardegna']
n_regions = len(region)
elif fit_region =='italia':
region = 'Italia'
n_regions = 1
elif fit_region =='nolombardia':
region = ['Abruzzo','Basilicata','P.A. Bolzano','Calabria','Campania','Emilia-Romagna','Friuli Venezia Giulia','Lazio','Liguria','Marche','Molise','Piemonte','Puglia','Sardegna','Sicilia','Toscana','P.A. Trento','Umbria','Valle d\'Aosta','Veneto']
n_regions = len(region)
elif fit_region =='lombardia':
region = ['Lombardia']
n_regions = 1
popolation_regions = np.array([ 1304970, 559084, 533050, 1947131, 5801692, 4459477, 1215220,5879082, 1550640, 10060574, 1525271, 305617, 4356406, 4029053, 1639591, 4999891, 3729641, 541380, 882015, 125666, 4905854])
name_regions = np.array(['Abruzzo','Basilicata','P.A. Bolzano','Calabria','Campania','Emilia-Romagna','Friuli Venezia Giulia','Lazio','Liguria','Lombardia','Marche','Molise','Piemonte','Puglia','Sardegna','Sicilia','Toscana','P.A. Trento','Umbria','Valle d\'Aosta','Veneto'])
regions = np.vstack((name_regions,popolation_regions))
mask_reg = []
for i in range(n_regions):
mask_reg.append(regions[0,:] == region[i])
mask_reg = np.array(mask_reg)
if region=='Italia':
data = pd.read_csv('https://github.com/pcm-dpc/COVID-19/raw/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv')
xdata=pd.to_numeric(range(data.shape[0]))
ydata=data['totale_casi']
ydata_death=data['deceduti']
ydata_rec=data['dimessi_guariti']
N = 60.48*10**6
else:
data = pd.read_csv('https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv')
N = 0
xxx = []
yyy = []
zzz = []
for i in range(n_regions):
N += int(regions[1,mask_reg[i]])
mask_REG=data['denominazione_regione']==region[i]
xxx.append(data.loc[mask_REG,'totale_casi'])
yyy.append(data.loc[mask_REG,'deceduti'])
zzz.append(data.loc[mask_REG,'dimessi_guariti'])
ydata = np.array(np.sum(xxx,axis=0))
ydata_death = np.array(np.sum(yyy,axis=0))
ydata_rec = np.array(np.sum(zzz,axis=0))
xdata = pd.to_numeric(range(ydata.shape[0]))
if fit_region =='nord':
        fin_result=time_evo(N,0.41,27.65,1/14,5.5,t_tresh=17,I0=2,t=np.arange(0,720)) # Nord + 0 days
dt = 0
elif fit_region =='centro':
        fin_result=time_evo(N,0.41,24.65,1/14,3.4,t_tresh=14.4,I0=2,t=np.arange(0,720)) # Centro + 12 days
dt = 10
elif fit_region =='sud':
        fin_result=time_evo(N,0.41,29.14,1/14,2.5,t_tresh=9,I0=2,t=np.arange(0,720)) # Sud + 12 days
dt = 12
elif fit_region =='isole':
        fin_result=time_evo(N,0.41,27.25,1/14,2,t_tresh=7.8,I0=2,t=np.arange(0,720)) # Isole + 16 days
dt = 16
elif fit_region =='italia':
fin_result=time_evo(N,0.415,28,1/14,6.5,t_tresh=17,I0=2,t=np.arange(0,720)) # Italia
dt = 0
if fit_region =='nolombardia':
        fin_result=time_evo(N,0.415,26.5,1/14,4.2,t_tresh=17,I0=2,t=np.arange(0,720)) # Nord + 0 days
dt = 4
if fit_region =='lombardia':
        fin_result=time_evo(N,0.415,25.85,1/14,8,t_tresh=17,I0=1,t=np.arange(0,720)) # Nord + 0 days
dt = 0
t=fin_result[0]
s_vec=fin_result[1]
i_vec=fin_result[2]
r_vec=fin_result[3]
m_vec=fin_result[4]
ydata_inf=ydata-ydata_rec-ydata_death
# Starting time for the model according to each region
if fit_region == 'nord':
new_t = pd.to_datetime(t,unit='D',origin='2020-02-07')
elif fit_region == 'centro':
new_t = pd.to_datetime(t,unit='D',origin='2020-02-17')
elif fit_region == 'sud':
new_t = pd.to_datetime(t,unit='D',origin='2020-02-19')
elif fit_region == 'isole':
new_t = pd.to_datetime(t,unit='D',origin='2020-02-23')
elif fit_region == 'italia':
new_t = pd.to_datetime(t,unit='D',origin='2020-02-07')
elif fit_region == 'nolombardia':
new_t = pd.to_datetime(t,unit='D',origin='2020-02-11')
elif fit_region == 'lombardia':
new_t = pd.to_datetime(t,unit='D',origin='2020-02-07')
# Starting time for the data - All regions
data_t = pd.to_datetime(xdata,unit='D',origin='2020-02-24')
# Model dataframe
export = pd.DataFrame({'S':np.around(s_vec,0), 'I': np.around(i_vec,0), 'R':np.around(r_vec+m_vec,0), 'sintomatici_modello':np.around(i_vec/3,0)})
export.index = new_t
# Data dataframe
new_ydata_inf = pd.DataFrame({'sintomatici_data':np.around(ydata_inf,0)})
new_ydata_inf.index = data_t
# Join and export
joint_frames = export.join(new_ydata_inf,on=export.index)
export2 = joint_frames.iloc[:200,:]
export2.index.name='data'
export2.to_csv('output/'+fit_region+'.csv',index=True)
```
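For reference, `deriv` above integrates the standard SIR model with a time-dependent contact rate,

$$\frac{dS}{dt} = -\frac{B(t)\,I}{N}\,S, \qquad \frac{dI}{dt} = \frac{B(t)\,S}{N}\,I - \gamma I, \qquad \frac{dR}{dt} = \gamma I,$$

where $B(t)=\beta_1$ for $t \le t_{\mathrm{thresh}}$ and $B(t)=\beta_1\,e^{-(t-t_{\mathrm{thresh}})/\beta_2}$ afterwards (on the 720-day grid used here), and `time_evo` splits the removed compartment into recovered and deceased using `death_rate` (in percent). Below is a minimal sketch of running the model on its own and plotting the infected curve; the parameter values are the ones used for the 'nord' branch above, and `N` is only a rough stand-in for the summed northern-region population.
```
import numpy as np
import matplotlib.pyplot as plt

# assumes deriv() and time_evo() from the cell above have been defined
t, S, I, R, M = time_evo(N=27.7e6, beta1=0.41, beta2=27.65, gamma=1/14,
                         death_rate=5.5, t_tresh=17, I0=2, t=np.arange(0, 720))

plt.plot(t, I, label="infected (model)")
plt.xlabel("days since model start")
plt.ylabel("people")
plt.legend()
plt.show()
```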
```
# Import dependencies
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import sklearn
from sklearn.model_selection import train_test_split
%matplotlib inline
import pandas as pd
data = pd.read_csv('dataset/studied_slept_passed.csv', names = ['Studied', 'Slept', 'Passed'])
data[['Studied', 'Slept', 'Passed']] = data[['Studied', 'Slept', 'Passed']].astype('float32')
import seaborn as sns
sns.scatterplot(x="Studied", y="Slept", hue="Passed", data=data)
train, test = train_test_split(data, test_size=0.2)
#dataset_train = tf.data.Dataset.from_tensor_slices((train[['Studied', 'Slept']], train['Passed']))
dataset_train = tf.data.Dataset.from_tensor_slices((data[['Studied', 'Slept']], data['Passed']))
iterator = dataset_train.make_initializable_iterator()
X, Y = iterator.get_next()
# Let's check if the iterator works
with tf.Session() as sess:
sess.run(iterator.initializer) # initialize the iterator
for i in range(50):
x, y = sess.run((X,Y))
print(x, y)
num_features = 2
learning_rate = 0.01
training_epochs = 100
#tf.reset_default_graph()
# We have 2 features: hours slept & hours studied
#X = tf.placeholder(tf.float32, [None, num_features], name="X")
#Y = tf.placeholder(tf.float32, [None, 1], name="Y")
# Initialize our weights & bias
W = tf.get_variable("W", [num_features, 1], initializer = tf.contrib.layers.xavier_initializer())
#W = tf.get_variable("W", initializer = tf.fill([num_features, 1], 0.1)) #tf.constant([[0.1],[0.1]])
#W = tf.get_variable("W", initializer = tf.fill([num_features, 1], 0.1))
b = tf.get_variable("b", [1], initializer = tf.zeros_initializer())
d = tf.matmul(tf.reshape(X, [1, num_features]), W)
Z = tf.add(d, b)
prediction = tf.nn.sigmoid(Z)
Y_reshape = tf.reshape(Y, [1,1])
# Calculate the cost
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Z, labels = Y_reshape))
# Use Adam as optimization method
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
cost_history = np.empty(shape=[1],dtype=float)
n_correct = 0
with tf.Session() as sess:
sess.run(init)
for epoch in range(training_epochs):
sess.run(iterator.initializer) # initialize the iterator
try:
while True:
#_, c = sess.run([optimizer, cost], feed_dict={X: x_train, Y: y_train})
_, c = sess.run([optimizer, cost])
except tf.errors.OutOfRangeError:
pass
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"W=", sess.run(W), "b=", sess.run(b))
cost_history = np.append(cost_history, c)
sess.run(iterator.initializer) # initialize the iterator
correct_prediction = tf.to_float(tf.greater(prediction, 0.5))
for i in range(len(train)):
tmp = sess.run(correct_prediction)
if float(tmp) == 1.0:
n_correct += 1
print ("n_correct: {} / total: {} Train Accuracy {}".format(n_correct, len(train), n_correct/len(train)))
#print ("Train Accuracy:", accuracy.eval({X: x_train, Y: y_train}))
print ("Train Accuracy:", accuracy.eval({X: X, Y: Y}))
#print ("Test Accuracy:", accuracy.eval({X: x_test, Y: y_test}))
# 'w_hat' and 'b_hat' are assumed to hold the fitted weights and bias, e.g. captured
# inside the session above with: w_hat, b_hat = sess.run(W), sess.run(b)
w_hat
b_hat
data['Predicted'] = data['Studied'] * w_hat[0] + data['Slept'] * w_hat[1] + b_hat
len(data)
```
|
github_jupyter
|
# Import dependencies
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import sklearn
from sklearn.model_selection import train_test_split
%matplotlib inline
import pandas as pd
data = pd.read_csv('dataset/studied_slept_passed.csv', names = ['Studied', 'Slept', 'Passed'])
data[['Studied', 'Slept', 'Passed']] = data[['Studied', 'Slept', 'Passed']].astype('float32')
import seaborn as sns
sns.scatterplot(x="Studied", y="Slept", hue="Passed", data=data)
train, test = train_test_split(data, test_size=0.2)
#dataset_train = tf.data.Dataset.from_tensor_slices((train[['Studied', 'Slept']], train['Passed']))
dataset_train = tf.data.Dataset.from_tensor_slices((data[['Studied', 'Slept']], data['Passed']))
iterator = dataset_train.make_initializable_iterator()
X, Y = iterator.get_next()
# Let's check if the iterator works
with tf.Session() as sess:
sess.run(iterator.initializer) # initialize the iterator
for i in range(50):
x, y = sess.run((X,Y))
print(x, y)
num_features = 2
learning_rate = 0.01
training_epochs = 100
#tf.reset_default_graph()
# We have 2 features: hours slept & hours studied
#X = tf.placeholder(tf.float32, [None, num_features], name="X")
#Y = tf.placeholder(tf.float32, [None, 1], name="Y")
# Initialize our weights & bias
W = tf.get_variable("W", [num_features, 1], initializer = tf.contrib.layers.xavier_initializer())
#W = tf.get_variable("W", initializer = tf.fill([num_features, 1], 0.1)) #tf.constant([[0.1],[0.1]])
#W = tf.get_variable("W", initializer = tf.fill([num_features, 1], 0.1))
b = tf.get_variable("b", [1], initializer = tf.zeros_initializer())
d = tf.matmul(tf.reshape(X, [1, num_features]), W)
Z = tf.add(d, b)
prediction = tf.nn.sigmoid(Z)
Y_reshape = tf.reshape(Y, [1,1])
# Calculate the cost
cost = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits = Z, labels = Y_reshape))
# Use Adam as optimization method
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
init = tf.global_variables_initializer()
cost_history = np.empty(shape=[1],dtype=float)
n_correct = 0
with tf.Session() as sess:
sess.run(init)
for epoch in range(training_epochs):
sess.run(iterator.initializer) # initialize the iterator
try:
while True:
#_, c = sess.run([optimizer, cost], feed_dict={X: x_train, Y: y_train})
_, c = sess.run([optimizer, cost])
except tf.errors.OutOfRangeError:
pass
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(c), \
"W=", sess.run(W), "b=", sess.run(b))
cost_history = np.append(cost_history, c)
sess.run(iterator.initializer) # initialize the iterator
correct_prediction = tf.to_float(tf.greater(prediction, 0.5))
for i in range(len(train)):
tmp = sess.run(correct_prediction)
if float(tmp) == 1.0:
n_correct += 1
print ("n_correct: {} / total: {} Train Accuracy {}".format(n_correct, len(train), n_correct/len(train)))
#print ("Train Accuracy:", accuracy.eval({X: x_train, Y: y_train}))
print ("Train Accuracy:", accuracy.eval({X: X, Y: Y}))
#print ("Test Accuracy:", accuracy.eval({X: x_test, Y: y_test}))
# 'w_hat' and 'b_hat' are assumed to hold the fitted weights and bias, e.g. captured
# inside the session above with: w_hat, b_hat = sess.run(W), sess.run(b)
w_hat
b_hat
data['Predicted'] = data['Studied'] * w_hat[0] + data['Slept'] * w_hat[1] + b_hat
len(data)
| 0.557364 | 0.597549 |
# Kubeflow Fairing Introduction
Kubeflow Fairing is a Python package that streamlines the process of `building`, `training`, and `deploying` machine learning (ML) models in a hybrid cloud environment. By using Kubeflow Fairing and adding a few lines of code, you can run your ML training job locally or in the cloud, directly from Python code or a Jupyter notebook. After your training job is complete, you can use Kubeflow Fairing to deploy your trained model as a prediction endpoint.
# How does Kubeflow Fairing work
Kubeflow Fairing
1. Packages your Jupyter notebook, Python function, or Python file as a Docker image
2. Deploys and runs the training job on Kubeflow or AI Platform.
3. Deploys your trained model as a prediction endpoint on Kubeflow after your training job is complete.
# Goals of Kubeflow Fairing project
- Easily package ML training jobs: Enable ML practitioners to easily package their ML model training code, and their code’s dependencies, as a Docker image.
- Easily train ML models in a hybrid cloud environment: Provide a high-level API for training ML models to make it easy to run training jobs in the cloud, without needing to understand the underlying infrastructure.
- Streamline the process of deploying a trained model: Make it easy for ML practitioners to deploy trained ML models to a hybrid cloud environment.
## Train and deploy model on Kubeflow in Notebooks
This example comes from an upstream Fairing [example](https://github.com/kubeflow/fairing/tree/master/examples/prediction).
Please check the Kaggle competition [House Prices: Advanced Regression Techniques](https://www.kaggle.com/c/house-prices-advanced-regression-techniques) for details about the ML problem we want to solve.
This notebook introduces you to using Kubeflow Fairing to train and deploy a model to Kubeflow on Amazon EKS. It demonstrates how to:
* Train an XGBoost model in a local notebook,
* Use Kubeflow Fairing to train an XGBoost model remotely on Kubeflow,
* Use Kubeflow Fairing to deploy a trained model to Kubeflow,
* Call the deployed endpoint for predictions.
### Install python dependencies
```
# Install latest Fairing from github repository
!pip install kubeflow-fairing==0.7.1
%%writefile requirements.txt
pandas
joblib
numpy
xgboost
scikit-learn>=0.21.0
seldon-core
tornado>=6.0.3
!pip install -r requirements.txt
# Restart the kernel to pick up pip installed libraries
from IPython.core.display import HTML
HTML("<script>Jupyter.notebook.kernel.restart()</script>")
```
## Train on Kubernetes
We will show you how to run a training job in a Kubernetes cluster. You can use `ECR` as your container image registry.
```
import boto3
AWS_REGION_AS_SLIST=!curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/'
AWS_REGION = AWS_REGION_AS_SLIST.s
print('Region: {}'.format(AWS_REGION))
AWS_ACCOUNT_ID=boto3.client('sts').get_caller_identity().get('Account')
print('Account ID: {}'.format(AWS_ACCOUNT_ID))
S3_BUCKET='sagemaker-{}-{}'.format(AWS_REGION, AWS_ACCOUNT_ID)
print('S3 Bucket: {}'.format(S3_BUCKET))
# Authenticate ECR
# This command retrieves a token that is valid for a specified registry for 12 hours,
# and then it prints a docker login command with that authorization token.
# Then we execute this command to log in to ECR
!eval $(aws ecr get-login --no-include-email --region=$AWS_REGION)
# Create an ECR repository in the same region
!aws ecr describe-repositories --repository-names fairing-job --region=$AWS_REGION || aws ecr create-repository --repository-name fairing-job --region=$AWS_REGION
```
# _Ignore error message ^^ above ^^. This is OK!_
### Develop your model
```
import argparse
import logging
import joblib
import sys
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from xgboost import XGBRegressor
logging.basicConfig(format='%(message)s')
logging.getLogger().setLevel(logging.INFO)
def read_input(file_name, test_size=0.25):
"""Read input data and split it into train and test."""
data = pd.read_csv(file_name)
data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = data.SalePrice
X = data.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])
train_X, test_X, train_y, test_y = train_test_split(X.values,
y.values,
test_size=test_size,
shuffle=False)
imputer = SimpleImputer()
train_X = imputer.fit_transform(train_X)
test_X = imputer.transform(test_X)
return (train_X, train_y), (test_X, test_y)
def train_model(train_X,
train_y,
test_X,
test_y,
n_estimators,
learning_rate):
"""Train the model using XGBRegressor."""
model = XGBRegressor(n_estimators=n_estimators, learning_rate=learning_rate)
model.fit(train_X,
train_y,
early_stopping_rounds=40,
eval_set=[(test_X, test_y)])
print("Best RMSE on eval: %.2f with %d rounds" %
(model.best_score,
model.best_iteration+1))
return model
def eval_model(model, test_X, test_y):
"""Evaluate the model performance."""
predictions = model.predict(test_X)
logging.info("mean_absolute_error=%.2f", mean_absolute_error(predictions, test_y))
def save_model(model, model_file):
"""Save XGBoost model for serving."""
joblib.dump(model, model_file)
logging.info("Model export success: %s", model_file)
class HousingServe(object):
def __init__(self):
self.train_input = "ames_dataset/train.csv"
self.n_estimators = 50
self.learning_rate = 0.1
self.model_file = "trained_ames_model.dat"
self.model = None
def train(self):
(train_X, train_y), (test_X, test_y) = read_input(self.train_input)
model = train_model(train_X,
train_y,
test_X,
test_y,
self.n_estimators,
self.learning_rate)
eval_model(model, test_X, test_y)
save_model(model, self.model_file)
def predict(self, X, feature_names=None):
"""Predict using the model for given ndarray."""
if not self.model:
self.model = joblib.load(self.model_file)
# Do any preprocessing
prediction = self.model.predict(data=X)
# Do any postprocessing
return prediction
```
### Train an XGBoost model in a notebook
```
model = HousingServe()
model.train()
```
### Set up Kubeflow Fairing for training and predictions
```
from kubeflow import fairing
from kubeflow.fairing import TrainJob
from kubeflow.fairing.backends import KubeflowAWSBackend
from kubeflow import fairing
FAIRING_BACKEND = 'KubeflowAWSBackend'
DOCKER_REGISTRY = '{}.dkr.ecr.{}.amazonaws.com'.format(AWS_ACCOUNT_ID, AWS_REGION)
import importlib
if FAIRING_BACKEND == 'KubeflowAWSBackend':
from kubeflow.fairing.builders.cluster.s3_context import S3ContextSource
BuildContext = S3ContextSource(
aws_account=AWS_ACCOUNT_ID, region=AWS_REGION,
bucket_name=S3_BUCKET
)
BackendClass = getattr(importlib.import_module('kubeflow.fairing.backends'), FAIRING_BACKEND)
```
### Train an XGBoost model on Kubeflow
Import the `TrainJob` and use the configured backend class. Kubeflow Fairing packages the `HousingServe` class, the training data, and the training job's software prerequisites as a Docker image. Then Kubeflow Fairing deploys and runs the training job on Kubeflow.
```
from kubeflow.fairing import TrainJob
train_job = TrainJob(HousingServe, input_files=['ames_dataset/train.csv', "requirements.txt"],
docker_registry=DOCKER_REGISTRY,
backend=BackendClass(build_context_source=BuildContext))
train_job.submit()
```
|
github_jupyter
|
# Install latest Fairing from github repository
!pip install kubeflow-fairing==0.7.1
%%writefile requirements.txt
pandas
joblib
numpy
xgboost
scikit-learn>=0.21.0
seldon-core
tornado>=6.0.3
!pip install -r requirements.txt
# Restart the kernel to pick up pip installed libraries
from IPython.core.display import HTML
HTML("<script>Jupyter.notebook.kernel.restart()</script>")
import boto3
AWS_REGION_AS_SLIST=!curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/'
AWS_REGION = AWS_REGION_AS_SLIST.s
print('Region: {}'.format(AWS_REGION))
AWS_ACCOUNT_ID=boto3.client('sts').get_caller_identity().get('Account')
print('Account ID: {}'.format(AWS_ACCOUNT_ID))
S3_BUCKET='sagemaker-{}-{}'.format(AWS_REGION, AWS_ACCOUNT_ID)
print('S3 Bucket: {}'.format(S3_BUCKET))
# Authenticate ECR
# This command retrieves a token that is valid for a specified registry for 12 hours,
# and then it prints a docker login command with that authorization token.
# Then we execute this command to log in to ECR
!eval $(aws ecr get-login --no-include-email --region=$AWS_REGION)
# Create an ECR repository in the same region
!aws ecr describe-repositories --repository-names fairing-job --region=$AWS_REGION || aws ecr create-repository --repository-name fairing-job --region=$AWS_REGION
import argparse
import logging
import joblib
import sys
import pandas as pd
import numpy as np
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from xgboost import XGBRegressor
logging.basicConfig(format='%(message)s')
logging.getLogger().setLevel(logging.INFO)
def read_input(file_name, test_size=0.25):
"""Read input data and split it into train and test."""
data = pd.read_csv(file_name)
data.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = data.SalePrice
X = data.drop(['SalePrice'], axis=1).select_dtypes(exclude=['object'])
train_X, test_X, train_y, test_y = train_test_split(X.values,
y.values,
test_size=test_size,
shuffle=False)
imputer = SimpleImputer()
train_X = imputer.fit_transform(train_X)
test_X = imputer.transform(test_X)
return (train_X, train_y), (test_X, test_y)
def train_model(train_X,
train_y,
test_X,
test_y,
n_estimators,
learning_rate):
"""Train the model using XGBRegressor."""
model = XGBRegressor(n_estimators=n_estimators, learning_rate=learning_rate)
model.fit(train_X,
train_y,
early_stopping_rounds=40,
eval_set=[(test_X, test_y)])
print("Best RMSE on eval: %.2f with %d rounds" %
(model.best_score,
model.best_iteration+1))
return model
def eval_model(model, test_X, test_y):
"""Evaluate the model performance."""
predictions = model.predict(test_X)
logging.info("mean_absolute_error=%.2f", mean_absolute_error(predictions, test_y))
def save_model(model, model_file):
"""Save XGBoost model for serving."""
joblib.dump(model, model_file)
logging.info("Model export success: %s", model_file)
class HousingServe(object):
def __init__(self):
self.train_input = "ames_dataset/train.csv"
self.n_estimators = 50
self.learning_rate = 0.1
self.model_file = "trained_ames_model.dat"
self.model = None
def train(self):
(train_X, train_y), (test_X, test_y) = read_input(self.train_input)
model = train_model(train_X,
train_y,
test_X,
test_y,
self.n_estimators,
self.learning_rate)
eval_model(model, test_X, test_y)
save_model(model, self.model_file)
def predict(self, X, feature_names=None):
"""Predict using the model for given ndarray."""
if not self.model:
self.model = joblib.load(self.model_file)
# Do any preprocessing
prediction = self.model.predict(data=X)
# Do any postprocessing
return prediction
model = HousingServe()
model.train()
from kubeflow import fairing
from kubeflow.fairing import TrainJob
from kubeflow.fairing.backends import KubeflowAWSBackend
from kubeflow import fairing
FAIRING_BACKEND = 'KubeflowAWSBackend'
DOCKER_REGISTRY = '{}.dkr.ecr.{}.amazonaws.com'.format(AWS_ACCOUNT_ID, AWS_REGION)
import importlib
if FAIRING_BACKEND == 'KubeflowAWSBackend':
from kubeflow.fairing.builders.cluster.s3_context import S3ContextSource
BuildContext = S3ContextSource(
aws_account=AWS_ACCOUNT_ID, region=AWS_REGION,
bucket_name=S3_BUCKET
)
BackendClass = getattr(importlib.import_module('kubeflow.fairing.backends'), FAIRING_BACKEND)
from kubeflow.fairing import TrainJob
train_job = TrainJob(HousingServe, input_files=['ames_dataset/train.csv', "requirements.txt"],
docker_registry=DOCKER_REGISTRY,
backend=BackendClass(build_context_source=BuildContext))
train_job.submit()
| 0.575588 | 0.9434 |
# Image Analysis with the Computer Vision Service

*Computer Vision* is a branch of artificial intelligence (AI) that explores the development of AI systems that can "see" the world, either in real-time through a camera or by analyzing images and video. This is made possible by the fact that digital images are essentially just arrays of numeric pixel values, and we can use those pixel values as *features* to train machine learning models that can classify images, detect discrete objects in an image, and even generate text-based summaries of photographs.
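As a minimal illustration of that idea, the sketch below (assuming Pillow and NumPy are available; the file path is just an example) loads an image and inspects the raw pixel array that such models consume.

```
# A minimal sketch (assumed dependencies: Pillow and NumPy; the file path is just an example)
import numpy as np
from PIL import Image

img = Image.open('data/vision/store_cam1.jpg')   # hypothetical image file
pixels = np.array(img)                           # height x width x 3 array of 0-255 values

print(pixels.shape)   # e.g. (480, 640, 3)
print(pixels[0, 0])   # RGB values of the top-left pixel
```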
## Use the Computer Vision Cognitive Service
Microsoft Azure includes a number of *cognitive services* that encapsulate common AI functions, including some that can help you build computer vision solutions.
The *Computer Vision* cognitive service provides an obvious starting point for our exploration of computer vision in Azure. It uses pre-trained machine learning models to analyze images and extract information about them.
For example, suppose Northwind Traders has decided to implement a "smart store", in which AI services monitor the store to identify customers requiring assistance, and direct employees to help them. By using the Computer Vision service, images taken by cameras throughout the store can be analyzed to provide meaningful descriptions of what they depict.
### Create a Cognitive Services Resource
Let's start by creating a **Cognitive Services** resource in your Azure subscription:
1. In another browser tab, open the Azure portal at https://portal.azure.com, signing in with your Microsoft account.
2. Click the **+Create a resource** button, search for *Cognitive Services*, and create a **Cognitive Services** resource with the following settings:
- **Name**: *Enter a unique name*.
- **Subscription**: *Your Azure subscription*.
- **Location**: *Choose any available region*:
- **Pricing tier**: S0
- **Resource group**: *Create a resource group with a unique name*.
3. Wait for deployment to complete. Then go to your cognitive services resource, and on the **Overview** page, click the link to manage the keys for the service. You will need the endpoint and keys to connect to your cognitive services resource from client applications.
### Get the Key and Endpoint for your Cognitive Services resource
To use your cognitive services resource, client applications need its endpoint and authentication key:
1. In the Azure portal, on the **Keys and Endpoint** page for your cognitive service resource, copy the **Key1** for your resource and paste it in the code below, replacing **YOUR_COG_KEY**.
2. Copy the **endpoint** for your resource and paste it in the code below, replacing **YOUR_COG_ENDPOINT**.
3. Run the code below by selecting the cell and then clicking the **Run cell** (▷) button to the left of the cell.
```
cog_key = 'YOUR_COG_KEY'
cog_endpoint = 'YOUR_COG_ENDPOINT'
print('Ready to use cognitive services at {} using key {}'.format(cog_endpoint, cog_key))
```
Now that you've set up the key and endpoint, you can use the computer vision service to analyze an image.
To do this from Python, you'll need to run the following cell to install the Azure Cognitive Services Computer Vision package.
```
! pip install azure-cognitiveservices-vision-computervision
```
Now you're ready to go!
Run the following cell to get a description for an image in the */data/vision/store_cam1.jpg* file.
```
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
from python_code import vision
import os
%matplotlib inline
# Get the path to an image file
image_path = os.path.join('data', 'vision', 'store_cam1.jpg')
# Get a client for the computer vision service
computervision_client = ComputerVisionClient(cog_endpoint, CognitiveServicesCredentials(cog_key))
# Get a description from the computer vision service
image_stream = open(image_path, "rb")
description = computervision_client.describe_image_in_stream(image_stream)
# Display image and caption (code in helper_scripts/vision.py)
vision.show_image_caption(image_path, description)
```
That seems reasonably accurate.
Let's try another image.
```
# Get the path to an image file
image_path = os.path.join('data', 'vision', 'store_cam2.jpg')
# Get a description from the computer vision service
image_stream = open(image_path, "rb")
description = computervision_client.describe_image_in_stream(image_stream)
# Display image and caption (code in helper_scripts/vision.py)
vision.show_image_caption(image_path, description)
```
Again, the suggested caption seems to be pretty accurate.
## Analyze image features
So far, you've used the Computer Vision service to generate a descriptive caption for a couple of images; but there's much more you can do. The Computer Vision service provides analysis capabilities that can extract detailed information like:
- The locations of common types of object detected in the image.
- Location and approximate age of human faces in the image.
- Whether the image contains any 'adult', 'racy', or 'gory' content.
- Relevant tags that could be associated with the image in a database to make it easy to find.
Run the following code to analyze an image of a shopper.
```
# Get the path to an image file
image_path = os.path.join('data', 'vision', 'store_cam1.jpg')
# Specify the features we want to analyze
features = ['Description', 'Tags', 'Adult', 'Objects', 'Faces']
# Get an analysis from the computer vision service
image_stream = open(image_path, "rb")
analysis = computervision_client.analyze_image_in_stream(image_stream, visual_features=features)
# Show the results of analysis (code in helper_scripts/vision.py)
vision.show_image_analysis(image_path, analysis)
```
## Learn More
In addition to the capabilities you've explored in this notebook, the Computer Vision cognitive service includes the ability to:
- Identify celebrities in images.
- Detect brand logos in an image.
- Perform optical character recognition (OCR) to read text in an image.
To learn more about the Computer Vision cognitive service, see the [Computer Vision documentation](https://docs.microsoft.com/azure/cognitive-services/computer-vision/)
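As a rough sketch of one of these additional capabilities, the snippet below reuses the client created above to request brand detection through the same analyze call; it assumes the installed SDK version accepts 'Brands' as a visual feature and exposes a brands collection on the result.

```
# Sketch only: reuses computervision_client from above and assumes this SDK version
# accepts 'Brands' as a visual feature and returns a brands collection on the result.
image_path = os.path.join('data', 'vision', 'store_cam1.jpg')
image_stream = open(image_path, "rb")
brand_analysis = computervision_client.analyze_image_in_stream(image_stream, visual_features=['Brands'])
for brand in brand_analysis.brands:
    print(brand.name, brand.confidence)
```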
|
github_jupyter
|
cog_key = 'YOUR_COG_KEY'
cog_endpoint = 'YOUR_COG_ENDPOINT'
print('Ready to use cognitive services at {} using key {}'.format(cog_endpoint, cog_key))
! pip install azure-cognitiveservices-vision-computervision
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
from python_code import vision
import os
%matplotlib inline
# Get the path to an image file
image_path = os.path.join('data', 'vision', 'store_cam1.jpg')
# Get a client for the computer vision service
computervision_client = ComputerVisionClient(cog_endpoint, CognitiveServicesCredentials(cog_key))
# Get a description from the computer vision service
image_stream = open(image_path, "rb")
description = computervision_client.describe_image_in_stream(image_stream)
# Display image and caption (code in helper_scripts/vision.py)
vision.show_image_caption(image_path, description)
# Get the path to an image file
image_path = os.path.join('data', 'vision', 'store_cam2.jpg')
# Get a description from the computer vision service
image_stream = open(image_path, "rb")
description = computervision_client.describe_image_in_stream(image_stream)
# Display image and caption (code in helper_scripts/vision.py)
vision.show_image_caption(image_path, description)
# Get the path to an image file
image_path = os.path.join('data', 'vision', 'store_cam1.jpg')
# Specify the features we want to analyze
features = ['Description', 'Tags', 'Adult', 'Objects', 'Faces']
# Get an analysis from the computer vision service
image_stream = open(image_path, "rb")
analysis = computervision_client.analyze_image_in_stream(image_stream, visual_features=features)
# Show the results of analysis (code in helper_scripts/vision.py)
vision.show_image_analysis(image_path, analysis)
| 0.529263 | 0.983565 |
# Bitcoin Prediction
### Importing libraries and preprocessing
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Activation
from keras.models import load_model
import tensorflow
print(tensorflow.__version__)
training = pd.read_csv("bitcoin_price_Training - Training.csv",thousands=',')
testing = pd.read_csv("bitcoin_price_1week_Test - Test.csv",thousands=',')
training.head()
testing.head()
del training['Date']
training.head()
del testing['Date']
testing.head()
train = training[::-1]
test = testing[::-1]
train.head()
test.head()
training.isnull().sum()
testing.isnull().sum()
training.shape
testing.shape
training.info()
testing.info()
training.describe()
testing.describe()
train = train['Close'].values.astype('float32')
test = test['Close'].values.astype('float32')
```
### Scaling the data
```
from sklearn.preprocessing import MinMaxScaler
values = training['Close'].values.reshape(-1,1)  # scale the closing price (the value being predicted) rather than 'Market Cap'
values = values.astype('float32')
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
```
### Spliting data into training and test set
```
train_size = int(len(scaled) * 0.8)
test_size = len(scaled) - train_size
train, test = scaled[0:train_size,:], scaled[train_size:len(scaled),:]
print(len(train), len(test))
```
### Creating Dataset
```
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset) - look_back):
a = dataset[i:(i + look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
```
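To make the windowing explicit, here is a quick illustration on a made-up toy series (not part of the original notebook):

```
# Toy illustration of create_dataset with look_back=1 (made-up values, not part of the original data)
toy = np.array([[10.0], [11.0], [12.0], [13.0]])
toy_X, toy_Y = create_dataset(toy, look_back=1)
print(toy_X)   # [[10.] [11.] [12.]] -- each input window is the previous value
print(toy_Y)   # [11. 12. 13.]       -- the next value to predict
```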
### Reshaping the dataset
```
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
```
### LSTM
```
epochs = 100
model=Sequential()
model.add(LSTM(units=50,return_sequences=True,input_shape=(trainX.shape[1], trainX.shape[2])))
model.add(Dropout(0.3))
model.add(LSTM(units=5,return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(units=50,return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(units=50))
model.add(Dropout(0.3))
model.add(Dense(units=1))  # single output to match the one-step-ahead target
model.compile(optimizer='adam',loss='mse',metrics= ['accuracy'])
history=model.fit(trainX,trainY, batch_size = 32, epochs = epochs,validation_data=(testX, testY))
model.summary()
```
### Visualization
```
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
yhat = model.predict(testX)
plt.figure(figsize=(10,8))
plt.plot(yhat, label='predict')
plt.plot(testY, label='true')
plt.legend()
plt.show()
yhat.shape
yhat_inverse = scaler.inverse_transform(yhat.reshape(-1, 1))
testY_inverse = scaler.inverse_transform(testY.reshape(-1, 1))
yhat_inverse.shape
plt.figure(figsize=(10,8))
plt.plot(yhat_inverse, label='predict')
plt.plot(testY_inverse, label='actual')
plt.legend()
plt.show()
predictDates = training.tail(len(testX)).index
testY_reshape = testY_inverse.reshape(len(testY_inverse))
yhat_reshape = yhat_inverse.reshape(len(yhat_inverse))
import plotly.graph_objs as go
import plotly.offline as py
actual_chart = go.Scatter(x=predictDates, y=testY_reshape, name= 'Actual Price')
predict_chart = go.Scatter(x=predictDates, y=yhat_reshape, name= 'Predict Price')
py.iplot([predict_chart, actual_chart])
```
|
github_jupyter
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Activation
from keras.models import load_model
import tensorflow
print(tensorflow.__version__)
training = pd.read_csv("bitcoin_price_Training - Training.csv",thousands=',')
testing = pd.read_csv("bitcoin_price_1week_Test - Test.csv",thousands=',')
training.head()
testing.head()
del training['Date']
training.head()
del testing['Date']
testing.head()
train = training[::-1]
test = testing[::-1]
train.head()
test.head()
training.isnull().sum()
testing.isnull().sum()
training.shape
testing.shape
training.info()
testing.info()
training.describe()
testing.describe()
train = train['Close'].values.astype('float32')
test = test['Close'].values.astype('float32')
from sklearn.preprocessing import MinMaxScaler
values = training['Close'].values.reshape(-1,1)  # scale the closing price (the value being predicted) rather than 'Market Cap'
values = values.astype('float32')
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
train_size = int(len(scaled) * 0.8)
test_size = len(scaled) - train_size
train, test = scaled[0:train_size,:], scaled[train_size:len(scaled),:]
print(len(train), len(test))
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset) - look_back):
a = dataset[i:(i + look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return np.array(dataX), np.array(dataY)
look_back = 1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
epochs = 100
model=Sequential()
model.add(LSTM(units=50,return_sequences=True,input_shape=(trainX.shape[1], trainX.shape[2])))
model.add(Dropout(0.3))
model.add(LSTM(units=5,return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(units=50,return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(units=50))
model.add(Dropout(0.3))
model.add(Dense(units=1))  # single output to match the one-step-ahead target
model.compile(optimizer='adam',loss='mse',metrics= ['accuracy'])
history=model.fit(trainX,trainY, batch_size = 32, epochs = epochs,validation_data=(testX, testY))
model.summary()
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
yhat = model.predict(testX)
plt.figure(figsize=(10,8))
plt.plot(yhat, label='predict')
plt.plot(testY, label='true')
plt.legend()
plt.show()
yhat.shape
yhat_inverse = scaler.inverse_transform(yhat.reshape(-1, 1))
testY_inverse = scaler.inverse_transform(testY.reshape(-1, 1))
yhat_inverse.shape
plt.figure(figsize=(10,8))
plt.plot(yhat_inverse, label='predict')
plt.plot(testY_inverse, label='actual')
plt.legend()
plt.show()
predictDates = training.tail(len(testX)).index
testY_reshape = testY_inverse.reshape(len(testY_inverse))
yhat_reshape = yhat_inverse.reshape(len(yhat_inverse))
import plotly.graph_objs as go
import plotly.offline as py
actual_chart = go.Scatter(x=predictDates, y=testY_reshape, name= 'Actual Price')
predict_chart = go.Scatter(x=predictDates, y=yhat_reshape, name= 'Predict Price')
py.iplot([predict_chart, actual_chart])
| 0.800887 | 0.897066 |
<a href="https://colab.research.google.com/github/joaopamaral/deep-learning-v2-pytorch/blob/master/intro-to-pytorch/Part%204%20-%20Fashion-MNIST%20(Exercises).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Classifying Fashion-MNIST
Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.
<img src='https://github.com/joaopamaral/deep-learning-v2-pytorch/blob/master/intro-to-pytorch/assets/fashion-mnist-sprite.png?raw=1' width=500px>
In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebooks though as you work through this.
First off, let's load the dataset through torchvision.
```
import torch
from torchvision import datasets, transforms
import helper
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```
Here we can see one of the images.
```
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
```
## Building the network
Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits or log-softmax from the forward pass. It's up to you how many layers you add and the size of those layers.
```
# TODO: Define your network architecture here
from torch import nn
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
```
# Train the network
Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) (something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).
Then write the training code. Remember the training pass is a fairly straightforward process:
* Make a forward pass through the network to get the logits
* Use the logits to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights
By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.
```
# TODO: Create the network, define the criterion and optimizer
from torch import optim
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# TODO: Train the network here
model.cuda()
epochs = 10
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
images, labels = images.cuda(), labels.cuda()
images = images.view(images.shape[0], -1)
optimizer.zero_grad()
logps = model(images)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
print(f"Epoch {e}: Training loss: {running_loss/len(trainloader)}")
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import torch.nn.functional as F
# Test out your network!
dataiter = iter(testloader)
images, labels = dataiter.next()
img = images[0].cuda()
# Convert 2D image to 1D vector
img = img.view(img.shape[0], -1)
# TODO: Calculate the class probabilities (softmax) for img
ps = torch.exp(model(img))
# Plot the image and probabilities
helper.view_classify(img.cpu().view(1, 28, 28), ps.cpu(), version='Fashion')
```
|
github_jupyter
|
import torch
from torchvision import datasets, transforms
import helper
# Define a transform to normalize the data
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Download and load the training data
trainset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('~/.pytorch/F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
# TODO: Define your network architecture here
from torch import nn
model = nn.Sequential(nn.Linear(784, 128),
nn.ReLU(),
nn.Linear(128, 64),
nn.ReLU(),
nn.Linear(64, 10),
nn.LogSoftmax(dim=1))
# TODO: Create the network, define the criterion and optimizer
from torch import optim
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
# TODO: Train the network here
model.cuda()
epochs = 10
for e in range(epochs):
running_loss = 0
for images, labels in trainloader:
images, labels = images.cuda(), labels.cuda()
images = images.view(images.shape[0], -1)
optimizer.zero_grad()
logps = model(images)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
print(f"Epoch {e}: Training loss: {running_loss/len(trainloader)}")
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import helper
import torch.nn.functional as F
# Test out your network!
dataiter = iter(testloader)
images, labels = dataiter.next()
img = images[0].cuda()
# Convert 2D image to 1D vector
img = img.view(img.shape[0], -1)
# TODO: Calculate the class probabilities (softmax) for img
ps = torch.exp(model(img))
# Plot the image and probabilities
helper.view_classify(img.cpu().view(1, 28, 28), ps.cpu(), version='Fashion')
| 0.629319 | 0.990496 |
# <center> Loss Function Optimization for Recurrent Neural Networks </center>
<center>By "Mohamed Stouka"</center>
<img src="https://news.mit.edu/sites/default/files/styles/news_article__image_gallery/public/images/202011/MIT-Network-Confidence-01-Press_0.jpg?itok=Wnt9cX6G" width="20%">
Image from: https://news.mit.edu/2020/neural-network-uncertainty-1120
---
# Overview
Generally, when building a machine learning model, some questions usually come to mind: How is the model being optimized? Why does Model A outperform Model B? One of the keys to answering these questions is understanding the loss functions of different models and, furthermore, being able to choose an appropriate loss function, or to define a custom one, based on the goal of the project and the tolerance for each type of error.
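As a small illustration of what a self-defined loss can look like, here is a sketch that assumes a Keras/TensorFlow setup (illustrative only, not something prescribed by this proposal): a loss that blends squared and absolute error.

```
# Sketch of a self-defined loss, assuming a Keras/TensorFlow setup (illustrative only):
# a weighted blend of squared and absolute error.
from tensorflow.keras import backend as K

def blended_loss(y_true, y_pred):
    squared = K.square(y_pred - y_true)
    absolute = K.abs(y_pred - y_true)
    return K.mean(0.8 * squared + 0.2 * absolute)

# model.compile(optimizer='adam', loss=blended_loss)  # passed like any built-in loss
```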
---
# Program Description
For Recurrent Neural Networks, models are optimized by finding the coefficients that minimize the cost function. The cost function is the sum of the losses from each data point, calculated with the loss function. For a simple model the cost function is a parabola-shaped curve, and minimizing it means finding its vertex; it can be solved analytically or with programming algorithms. In this project, I will try to accomplish this with one of the most popular programming solutions, Gradient Descent, or by using optimization tools provided by Python libraries such as scipy.optimize. The dummy dataset used to test the results of the recurrent neural network will be used together with the MNIST data set for training.
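As a toy sketch of the gradient descent idea described above, the snippet below fits a single coefficient by repeatedly stepping down the gradient of a mean squared error cost; the data and learning rate are made up for illustration.

```
import numpy as np

# Toy data: y is roughly 3x (made-up numbers, for illustration only)
x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = 3.0 * x + np.array([0.1, -0.2, 0.0, 0.3, -0.1])

w = 0.0      # coefficient to optimize
lr = 0.01    # assumed learning rate
for _ in range(500):
    grad = 2 * np.mean((w * x - y) * x)   # derivative of the mean squared error cost w.r.t. w
    w -= lr * grad                        # gradient descent step

print(w)   # converges close to 3
```

The same minimum could also be found with scipy.optimize.minimize, one of the optimization tools mentioned above.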
---
# Project Goals and Timeline
- short term: research about optimization techniques and algorithms
- mid term: experiment the optimization techniques on built-in loss functions and self-made loss functions
- long term: try the code on different types of RNNs and compare results
- 1/29/2021 - Create git repository
- 2/12/2021 - Proposal Due
- 2/26/2021 - Stub functions and Example code integration (With documentation)
- 3/12/2021 - Unit Test Integration
- 3/25/2021 - Coding Standards and Linting
- 4/9/2021 - Code Review
- 4/16/2021 - Presentation Video Due
- 4/23/2021 - Final Report and Code due.
---
# Anticipating Challenges
Challenges will include understanding the algebra behind existing loss functions; learning about different backends to build the model on, for example TensorFlow and Theano, and understanding the advantages and disadvantages of both; and, furthermore, learning about Python optimization libraries and how to implement them in neural networks.
|
github_jupyter
|
# <center> Loss Function Optimization for Recurrent Neural Networks </center>
<center>By "Mohamed Stouka"</center>
<img src="https://news.mit.edu/sites/default/files/styles/news_article__image_gallery/public/images/202011/MIT-Network-Confidence-01-Press_0.jpg?itok=Wnt9cX6G" width="20%">
Image from: https://news.mit.edu/2020/neural-network-uncertainty-1120
---
# Overview
Generally, when building a machine learning model, some questions usually come to mind: How is the model being optimized? Why does Model A outperform Model B? One of the keys to answering these questions is understanding the loss functions of different models and, furthermore, being able to choose an appropriate loss function, or to define a custom one, based on the goal of the project and the tolerance for each type of error.
---
# Program Description
For Recurrent Neural Networks, models are optimized by finding the coefficients that minimize the cost function. The cost function is the sum of the losses from each data point, calculated with the loss function. For a simple model the cost function is a parabola-shaped curve, and minimizing it means finding its vertex; it can be solved analytically or with programming algorithms. In this project, I will try to accomplish this with one of the most popular programming solutions, Gradient Descent, or by using optimization tools provided by Python libraries such as scipy.optimize. The dummy dataset used to test the results of the recurrent neural network will be used together with the MNIST data set for training.
---
# Project Goals and Timeline
- short term: research about optimization techniques and algorithms
- mid term: experiment the optimization techniques on built-in loss functions and self-made loss functions
- long term: try the code on different types of RNNs and compare results
- 1/29/2021 - Create git repository
- 2/12/2021 - Proposal Due
- 2/26/2021 - Stub functions and Example code integration (With documentation)
- 3/12/2021 - Unit Test Integration
- 3/25/2021 - Coding Standards and Linting
- 4/9/2021 - Code Review
- 4/16/2021 - Presentation Video Due
- 4/23/2021 - Final Report and Code due.
---
# Anticipating Challenges
Challenges will include understanding the algebra behind existing loss functions; learning about different backends to build the model on, for example TensorFlow and Theano, and understanding the advantages and disadvantages of both; and, furthermore, learning about Python optimization libraries and how to implement them in neural networks.
| 0.852491 | 0.850033 |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set(rc={"figure.dpi":100, 'savefig.dpi':100})
sns.set_context('notebook')
# File paths
IN_POS_FILE = '../feature_data/input_positions.csv'
IN_POS_FILE_TEST = '../feature_data/input_positions_test.csv'
# Keys to the pickle objects
CITY = 'city'
LANE = 'lane'
LANE_NORM = 'lane_norm'
SCENE_IDX = 'scene_idx'
AGENT_ID = 'agent_id'
P_IN = 'p_in'
V_IN = 'v_in'
P_OUT = 'p_out'
V_OUT = 'v_out'
CAR_MASK = 'car_mask'
TRACK_ID = 'track_id'
MIA = 'MIA'
PIT = 'PIT'
# Column headers in the CSV files
WAS_TARGET = 'was_target'
P_IN_X = ['p_in_x' + str(i) for i in range(1, 20)]
P_IN_Y = ['p_in_y' + str(i) for i in range(1, 20)]
def describe(a, desc):
print(f"{desc}")
print(f"min = {np.min(a)}")
print(f"max = {np.max(a)}")
print(f"mean = {np.mean(a)}")
print(f"median = {np.median(a)}")
print(f"standard deviation = {np.std(a)}\n")
def plot_hist(x, y, bins, title, palette, xlbl, ylbl, fname):
"""
Plots a histogram of the two arrays x and y.
"""
fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
sns.set_palette(palette)
sns.histplot(ax=ax1, x=x, y=y, bins=bins, cbar=True)
ax1.set_xlabel(xlbl)
ax1.set_ylabel(ylbl);
ax1.set_title(title);
plt.savefig(fname)
```
# Distribution of input positions (training data)
```
df = pd.read_csv(IN_POS_FILE)
# Get all of the input x and y positions for target agents
xpos_t = df[df[WAS_TARGET] == 1][P_IN_X].to_numpy()
xpos_t = xpos_t.reshape(xpos_t.shape[0] * xpos_t.shape[1])
ypos_t = df[df[WAS_TARGET] == 1][P_IN_Y].to_numpy()
ypos_t = ypos_t.reshape(ypos_t.shape[0] * ypos_t.shape[1])
# Get all of the input x and y positions for non-target agents
xpos_n = df[df[WAS_TARGET] == 0][P_IN_X].to_numpy()
xpos_n = xpos_n.reshape(xpos_n.shape[0] * xpos_n.shape[1])
ypos_n = df[df[WAS_TARGET] == 0][P_IN_Y].to_numpy()
ypos_n = ypos_n.reshape(ypos_n.shape[0] * ypos_n.shape[1])
# Get all of the input x and y positions for all agents
xpos = df[P_IN_X].to_numpy()
xpos = xpos.reshape(xpos.shape[0] * xpos.shape[1])
ypos = df[P_IN_Y].to_numpy()
ypos = ypos.reshape(ypos.shape[0] * ypos.shape[1])
ax = sns.histplot(x=ypos);
ax.set_xlabel('y positions')
ax.set_ylabel('Agent count')
ax.set_title('All agent input y positions');
# City analysis
pitdf = df[df[CITY] == PIT]
miadf = df[df[CITY] == MIA]
# Get all of the input x and y positions for all agents by city
pitxpos = pitdf[P_IN_X].to_numpy()
pitxpos = pitxpos.reshape(pitxpos.shape[0] * pitxpos.shape[1])
pitypos = pitdf[P_IN_Y].to_numpy()
pitypos = pitypos.reshape(pitypos.shape[0] * pitypos.shape[1])
miaxpos = miadf[P_IN_X].to_numpy()
miaxpos = miaxpos.reshape(miaxpos.shape[0] * miaxpos.shape[1])
miaypos = miadf[P_IN_Y].to_numpy()
miaypos = miaypos.reshape(miaypos.shape[0] * miaypos.shape[1])
ax = sns.histplot(x=pitxpos, y=pitypos);
sns.histplot(x=miaxpos, y=miaypos, bins=1000);
describe(xpos, 'input x position')
describe(ypos, 'input y position')
# 'xpos_norm' is not defined above; as an assumption, plot the x positions rescaled to zero mean and unit variance
xpos_norm = (xpos - np.mean(xpos)) / np.std(xpos)
sns.histplot(x=xpos_norm);
# Plot the positions
title = 'Target agent input positions (train)'
palette = 'Reds'
bins = 1000
x_label, y_label = 'xpos', 'ypos'
fname = '../milestone_images/inPosTargTrain'
plot_hist(xpos_t, ypos_t, bins, title, palette, x_label, y_label, fname)
title = 'Non-target-agent input positions (train)'
palette = 'Greens'
fname = '../milestone_images/inPosNonTrain'
plot_hist(xpos_t, ypos_t, bins, title, palette, x_label, y_label, fname)
title = 'All agent input positions (train)'
palette = 'plasma'
fname = '../milestone_images/inPosTrain'
plot_hist(xpos, ypos, bins, title, palette, x_label, y_label, fname)
```
# Distribution of input positions (test)
```
df = pd.read_csv(IN_POS_FILE_TEST)
# Get all of the input x and y positions for target agents
xpos_t = df[df[WAS_TARGET] == 1][P_IN_X].to_numpy()
xpos_t = xpos_t.reshape(xpos_t.shape[0] * xpos_t.shape[1])
ypos_t = df[df[WAS_TARGET] == 1][P_IN_Y].to_numpy()
ypos_t = ypos_t.reshape(ypos_t.shape[0] * ypos_t.shape[1])
# Get all of the input x and y positions for non-target agents
xpos_n = df[df[WAS_TARGET] == 0][P_IN_X].to_numpy()
xpos_n = xpos_n.reshape(xpos_n.shape[0] * xpos_n.shape[1])
ypos_n = df[df[WAS_TARGET] == 0][P_IN_Y].to_numpy()
ypos_n = ypos_n.reshape(ypos_n.shape[0] * ypos_n.shape[1])
# Get all of the input x and y positions for all agents
xpos = df[P_IN_X].to_numpy()
xpos = xpos.reshape(xpos.shape[0] * xpos.shape[1])
ypos = df[P_IN_Y].to_numpy()
ypos = ypos.reshape(ypos.shape[0] * ypos.shape[1])
title = 'Target agent input position (test)'
palette = 'Reds'
bins = 100
x_label, y_label = 'xpos', 'ypos'
fname = '../milestone_images/inPosTargTest'
plot_hist(xpos_t, ypos_t, bins, title, palette, x_label, y_label, fname)
title = 'Non-target-agent input positions (test)'
palette = 'Greens'
fname = '../milestone_images/inPosNonTest'
plot_hist(xpos_t, ypos_t, bins, title, palette, x_label, y_label, fname)
title = 'All agent input positions (test)'
palette = 'plasma'
fname = '../milestone_images/inPosTest'
plot_hist(xpos, ypos, bins, title, palette, x_label, y_label, fname)
```
|
github_jupyter
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
sns.set(rc={"figure.dpi":100, 'savefig.dpi':100})
sns.set_context('notebook')
# File paths
IN_POS_FILE = '../feature_data/input_positions.csv'
IN_POS_FILE_TEST = '../feature_data/input_positions_test.csv'
# Keys to the pickle objects
CITY = 'city'
LANE = 'lane'
LANE_NORM = 'lane_norm'
SCENE_IDX = 'scene_idx'
AGENT_ID = 'agent_id'
P_IN = 'p_in'
V_IN = 'v_in'
P_OUT = 'p_out'
V_OUT = 'v_out'
CAR_MASK = 'car_mask'
TRACK_ID = 'track_id'
MIA = 'MIA'
PIT = 'PIT'
# Column headers in the CSV files
WAS_TARGET = 'was_target'
P_IN_X = ['p_in_x' + str(i) for i in range(1, 20)]
P_IN_Y = ['p_in_y' + str(i) for i in range(1, 20)]
def describe(a, desc):
print(f"{desc}")
print(f"min = {np.min(a)}")
print(f"max = {np.max(a)}")
print(f"mean = {np.mean(a)}")
print(f"median = {np.median(a)}")
print(f"standard deviation = {np.std(a)}\n")
def plot_hist(x, y, bins, title, palette, xlbl, ylbl, fname):
"""
Plots a histogram of the two arrays x and y.
"""
fig, (ax1) = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
sns.set_palette(palette)
sns.histplot(ax=ax1, x=x, y=y, bins=bins, cbar=True)
ax1.set_xlabel(xlbl)
ax1.set_ylabel(ylbl);
ax1.set_title(title);
plt.savefig(fname)
df = pd.read_csv(IN_POS_FILE)
# Get all of the input x and y positions for target agents
xpos_t = df[df[WAS_TARGET] == 1][P_IN_X].to_numpy()
xpos_t = xpos_t.reshape(xpos_t.shape[0] * xpos_t.shape[1])
ypos_t = df[df[WAS_TARGET] == 1][P_IN_Y].to_numpy()
ypos_t = ypos_t.reshape(ypos_t.shape[0] * ypos_t.shape[1])
# Get all of the input x and y positions for non-target agents
xpos_n = df[df[WAS_TARGET] == 0][P_IN_X].to_numpy()
xpos_n = xpos_n.reshape(xpos_n.shape[0] * xpos_n.shape[1])
ypos_n = df[df[WAS_TARGET] == 0][P_IN_Y].to_numpy()
ypos_n = ypos_n.reshape(ypos_n.shape[0] * ypos_n.shape[1])
# Get all of the input x and y positions for all agents
xpos = df[P_IN_X].to_numpy()
xpos = xpos.reshape(xpos.shape[0] * xpos.shape[1])
ypos = df[P_IN_Y].to_numpy()
ypos = ypos.reshape(ypos.shape[0] * ypos.shape[1])
ax = sns.histplot(x=ypos);
ax.set_xlabel('y positions')
ax.set_ylabel('Agent count')
ax.set_title('All agent input y positions');
# City analysis
pitdf = df[df[CITY] == PIT]
miadf = df[df[CITY] == MIA]
# Get all of the input x and y positions for all agents by city
pitxpos = pitdf[P_IN_X].to_numpy()
pitxpos = pitxpos.reshape(pitxpos.shape[0] * pitxpos.shape[1])
pitypos = pitdf[P_IN_Y].to_numpy()
pitypos = pitypos.reshape(pitypos.shape[0] * pitypos.shape[1])
miaxpos = miadf[P_IN_X].to_numpy()
miaxpos = miaxpos.reshape(miaxpos.shape[0] * miaxpos.shape[1])
miaypos = miadf[P_IN_Y].to_numpy()
miaypos = miaypos.reshape(miaypos.shape[0] * miaypos.shape[1])
ax = sns.histplot(x=pitxpos, y=pitypos);
sns.histplot(x=miaxpos, y=miaypos, bins=1000);
describe(xpos, 'input x position')
describe(ypos, 'input y position')
# 'xpos_norm' is not defined above; as an assumption, plot the x positions rescaled to zero mean and unit variance
xpos_norm = (xpos - np.mean(xpos)) / np.std(xpos)
sns.histplot(x=xpos_norm);
# Plot the positions
title = 'Target agent input positions (train)'
palette = 'Reds'
bins = 1000
x_label, y_label = 'xpos', 'ypos'
fname = '../milestone_images/inPosTargTrain'
plot_hist(xpos_t, ypos_t, bins, title, palette, x_label, y_label, fname)
title = 'Non-target-agent input positions (train)'
palette = 'Greens'
fname = '../milestone_images/inPosNonTrain'
plot_hist(xpos_t, ypos_t, bins, title, palette, x_label, y_label, fname)
title = 'All agent input positions (train)'
palette = 'plasma'
fname = '../milestone_images/inPosTrain'
plot_hist(xpos, ypos, bins, title, palette, x_label, y_label, fname)
df = pd.read_csv(IN_POS_FILE_TEST)
# Get all of the input x and y positions for target agents
xpos_t = df[df[WAS_TARGET] == 1][P_IN_X].to_numpy()
xpos_t = xpos_t.reshape(xpos_t.shape[0] * xpos_t.shape[1])
ypos_t = df[df[WAS_TARGET] == 1][P_IN_Y].to_numpy()
ypos_t = ypos_t.reshape(ypos_t.shape[0] * ypos_t.shape[1])
# Get all of the input x and y positions for non-target agents
xpos_n = df[df[WAS_TARGET] == 0][P_IN_X].to_numpy()
xpos_n = xpos_n.reshape(xpos_n.shape[0] * xpos_n.shape[1])
ypos_n = df[df[WAS_TARGET] == 0][P_IN_Y].to_numpy()
ypos_n = ypos_n.reshape(ypos_n.shape[0] * ypos_n.shape[1])
# Get all of the input x and y positions for all agents
xpos = df[P_IN_X].to_numpy()
xpos = xpos.reshape(xpos.shape[0] * xpos.shape[1])
ypos = df[P_IN_Y].to_numpy()
ypos = ypos.reshape(ypos.shape[0] * ypos.shape[1])
title = 'Target agent input position (test)'
palette = 'Reds'
bins = 100
x_label, y_label = 'xpos', 'ypos'
fname = '../milestone_images/inPosTargTest'
plot_hist(xpos_t, ypos_t, bins, title, palette, x_label, y_label, fname)
title = 'Non-target-agent input positions (test)'
palette = 'Greens'
fname = '../milestone_images/inPosNonTest'
plot_hist(xpos_t, ypos_t, bins, title, palette, x_label, y_label, fname)
title = 'All agent input positions (test)'
palette = 'plasma'
fname = '../milestone_images/inPosTest'
plot_hist(xpos, ypos, bins, title, palette, x_label, y_label, fname)
| 0.398641 | 0.690703 |
# WeatherPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
pip install citipy
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# # Output File (CSV)
# output_data_file = "output_data/cities.csv"
# # Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
```
## Generate Cities List
```
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
# If the city is unique, then add it to a our cities list
if city not in cities:
cities.append(city)
# Print the city count to confirm sufficient count
len(cities)
```
### Perform API Calls
* Perform a weather check on each city using a series of successive API calls.
* Include a print log of each city as it's being processed (with the city number and city name).
```
Selectcities =[]
Latitude=[]
Longitude=[]
Max_Temp=[]
SaveHumidity=[]
Cloudiness=[]
Wind_Speed=[]
SaveCountry=[]
SaveDate=[]
# Save config information.
url = "http://api.openweathermap.org/data/2.5/weather?"
units = "metric"
# Build partial query URL
query_url = f"{url}appid={weather_api_key}&units={units}&q="
print("Beginning Data Retrieval")
print("--------------------------------")
#Initialize count for record and set
counter = 0
set_record = 1
# Loop through the list of cities and perform a request for data on each
for city in cities:
try:
        response = requests.get(query_url + city.replace(" ","+")).json()
Latitude.append(response['coord']['lat'])
Longitude.append(response['coord']['lon'])
Max_Temp.append(response['main']['temp_max'])
SaveHumidity.append(response['main']['humidity'])
Cloudiness.append(response['clouds']['all'])
Wind_Speed.append(response['wind']['speed'])
SaveCountry.append(response['sys']['country'])
SaveDate.append(response['dt'])
Selectcities.append(city)
if counter > 48:
counter = 0
set_record += 1
else:
counter += 1
print(f"Processing Record {counter} of Set {set_record} | {city}")
except Exception:
print("City not found. Skipping...")
print("------------------------------\nData Retrieval Complete\n------------------------------")
```
### Convert Raw Data to DataFrame
* Export the city data into a .csv.
* Display the DataFrame
```
# Import cities file as DataFrame
details = {
"City":Selectcities,
"Lat":Latitude,
"Lng":Longitude,
"Max Temp":Max_Temp,
"Humidity":SaveHumidity,
"Cloudiness":Cloudiness,
"Wind Speed":Wind_Speed,
"Country":SaveCountry,
"Date":SaveDate
}
weather_df = pd.DataFrame(details)
weather_df.head()
weather_df.to_csv("../output_data/cities.csv",index=False)
weather_df.count()
weather_df.describe()
```
## Inspect the data and remove the cities where the humidity > 100%.
----
Skip this step if there are no cities that have humidity > 100%.
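This notebook leaves the step unimplemented. A minimal sketch of how the filter could look, assuming the `weather_df` DataFrame built above (the name of the cleaned copy is an arbitrary choice):
```
# Hypothetical cleanup cell: drop any cities reporting humidity above 100%
humid_outliers = weather_df[weather_df["Humidity"] > 100].index
clean_weather_df = weather_df.drop(index=humid_outliers).reset_index(drop=True)
clean_weather_df.describe()
```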
## Plotting the Data
* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
* Save the plotted figures as .pngs.
## Latitude vs. Temperature Plot
```
plt.scatter(weather_df["Lat"],weather_df["Max Temp"],edgecolors="black",facecolors="skyblue")
plt.title("City Latitude vs. Max Temperature (04/01/20)")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature(F)")
plt.grid (b=True,which="major",axis="both",linestyle="-",color="lightgrey")
plt.savefig("../output_data/Latitude vs. Temperature Plot.png")
plt.show()
```
Maximum temperature increases as latitude approaches 0 degrees and decreases as latitude moves away from the equator; in the northern hemisphere, maximum temperature is therefore inversely related to latitude.
## Latitude vs. Humidity Plot
```
plt.scatter(weather_df["Lat"],weather_df["Humidity"],edgecolors="black",facecolors="skyblue")
plt.title("City Latitude vs. Humidity (04/01/20)")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.grid (b=True,which="major",axis="both",linestyle="-",color="lightgrey")
plt.savefig("../output_data/Latitude vs. Humidity Plot.png")
plt.show()
```
The graph above shows that humidity can be high in both the northern and southern hemispheres; there is no clear correlation between latitude and humidity.
## Latitude vs. Cloudiness Plot
```
plt.scatter(weather_df["Lat"],weather_df["Cloudiness"],edgecolors="black",facecolors="skyblue")
plt.title("City Latitude vs. Cloudiness (04/01/20)")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.grid (b=True,which="major",axis="both",linestyle="-",color="lightgrey")
plt.savefig("../output_data/Latitude vs. Cloudiness Plot.png")
plt.show()
```
According to the graph above, cloudiness is higher in the northern hemisphere than in the southern hemisphere.
## Latitude vs. Wind Speed Plot
```
plt.scatter(weather_df["Lat"],weather_df["Wind Speed"],edgecolors="black",facecolors="skyblue")
plt.title("City Latitude vs. Wind Speed (04/01/20)")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mph)")
plt.grid (b=True,which="major",axis="both",linestyle="-",color="lightgrey")
plt.savefig("../output_data/Latitude vs. Wind Speed Plot.png")
plt.show()
```
Wind speeds are higher in the northern hemisphere than in the southern hemisphere.
```
Northern_Hemisphere = weather_df.loc[(weather_df["Lat"] >= 0), :]
Northern_Hemisphere
southern_Hemisphere = weather_df.loc[(weather_df["Lat"] < 0), :]
southern_Hemisphere
```
## Linear Regression
```
x_values = Northern_Hemisphere['Lat']
y_values = Northern_Hemisphere['Max Temp']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Max Temp')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Northern Hemisphere - Max Temp vs. Latitude Linear Regression.png")
plt.show()
```
The graph above shows a negative correlation between latitude and max temperature in the northern hemisphere: as latitude increases, max temperature tends to decrease.
#### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
```
x_values = southern_Hemisphere['Lat']
y_values = southern_Hemisphere['Max Temp']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Max Temp')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Southern Hemisphere - Max Temp vs. Latitude Linear Regression.png")
plt.show()
```
The graph above shows a positive correlation between latitude and max temperature in the southern hemisphere: max temperature increases as latitude approaches 0.
#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
```
x_values = Northern_Hemisphere['Lat']
y_values = Northern_Hemisphere['Humidity']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Humidity')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression.png")
plt.show()
```
The graph above shows a negative correlation between latitude and humidity in the northern hemisphere: as latitude increases, humidity tends to decrease.
#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
```
x_values = southern_Hemisphere['Lat']
y_values = southern_Hemisphere['Humidity']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Humidity')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression.png")
plt.show()
```
There is no correlation between latitude and humidity in the southern hemisphere.
#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
```
x_values = Northern_Hemisphere['Lat']
y_values = Northern_Hemisphere['Cloudiness']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression.png")
plt.show()
```
The graph shows no clear correlation between latitude and cloudiness in the northern hemisphere; cloudiness varies widely across latitudes.
#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
```
x_values = southern_Hemisphere['Lat']
y_values = southern_Hemisphere['Cloudiness']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression.png")
plt.show()
```
The graph shows no correlation between latitude and cloudiness in the southern hemisphere.
#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
```
x_values = Northern_Hemisphere['Lat']
y_values = Northern_Hemisphere['Wind Speed']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression.png")
plt.show()
```
The graph shows no correlation between latitude and wind speed in the northern hemisphere.
#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
```
x_values = southern_Hemisphere['Lat']
y_values = southern_Hemisphere['Wind Speed']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression.png")
plt.show()
```
#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
The graph shows no correlation between latitude and wind speed in the southern hemisphere.
|
github_jupyter
|
pip install citipy
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# # Output File (CSV)
# output_data_file = "output_data/cities.csv"
# # Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
# If the city is unique, then add it to a our cities list
if city not in cities:
cities.append(city)
# Print the city count to confirm sufficient count
len(cities)
Selectcities =[]
Latitude=[]
Longitude=[]
Max_Temp=[]
SaveHumidity=[]
Cloudiness=[]
Wind_Speed=[]
SaveCountry=[]
SaveDate=[]
# Save config information.
url = "http://api.openweathermap.org/data/2.5/weather?"
units = "metric"
# Build partial query URL
query_url = f"{url}appid={weather_api_key}&units={units}&q="
print("Beginning Data Retrieval")
print("--------------------------------")
#Initialize count for record and set
counter = 0
set_record = 1
# Loop through the list of cities and perform a request for data on each
for city in cities:
try:
response = requests.get(query_url + city.replace(" ","&")).json()
Latitude.append(response['coord']['lat'])
Longitude.append(response['coord']['lon'])
Max_Temp.append(response['main']['temp_max'])
SaveHumidity.append(response['main']['humidity'])
Cloudiness.append(response['clouds']['all'])
Wind_Speed.append(response['wind']['speed'])
SaveCountry.append(response['sys']['country'])
SaveDate.append(response['dt'])
Selectcities.append(city)
if counter > 48:
counter = 0
set_record += 1
else:
counter += 1
print(f"Processing Record {counter} of Set {set_record} | {city}")
except Exception:
print("City not found. Skipping...")
print("------------------------------\nData Retrieval Complete\n------------------------------")
# Import cities file as DataFrame
details = {
"City":Selectcities,
"Lat":Latitude,
"Lng":Longitude,
"Max Temp":Max_Temp,
"Humidity":SaveHumidity,
"Cloudiness":Cloudiness,
"Wind Speed":Wind_Speed,
"Country":SaveCountry,
"Date":SaveDate
}
weather_df = pd.DataFrame(details)
weather_df.head()
weather_df.to_csv("../output_data/cities.csv",index=False)
weather_df.count()
weather_df.describe()
plt.scatter(weather_df["Lat"],weather_df["Max Temp"],edgecolors="black",facecolors="skyblue")
plt.title("City Latitude vs. Max Temperature (04/01/20)")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature(F)")
plt.grid (b=True,which="major",axis="both",linestyle="-",color="lightgrey")
plt.savefig("../output_data/Latitude vs. Temperature Plot.png")
plt.show()
plt.scatter(weather_df["Lat"],weather_df["Humidity"],edgecolors="black",facecolors="skyblue")
plt.title("City Latitude vs. Humidity (04/01/20)")
plt.xlabel("Latitude")
plt.ylabel("Humidity (%)")
plt.grid (b=True,which="major",axis="both",linestyle="-",color="lightgrey")
plt.savefig("../output_data/Latitude vs. Humidity Plot.png")
plt.show()
plt.scatter(weather_df["Lat"],weather_df["Cloudiness"],edgecolors="black",facecolors="skyblue")
plt.title("City Latitude vs. Cloudiness (04/01/20)")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness (%)")
plt.grid (b=True,which="major",axis="both",linestyle="-",color="lightgrey")
plt.savefig("../output_data/Latitude vs. Cloudiness Plot.png")
plt.show()
plt.scatter(weather_df["Lat"],weather_df["Wind Speed"],edgecolors="black",facecolors="skyblue")
plt.title("City Latitude vs. Wind Speed (04/01/20)")
plt.xlabel("Latitude")
plt.ylabel("Wind Speed (mph)")
plt.grid (b=True,which="major",axis="both",linestyle="-",color="lightgrey")
plt.savefig("../output_data/Latitude vs. Wind Speed Plot.png")
plt.show()
Northern_Hemisphere = weather_df.loc[(weather_df["Lat"] >= 0), :]
Northern_Hemisphere
southern_Hemisphere = weather_df.loc[(weather_df["Lat"] < 0), :]
southern_Hemisphere
x_values = Northern_Hemisphere['Lat']
y_values = Northern_Hemisphere['Max Temp']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Max Temp')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Northern Hemisphere - Max Temp vs. Latitude Linear Regression.png")
plt.show()
x_values = southern_Hemisphere['Lat']
y_values = southern_Hemisphere['Max Temp']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Max Temp')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Southern Hemisphere - Max Temp vs. Latitude Linear Regression.png")
plt.show()
x_values = Northern_Hemisphere['Lat']
y_values = Northern_Hemisphere['Humidity']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Humidity')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression.png")
plt.show()
x_values = southern_Hemisphere['Lat']
y_values = southern_Hemisphere['Humidity']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Humidity')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression.png")
plt.show()
x_values = Northern_Hemisphere['Lat']
y_values = Northern_Hemisphere['Cloudiness']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression.png")
plt.show()
x_values = southern_Hemisphere['Lat']
y_values = southern_Hemisphere['Cloudiness']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Cloudiness')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression.png")
plt.show()
x_values = Northern_Hemisphere['Lat']
y_values = Northern_Hemisphere['Wind Speed']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression.png")
plt.show()
x_values = southern_Hemisphere['Lat']
y_values = southern_Hemisphere['Wind Speed']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values,y_values)
plt.plot(x_values,regress_values,"r-")
plt.annotate(line_eq,(6,10),fontsize=15,color="red")
plt.xlabel('Latitude')
plt.ylabel('Wind Speed')
print(f"The r-squared is: {rvalue**2}")
plt.savefig("../output_data/Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression.png")
plt.show()
| 0.42668 | 0.797399 |
# Linear Algebra Review
```
import numpy as np
from numpy.random import randn
```
## Matrices
$$ A = \begin{bmatrix} 123, & 343, & 100\\
33, & 0, & -50 \end{bmatrix} $$
```
A = np.array([[123, 343, 100],
[ 33, 0, -50]])
print (A )
print (A.shape )
print (A.shape[0] )
print (A.shape[1] )
B = np.array([[5, 3, 2, 1, 4],
[0, 2, 1, 3, 8]])
print (B )
print (B.shape )
print (B.shape[0] )
print (B.shape[1] )
```
$$ A = \begin{bmatrix} 123, & 343, & 100\\
33, & 0, & -50 \end{bmatrix} =
\begin{bmatrix} a_{0,0}, & a_{0,1}, & a_{0,2}\\
a_{1,0}, & a_{1,1}, & a_{1,2} \end{bmatrix} $$
$$ a_{i,j} $$ is the element in the i-th row and j-th column.
In NumPy, for a two-dimensional matrix, the first dimension is the number of rows, `shape[0]`, and
the second dimension is the number of columns, `shape[1]`.
The first index `i` of `A[i,j]` is the row index, and the second index `j` is the column
index.
```
print ('A=\n', A )
for i in range(A.shape[0]):
for j in range(A.shape[1]):
print ('A[%d,%d] = %d' % (i,j, A[i,j]) )
```
## Column vector
A column vector is a two-dimensional matrix with a single column: its shape is `(n,1)`, i.e. it has `n` rows and `1` column.
```
B = np.array([[3],
[5]])
print ('B=\n', B )
print ('B.shape:', B.shape )
```
## Matrix addition
$$ C = A + B $$
$$ c_{i,j} = a_{i,j} + b_{i,j} $$ for all elements of $A$, $B$ and $C$.
It is important that the dimensions of these three matrices be the same.
```
A = (10*randn(2,3)).astype(int)
B = randn(2,3)
C = A + B
print ('A=\n',A )
print ('B=\n',B )
print ('C=\n',C )
```
## Matrix multiplication
### Scalar-matrix multiplication
$$ \beta A = \begin{bmatrix} \beta a_{0,0} & \beta a_{0,1} & \ldots & \beta a_{0,m-1}\\
                             \beta a_{1,0} & \beta a_{1,1} & \ldots & \beta a_{1,m-1} \\
                             \vdots & \vdots & \ddots & \vdots \\
                             \beta a_{n-1,0} & \beta a_{n-1,1} & \ldots & \beta a_{n-1,m-1}
             \end{bmatrix} $$
```
print ('A=\n', A )
print()
print ('4 * A=\n', 4 * A )
```
### Matrix multiplication
$$ C_{(3,4)} = A_{(3,2)} B_{(2,4)} = \begin{bmatrix}
a_{0,0} & a_{0,1}\\
a_{1,0} & a_{1,1}\\
a_{2,0} & a_{2,1}\\
\end{bmatrix} \begin{bmatrix}
b_{0,0} & b_{0,1} & b_{0,2} & b_{0,3} \\
b_{1,0} & b_{1,1} & b_{1,2} & b_{1,3} \\
\end{bmatrix} $$
$$ C_{(3,4)} = \begin{bmatrix}
a_{0,0} b_{0,0} + a_{0,1} b_{1,0} & a_{0,0} b_{0,1} + a_{0,1} b_{1,1} & a_{0,0} b_{0,2} + a_{0,1} b_{1,2} & a_{0,0} b_{0,3} + a_{0,1} b_{1,3} \\
a_{1,0} b_{0,0} + a_{1,1} b_{1,0} & a_{1,0} b_{0,1} + a_{1,1} b_{1,1} & a_{1,0} b_{0,2} + a_{1,1} b_{1,2} & a_{1,0} b_{0,3} + a_{1,1} b_{1,3} \\
a_{2,0} b_{0,0} + a_{2,1} b_{1,0} & a_{2,0} b_{0,1} + a_{2,1} b_{1,1} & a_{2,0} b_{0,2} + a_{2,1} b_{1,2} & a_{2,0} b_{0,3} + a_{2,1} b_{1,3} \\
\end{bmatrix}
$$
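As a numerical check (this example is not in the original notebook; the matrices below are illustrative), the same product can be computed with NumPy's `@` operator or `np.dot`:
```
A = np.array([[1, 2],
              [3, 4],
              [5, 6]])        # shape (3, 2)
B = np.array([[1, 0, 2, 1],
              [0, 1, 1, 2]])  # shape (2, 4)
C = A @ B                     # same as np.dot(A, B); result has shape (3, 4)
print ('C=\n', C )
print ('C.shape:', C.shape )
```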
|
github_jupyter
|
import numpy as np
from numpy.random import randn
A = np.array([[123, 343, 100],
[ 33, 0, -50]])
print (A )
print (A.shape )
print (A.shape[0] )
print (A.shape[1] )
B = np.array([[5, 3, 2, 1, 4],
[0, 2, 1, 3, 8]])
print (B )
print (B.shape )
print (B.shape[0] )
print (B.shape[1] )
print ('A=\n', A )
for i in range(A.shape[0]):
for j in range(A.shape[1]):
print ('A[%d,%d] = %d' % (i,j, A[i,j]) )
B = np.array([[3],
[5]])
print ('B=\n', B )
print ('B.shape:', B.shape )
A = (10*randn(2,3)).astype(int)
B = randn(2,3)
C = A + B
print ('A=\n',A )
print ('B=\n',B )
print ('C=\n',C )
print ('A=\n', A )
print()
print ('4 * A=\n', 4 * A )
| 0.124319 | 0.962179 |
```
import os
import pandas as pd
import numpy as np
from _utils.clean import normalize, normalize_frame, exclude_3sd
derivs_dir = os.path.join('..','derivatives')
scales_dir = os.path.join(derivs_dir,'qualtrics','2.subscaled')
output_dir = os.path.join(derivs_dir,'05.subject-level')
try:
    os.mkdir(output_dir)
except OSError as e:
    print(e)
```
# Import data
```
fname=os.path.join(derivs_dir,'sub-all_task-all_VALUES.xlsx')
behav_data = pd.read_excel(fname).rename(columns={'subjnum':'ssid'})
fname=os.path.join(scales_dir,'all_subscales.csv')
scale_data = pd.read_csv(fname)
behav_data.head()
scale_data.head()
```
# Normalize PANAS and BISBAS subscales
```
keys_to_normalize = [
'PAS','NAS','BIS','BAS_fs','BAS_rr','BAS_dr','DMS_i','DMS_r','DMS_d','DMS_s','DMS_a',
]
df = scale_data.copy(deep=True)
df.columns=[
'ssid','zip','sleep','stress','fin_dif','PAS','NAS','BAS_dr','BAS_fs','BAS_rr','BIS',
'DMS_i','DMS_r','DMS_d','DMS_s','DMS_a','fin_lit'
]
df['study'] = df['ssid'].astype(str).str[0]
z_keys=[]
for key in keys_to_normalize:
z_key = 'z_'+ key
z_keys.append(z_key)
df[z_key] = df.apply(normalize,axis=1)
df.head()
columns = z_keys + ['ssid']
norms = df[columns]
norms.head()
```
### Natural Logarithm of subscales
We take the natural log of each subscale's RAW score.
*NOT* their normalized score, because we can't take the log of a negative.
```
ln_keys = ['ln_'+key for key in keys_to_normalize]
df[ln_keys] = df[keys_to_normalize].apply(np.log,axis=1)
df.head()
logs = df[ln_keys + ['ssid']]
logs.head()
```
# 3sd trial exclusions
```
behav_data.columns
df=behav_data[['ssid','block','trial','domain','estimation','trueprob-norm','waschoiceoptimal','val-estdiff-valid']].rename(
columns={'val-estdiff-valid':'val_estdiff_valid'}
)
subj_3sd = df.groupby('ssid').std()['val_estdiff_valid'] * 3
subj_3sd.head()
subj_means = df.groupby('ssid').mean()['val_estdiff_valid']
subj_means.head()
df['valError_3sd'] = df.apply(exclude_3sd,axis=1)
df.head()
subj_means = df.groupby('ssid').mean()['valError_3sd']
subj_means = pd.DataFrame(subj_means).reset_index()
subj_means.head()
```
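`exclude_3sd` is imported from `_utils.clean` and its body is not shown here. A minimal standalone sketch of what it might do, assuming it can see the per-subject statistics computed above (`subj_means`, `subj_3sd`, both indexed by `ssid`) and returns `NaN` for excluded trials:
```
def exclude_3sd_sketch(row):
    """Hypothetical stand-in for _utils.clean.exclude_3sd.

    Keeps a trial's value-estimation error only if it lies within 3 SD of that
    subject's own mean; otherwise returns NaN so it drops out of later means."""
    err = row['val_estdiff_valid']
    if abs(err - subj_means[row['ssid']]) <= subj_3sd[row['ssid']]:
        return err
    return np.nan
```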
# gender-judgment trial exclusions
# subject-level means
```
optimal_choice_freq = df.groupby('ssid').mean().reset_index()[['ssid','waschoiceoptimal']]
optimal_choice_freq.head()
domain_means = df.groupby(['ssid','domain']).mean().reset_index()
domain_means.head()
gain_ave_val_error = domain_means[domain_means['domain'] == 'GAIN'][['ssid','valError_3sd']]
gain_ave_val_error = gain_ave_val_error.set_index('ssid')
loss_ave_val_error = domain_means[domain_means['domain'] == 'LOSS'][['ssid','valError_3sd']]
loss_ave_val_error = loss_ave_val_error.set_index('ssid')
```
### Framing Normalization
We want to normalize for the way the value estimation question is framed.
We're going to multiply valError means by `1` for subjects who were estimating the probability that the stock is *good*, and multiply means by `-1` for subjects who were estimating the probability that the stock is *bad*.
100s: `* 1`
200s: `* -1`
300s: `* 1`
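The actual `normalize_frame` is imported from `_utils.clean` and is not shown here. A minimal sketch of what such a helper might look like, based only on the description above (the `_sketch` suffix and the reliance on the `ssid` index are assumptions):
```
def normalize_frame_sketch(row):
    """Hypothetical stand-in for _utils.clean.normalize_frame.

    Flips the sign of the value-error mean for 200-series subjects, whose
    estimation question was framed as the probability that the stock is bad."""
    sign = -1 if str(row.name).startswith('2') else 1  # row.name is the ssid index
    return sign * row['valError_3sd']
```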
```
means_df = df.groupby('ssid').mean()[['valError_3sd']]
means_df[85:91]
nf_valerror = pd.DataFrame(means_df.apply(normalize_frame,axis=1))
nf_valerror = nf_valerror.rename(columns={0:'nf_valError'})
nf_valerror[85:91]
gain_ave_val_error[85:91]
nf_valerr_gain = pd.DataFrame(gain_ave_val_error.apply(normalize_frame,axis=1))
nf_valerr_gain = nf_valerr_gain.rename(columns={0:'nf_gainValError'})
nf_valerr_gain[85:91]
loss_ave_val_error[85:91]
nf_valerr_loss = pd.DataFrame(loss_ave_val_error.apply(normalize_frame,axis=1))
nf_valerr_loss = nf_valerr_loss.rename(columns={0:'nf_lossValError'})
nf_valerr_loss[85:91]
```
# output
```
output = pd.DataFrame({
'ssid':list(subj_means['ssid']),
'valError':list(subj_means['valError_3sd']),
})
output.head()
output = output.merge(optimal_choice_freq).rename(columns={'waschoiceoptimal':'optimal_choice_freq'})
output.head()
output = output.merge(gain_ave_val_error.rename(columns={'valError_3sd':'gainValError'}).reset_index())
output.head()
output = output.merge(loss_ave_val_error.rename(columns={'valError_3sd':'lossValError'}).reset_index())
output.head()
output = output.merge(nf_valerror.reset_index())
output = output.merge(nf_valerr_gain.reset_index())
output = output.merge(nf_valerr_loss.reset_index())
output[85:91]
output['valWedge'] = abs(output['gainValError'] - output['lossValError'])
output['nf_valWedge'] = abs(output['nf_gainValError'] - output['nf_lossValError'])
output.head()
output = output.merge(norms)
output.head()
output = output.merge(logs)
output.head()
fname=os.path.join(output_dir,'subject-level.csv')
#columns = ['ssid','valError','gainValError','lossValError','valWedge'] + zkeys
output.to_csv(fname,index=False)
```
|
github_jupyter
|
import os
import pandas as pd
import numpy as np
from _utils.clean import normalize, normalize_frame, exclude_3sd
derivs_dir = os.path.join('..','derivatives')
scales_dir = os.path.join(derivs_dir,'qualtrics','2.subscaled')
output_dir = os.path.join(derivs_dir,'05.subject-level')
try:os.mkdir(output_dir)
except WindowsError as e: print(e)
fname=os.path.join(derivs_dir,'sub-all_task-all_VALUES.xlsx')
behav_data = pd.read_excel(fname).rename(columns={'subjnum':'ssid'})
fname=os.path.join(scales_dir,'all_subscales.csv')
scale_data = pd.read_csv(fname)
behav_data.head()
scale_data.head()
keys_to_normalize = [
'PAS','NAS','BIS','BAS_fs','BAS_rr','BAS_dr','DMS_i','DMS_r','DMS_d','DMS_s','DMS_a',
]
df = scale_data.copy(deep=True)
df.columns=[
'ssid','zip','sleep','stress','fin_dif','PAS','NAS','BAS_dr','BAS_fs','BAS_rr','BIS',
'DMS_i','DMS_r','DMS_d','DMS_s','DMS_a','fin_lit'
]
df['study'] = df['ssid'].astype(str).str[0]
z_keys=[]
for key in keys_to_normalize:
z_key = 'z_'+ key
z_keys.append(z_key)
df[z_key] = df.apply(normalize,axis=1)
df.head()
columns = z_keys + ['ssid']
norms = df[columns]
norms.head()
ln_keys = ['ln_'+key for key in keys_to_normalize]
df[ln_keys] = df[keys_to_normalize].apply(np.log,axis=1)
df.head()
logs = df[ln_keys + ['ssid']]
logs.head()
behav_data.columns
df=behav_data[['ssid','block','trial','domain','estimation','trueprob-norm','waschoiceoptimal','val-estdiff-valid']].rename(
columns={'val-estdiff-valid':'val_estdiff_valid'}
)
subj_3sd = df.groupby('ssid').std()['val_estdiff_valid'] * 3
subj_3sd.head()
subj_means = df.groupby('ssid').mean()['val_estdiff_valid']
subj_means.head()
df['valError_3sd'] = df.apply(exclude_3sd,axis=1)
df.head()
subj_means = df.groupby('ssid').mean()['valError_3sd']
subj_means = pd.DataFrame(subj_means).reset_index()
subj_means.head()
optimal_choice_freq = df.groupby('ssid').mean().reset_index()[['ssid','waschoiceoptimal']]
optimal_choice_freq.head()
domain_means = df.groupby(['ssid','domain']).mean().reset_index()
domain_means.head()
gain_ave_val_error = domain_means[domain_means['domain'] == 'GAIN'][['ssid','valError_3sd']]
gain_ave_val_error = gain_ave_val_error.set_index('ssid')
loss_ave_val_error = domain_means[domain_means['domain'] == 'LOSS'][['ssid','valError_3sd']]
loss_ave_val_error = loss_ave_val_error.set_index('ssid')
means_df = df.groupby('ssid').mean()[['valError_3sd']]
means_df[85:91]
nf_valerror = pd.DataFrame(means_df.apply(normalize_frame,axis=1))
nf_valerror = nf_valerror.rename(columns={0:'nf_valError'})
nf_valerror[85:91]
gain_ave_val_error[85:91]
nf_valerr_gain = pd.DataFrame(gain_ave_val_error.apply(normalize_frame,axis=1))
nf_valerr_gain = nf_valerr_gain.rename(columns={0:'nf_gainValError'})
nf_valerr_gain[85:91]
loss_ave_val_error[85:91]
nf_valerr_loss = pd.DataFrame(loss_ave_val_error.apply(normalize_frame,axis=1))
nf_valerr_loss = nf_valerr_loss.rename(columns={0:'nf_lossValError'})
nf_valerr_loss[85:91]
output = pd.DataFrame({
'ssid':list(subj_means['ssid']),
'valError':list(subj_means['valError_3sd']),
})
output.head()
output = output.merge(optimal_choice_freq).rename(columns={'waschoiceoptimal':'optimal_choice_freq'})
output.head()
output = output.merge(gain_ave_val_error.rename(columns={'valError_3sd':'gainValError'}).reset_index())
output.head()
output = output.merge(loss_ave_val_error.rename(columns={'valError_3sd':'lossValError'}).reset_index())
output.head()
output = output.merge(nf_valerror.reset_index())
output = output.merge(nf_valerr_gain.reset_index())
output = output.merge(nf_valerr_loss.reset_index())
output[85:91]
output['valWedge'] = abs(output['gainValError'] - output['lossValError'])
output['nf_valWedge'] = abs(output['nf_gainValError'] - output['nf_lossValError'])
output.head()
output = output.merge(norms)
output.head()
output = output.merge(logs)
output.head()
fname=os.path.join(output_dir,'subject-level.csv')
#columns = ['ssid','valError','gainValError','lossValError','valWedge'] + zkeys
output.to_csv(fname,index=False)
| 0.171373 | 0.550728 |
# Set -1: Dictionary:
### Q1. What are the ways of creating an empty dictionary?
```
#dictionary
#two ways of creating an empty dictionary
d = {}
dd = dict()
```
### Q2. Here is a dictionary: d = {'k1':123,'k2':'123','k3':[1,2,3],'k4':['1','2','3'],'k5':(1,2,3)}. Use indexing and slicing method to obtain '2' from key 4?
```
d = {'k1':123,'k2':'123','k3':[1,2,3],'k4':['1','2','3'],'k5':(1,2,3)}
d['k4'][1]
```
### Q3. Here is a dictionary: d = {'k1':'v1','k2':'v2','k3':'v3','k4':'v4'}. Change the value of the key 3 to 'zzzzzz'.
```
d = {'k1':'v1','k2':'v2','k3':'v3','k4':'v4'}
d['k3']
for i in d:
if i=='k3':
d[i] = 'zzzzzz'
print(d['k3'])
print(d)
```
### Q4. Can a dictionary be converted to a set?
### Yes: `set(d)` produces a set containing the dictionary's keys.
```
d = {'k1':123,'k2':'123','k3':[1,2,3],'k4':['1','2','3'],'k5':(1,2,3)}
se = set(d)
print(se)
```
# Set -2: List:
### Q5. Given a list: a = [6,7,2,5,3,9,7,11,45,31], sort the elements in the descending order
```
a = [6,7,2,5,3,9,7,11,45,31]
a.sort()
b = a.copy()
b.sort(reverse = True)
print(b)
```
### Q6. Count the frequency of the number 3 in the given list: a = [1,2,2,3,3,3,4,4,4,4]
```
a = [1,2,2,3,3,3,4,4,4,4]
a.count(3)
```
### Q7. Can a nested list be converted to a set?
### No: lists are unhashable, so the conversion raises a `TypeError`.
```
a = [[23,125,32],232,32,56,[325,52],64,[34,43,12,23]]
se = set(a)
print(se)
```
# Set -3: Sets:
### Q8. Convert a list to a set, explain any differences between the resulting set and the list.
```
l = [1,2,2,3,3,3,4,4,4,4]
s = set(l)
print(s)
```
The resulting set removes duplicate elements and has no defined order, whereas the list keeps duplicates and preserves insertion order.
### Q9. Show the method of discard()
```
s = {1,2,3,4,5}
s.discard(6)
print(s)
s.discard(4)
print(s)
```
### Q10. Show a set comprehension for the expression: x**2 for a set s={1,2,3,4,5}
```
s = {1,2,3,4,5}
print({t**2 for t in s})
```
# Set -4: Strings:
### Q11. Take a str = "DatA SCienCe", give the result in lowercase.
```
str = "DatA SCienCe"
str.lower()
```
### Q12. Swap the case of the str = "DatA SCienCe"
```
str = "DatA SCienCe"
str.swapcase()
```
# Set -5: Tuples:
### Q13. Give an instance that a tuple is an immutable data type.
```
t = (1,2,3)
print(t[0])
t[0] = 4
```
As the example shows, assigning to `t[0]` raises a `TypeError`: a tuple's elements cannot be changed once it is created.
### Q14. You have a nested tuple: tu = (1,(2,2),(3,3,3),4,5,6), devise a method to get the result: ((3, 3, 3), 4, 5, 6)
```
tu = (1,(2,2),(3,3,3),4,5,6)
tu[2::]
```
### Q15. Convert a nested tuple to a set:
```
tu = ((23,125,32),232,32,56,(325,52),64,(34,43,12,23))
se = set(tu)
print(se)
```
# Set -6:Functions:
### Q16. Given a list: li = [-1,0,1,-2,3,-4,5,-6,7,-8], filter the negative and positive integers from it.
```
li = [-1,0,1,-2,3,-4,5,-6,7,-8]
neg = []
pos = []
neg = list(filter(lambda x:x<0,li))
pos = list(filter(lambda x:x>=0,li))
print(neg)
print(pos)
```
### Q17. Give a lambda function to list prime numbers upto 100:
```
primelist = [2]
prime = lambda x,y:x%y!=0
flag = 0
for i in range(1,100):
for j in range(2,i):
if(prime(i,j)):
flag+=1
else:
flag = 0
break
if flag>0:
primelist.append(i)
print(primelist)
```
### Q18. Add individual elements of two lists using map function:
```
a = [1,2,3,4,5]
b = [6,7,8,9]
print(list(map(lambda x,y:x+y,a,b)))
```
### Q19. Find the sum of all the elements in a given list:
```
from functools import reduce
li = [1,2,3,4,5,6,7,8,9,10]
reduce(lambda x,y:x+y,li)
```
### Q20. Find the cube of a number using the concept of the generators:
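Q20 is left unanswered in the notebook. One possible answer using a generator function (a generator expression such as `(x**3 for x in range(1, 6))` would work equally well):
```
def cube_gen(n):
    """Yield the cube of every integer from 1 to n, one value at a time."""
    for i in range(1, n + 1):
        yield i ** 3

g = cube_gen(5)
print(next(g))   # 1
for c in g:
    print(c)     # 8, 27, 64, 125
```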
|
github_jupyter
|
#dicionary
#two ways of creating an empty dictionary
d = {}
dd = dict()
d = {'k1':123,'k2':'123','k3':[1,2,3],'k4':['1','2','3'],'k5':(1,2,3)}
d['k4'][1]
d = {'k1':'v1','k2':'v2','k3':'v3','k4':'v4'}
d['k3']
for i in d:
if i=='k3':
d[i] = 'zzzzzz'
print(d['k3'])
print(d)
d = {'k1':123,'k2':'123','k3':[1,2,3],'k4':['1','2','3'],'k5':(1,2,3)}
se = set(d)
print(se)
a = [6,7,2,5,3,9,7,11,45,31]
a.sort()
b = a.copy()
b.sort(reverse = True)
print(b)
a = [1,2,2,3,3,3,4,4,4,4]
a.count(3)
a = [[23,125,32],232,32,56,[325,52],64,[34,43,12,23]]
se = set(a)
print(se)
l = [1,2,2,3,3,3,4,4,4,4]
s = set(l)
print(s)
s = {1,2,3,4,5}
s.discard(6)
print(s)
s.discard(4)
print(s)
s1 = {1,2,34,5}
print(set([t**2 for t in s1]))
str = "DatA SCienCe"
str.lower()
str = "DatA SCienCe"
str.swapcase()
t = (1,2,3)
print(t[0])
t[0] = 4
tu = (1,(2,2),(3,3,3),4,5,6)
tu[2::]
tu = ((23,125,32),232,32,56,(325,52),64,(34,43,12,23))
se = set(tu)
print(se)
li = [-1,0,1,-2,3,-4,5,-6,7,-8]
neg = []
pos = []
neg = list(filter(lambda x:x<0,li))
pos = list(filter(lambda x:x>=0,li))
print(neg)
print(pos)
primelist = [2]
prime = lambda x,y:x%y!=0
flag = 0;
for i in range(1,100):
for j in range(2,i):
if(prime(i,j)):
flag+=1
else:
flag = 0
break
if flag>0:
primelist.append(i)
print(primelist)
a = [1,2,3,4,5]
b = [6,7,8,9]
print(list(map(lambda x,y:x+y,a,b)))
from functools import reduce
li = [1,2,3,4,5,6,7,8,9,10]
reduce(lambda x,y:x+y,li)
| 0.060557 | 0.952486 |
# WeatherPy
----
#### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
```
## Generate Cities List
```
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(low=-90.000, high=90.000, size=1500)
lngs = np.random.uniform(low=-180.000, high=180.000, size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
# If the city is unique, then add it to a our cities list
if city not in cities:
cities.append(city)
# Print the city count to confirm sufficient count
len(cities)
```
### Perform API Calls
* Perform a weather check on each city using a series of successive API calls.
* Include a print log of each city as it's being processed (with the city number and city name).
```
url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + weather_api_key
city_name = []
cloudiness = []
country = []
date = []
humidity = []
lat = []
lng = []
max_temp = []
wind_speed = []
# Start the call counter
record = 1
x =1
# Log file print statement
print(f"Beginning Data Retrieval")
print(f"-------------------------------")
#Loop through the cities in the city list
for city in cities:
# Try statement to append calls where value is found
    # Not all calls return data, as OpenWeatherMap will not have records for all of the cities generated by the citipy module
try:
response = requests.get(f"{url}&q={city}").json()
city_name.append(response["name"])
cloudiness.append(response["clouds"]["all"])
country.append(response["sys"]["country"])
date.append(response["dt"])
humidity.append(response["main"]["humidity"])
max_temp.append(response["main"]["temp_max"])
lat.append(response["coord"]["lat"])
lng.append(response["coord"]["lon"])
wind_speed.append(response["wind"]["speed"])
city_record = response["name"]
print(f"Processing Record {record} of set {x} | {city_record}")
#print(f"{url}&q={city}")
# Increase counter by one
record= record + 1
# Wait a second in loop to not over exceed rate limit of API
time.sleep(1.01)
# If no record found "skip" to next call
except:
print("City not found. Skipping...")
continue
print("------------------------------\nData Retrieval Complete\n------------------------------")
print(response)
```
### Convert Raw Data to DataFrame
* Export the city data into a .csv.
* Display the DataFrame
```
weather_dict = {
"City_ID": city_name,
"Cloudiness":cloudiness,
"Country":country,
"Date":date,
"Humidity": humidity,
"Lat":lat,
"Lng":lng,
"Max Temp": max_temp,
"Wind Speed":wind_speed
}
weather_data = pd.DataFrame(weather_dict)
weather_data
```
### Plotting the Data
* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
* Save the plotted figures as .pngs.
#### Latitude vs. Temperature Plot
```
plt.scatter(weather_data["Lat"],weather_data["Max Temp"], marker= "o")
plt.title("City Latitude vs. Max Temperature(05/28/19)")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature(F)")
plt.grid(True)
plt.savefig("City Latitude vs. Max Temperature.png")
plt.show()
```
Analysis: The graph shows the relationship between city latitude and max temperature. There is no dramatic increase from latitude -60 to 0, but temperature drops continuously as latitude moves north; the cities with the highest max temperatures are around latitude 0.
#### Latitude vs. Humidity Plot
```
plt.scatter(weather_data["Lat"],weather_data["Humidity"], marker= "o")
plt.title("City Latitude vs. Humidity(05/28/19)")
plt.xlabel("Latitude")
plt.ylabel("Humidity(%)")
plt.grid(True)
plt.savefig("City Latitude vs. Humidity.png")
plt.show()
```
Analysis: The graph shows the relationship between city latitude and humidity. The cities with the highest humidity are around latitude 60.
#### Latitude vs. Cloudiness Plot
```
plt.scatter(weather_data["Lat"],weather_data["Cloudiness"], marker= "o")
plt.title("City Latitude vs. Cloudiness(08/22/19)")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness(%)")
plt.grid(True)
plt.savefig("City Latitude vs. Cloudiness.png")
plt.show()
```
Analysis: The graph shows the relationship between city latitude and cloudiness. The cities with the highest cloudiness are between latitudes 40 and 70.
#### Latitude vs. Wind Speed Plot
```
plt.scatter(weather_data["Lat"],weather_data["Wind Speed"], marker= "o")
plt.title("City Latitude vs. Wind Speed(05/28/19)")
plt.xlabel("Latitude")
plt.ylabel("WInd Speed(mph)")
plt.grid(True)
plt.savefig("City Latitude vs. Wind Speed.png")
plt.show()
```
Analysis: The graph shows the relationship between city latitude and wind speed. The highest wind speeds are found between latitudes 40 and 70.
## Linear Regression
```
# OPTIONAL: Create a function to create Linear Regression plots
# Create Northern and Southern Hemisphere DataFrames
```
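The regression cells below were left empty in this notebook. A minimal sketch of the optional helper, assuming the `weather_data` DataFrame built above and the `linregress`/`matplotlib` imports from the first cell; the hemisphere split, annotation position, and example call are illustrative:
```
# Hypothetical helper for the regression plots listed below
northern_hemi = weather_data[weather_data["Lat"] >= 0]
southern_hemi = weather_data[weather_data["Lat"] < 0]

def plot_linear_regression(x_values, y_values, title, y_label, annotate_xy):
    (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
    regress_values = x_values * slope + intercept
    line_eq = f"y = {round(slope, 2)}x + {round(intercept, 2)}"
    plt.scatter(x_values, y_values)
    plt.plot(x_values, regress_values, "r-")
    plt.annotate(line_eq, annotate_xy, fontsize=15, color="red")
    plt.title(title)
    plt.xlabel("Latitude")
    plt.ylabel(y_label)
    print(f"The r-squared is: {rvalue**2}")
    plt.show()

# Example: Northern Hemisphere - Max Temp vs. Latitude
plot_linear_regression(northern_hemi["Lat"], northern_hemi["Max Temp"],
                       "Northern Hemisphere - Max Temp vs. Latitude",
                       "Max Temp (F)", (10, 40))
```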
#### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
|
github_jupyter
|
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
lats = np.random.uniform(low=-90.000, high=90.000, size=1500)
lngs = np.random.uniform(low=-180.000, high=180.000, size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
# If the city is unique, then add it to a our cities list
if city not in cities:
cities.append(city)
# Print the city count to confirm sufficient count
len(cities)
url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + weather_api_key
city_name = []
cloudiness = []
country = []
date = []
humidity = []
lat = []
lng = []
max_temp = []
wind_speed = []
# Start the call counter
record = 1
x =1
# Log file print statement
print(f"Beginning Data Retrieval")
print(f"-------------------------------")
#Loop through the cities in the city list
for city in cities:
# Try statement to append calls where value is found
# Not all calls return data as OpenWeatherMap will not have have records in all the cities generated by CityPy module
try:
response = requests.get(f"{url}&q={city}").json()
city_name.append(response["name"])
cloudiness.append(response["clouds"]["all"])
country.append(response["sys"]["country"])
date.append(response["dt"])
humidity.append(response["main"]["humidity"])
max_temp.append(response["main"]["temp_max"])
lat.append(response["coord"]["lat"])
lng.append(response["coord"]["lon"])
wind_speed.append(response["wind"]["speed"])
city_record = response["name"]
print(f"Processing Record {record} of set {x} | {city_record}")
#print(f"{url}&q={city}")
# Increase counter by one
record= record + 1
# Wait a second in loop to not over exceed rate limit of API
time.sleep(1.01)
# If no record found "skip" to next call
except:
print("City not found. Skipping...")
continue
print("------------------------------\nData Retrieval Complete\n------------------------------")
print(response)
weather_dict = {
"City_ID": city_name,
"Cloudiness":cloudiness,
"Country":country,
"Date":date,
"Humidity": humidity,
"Lat":lat,
"Lng":lng,
"Max Temp": max_temp,
"Wind Speed":wind_speed
}
weather_data = pd.DataFrame(weather_dict)
weather_data
plt.scatter(weather_data["Lat"],weather_data["Max Temp"], marker= "o")
plt.title("City Latitude vs. Max Temperature(05/28/19)")
plt.xlabel("Latitude")
plt.ylabel("Max Temperature(F)")
plt.grid(True)
plt.savefig("City Latitude vs. Max Temperature.png")
plt.show()
plt.scatter(weather_data["Lat"],weather_data["Humidity"], marker= "o")
plt.title("City Latitude vs. Humidity(05/28/19)")
plt.xlabel("Latitude")
plt.ylabel("Humidity(%)")
plt.grid(True)
plt.savefig("City Latitude vs. Humidity.png")
plt.show()
plt.scatter(weather_data["Lat"],weather_data["Cloudiness"], marker= "o")
plt.title("City Latitude vs. Cloudiness(08/22/19)")
plt.xlabel("Latitude")
plt.ylabel("Cloudiness(%)")
plt.grid(True)
plt.savefig("City Latitude vs. Cloudiness.png")
plt.show()
plt.scatter(weather_data["Lat"],weather_data["Wind Speed"], marker= "o")
plt.title("City Latitude vs. Wind Speed(05/28/19)")
plt.xlabel("Latitude")
plt.ylabel("WInd Speed(mph)")
plt.grid(True)
plt.savefig("City Latitude vs. Wind Speed.png")
plt.show()
# OPTIONAL: Create a function to create Linear Regression plots
# Create Northern and Southern Hemisphere DataFrames
| 0.31563 | 0.782164 |
# Interactive experimentation
```
# !pip install --upgrade lightgbm scikit-learn pandas adlfs
```
## Setup cloud tracking
```
import mlflow
from azureml.core import Workspace
ws = Workspace.from_config()
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
mlflow.set_experiment("explain-ml-exp")
```
## Load data
You can read directly from public URIs into Pandas. For private Blob or ADLS data, consider using [adlfs](https://github.com/dask/adlfs).
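For example, a private file can be read the same way once `adlfs` is installed, by passing credentials through `storage_options` (the account, container, path, and key below are placeholders, not real values):
```
import pandas as pd

# Hypothetical private read via the abfs:// protocol registered by adlfs
private_df = pd.read_csv(
    "abfs://my-container/my-folder/iris.csv",
    storage_options={
        "account_name": "mystorageaccount",
        "account_key": "<storage-account-key>",
    },
)
```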
```
data_uri = "https://azuremlexamples.blob.core.windows.net/datasets/iris.csv"
import pandas as pd
df = pd.read_csv(data_uri)
df.head()
```
## Define functions
```
# imports
import time
import lightgbm as lgb
from sklearn.metrics import log_loss, accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
# define functions
def preprocess_data(df):
X = df.drop(["species"], axis=1)
y = df["species"]
enc = LabelEncoder()
y = enc.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
return X_train, X_test, y_train, y_test, enc
def train_model(params, num_boost_round, X_train, X_test, y_train, y_test):
t1 = time.time()
train_data = lgb.Dataset(X_train, label=y_train)
test_data = lgb.Dataset(X_test, label=y_test)
model = lgb.train(
params,
train_data,
num_boost_round=num_boost_round,
valid_sets=[test_data],
valid_names=["test"],
)
t2 = time.time()
return model, t2 - t1
def evaluate_model(model, X_test, y_test):
y_proba = model.predict(X_test)
y_pred = y_proba.argmax(axis=1)
loss = log_loss(y_test, y_proba)
acc = accuracy_score(y_test, y_pred)
return loss, acc
```
## Run a trial
```
# preprocess data
X_train, X_test, y_train, y_test, enc = preprocess_data(df)
# set training parameters
params = {
# "objective": "multiclass",
"objective": "multiclass_ova",
"num_class": 3,
"learning_rate": 0.1,
"metric": "multi_logloss",
"colsample_bytree": 1.0,
"subsample": 1.0,
"seed": 42,
}
num_boost_round = 32
# start run
run = mlflow.start_run()
# enable automatic logging
mlflow.lightgbm.autolog()
# train model
model, train_time = train_model(
params, num_boost_round, X_train, X_test, y_train, y_test
)
mlflow.log_metric("training_time", train_time)
# evaluate model
loss, acc = evaluate_model(model, X_test, y_test)
mlflow.log_metrics({"loss": loss, "accuracy": acc})
# end run
mlflow.end_run()
```
## LGBM's own explainability features
```
print(model.feature_name())
print(model.feature_importance())
lgb.plot_importance(model)
import matplotlib
lgb.plot_split_value_histogram(model, 0)
lgb.plot_split_value_histogram(model, 1)
lgb.plot_split_value_histogram(model, 2)
lgb.plot_split_value_histogram(model, 3)
import graphviz
# lgb.plot_tree(model)
lgb.create_tree_digraph(model)
model.params
# blackbox_model = lgb.LGBMClassifier(model)
# blackbox_model.get_params()
```
## Explain (any blackbox models)
The following steps require `predict_proba()`, so the easiest approach is to fit the model with a scikit-learn `Pipeline`.
For demo purposes, let's fit the model again using a `Pipeline`.
### Train with scikit-learn pipeline
```
from sklearn.pipeline import Pipeline
params = {
# "objective": "multiclass",
"objective": "multiclass_ova",
"num_class": 3,
"learning_rate": 0.1,
"metric": "multi_logloss",
"colsample_bytree": 1.0,
"subsample": 1.0,
"seed": 42,
}
lgbm_clf = lgb.LGBMClassifier(**params)
blackbox_model = Pipeline([('lgbm_clf', lgbm_clf)])
blackbox_model.fit(X_train, y_train)
blackbox_model.get_params()
```
ROC only supports binary classification, so for multiclass classification, you may want to convert it to one-vs-all.
```
from interpret import show
from interpret.perf import ROC
blackbox_perf = ROC(blackbox_model.predict_proba).explain_perf(X_test, y_test, name='Blackbox')
show(blackbox_perf)
```
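As an alternative check (not part of the original notebook), the one-vs-rest ROC AUC can be computed directly with scikit-learn from the pipeline's predicted probabilities:
```
from sklearn.metrics import roc_auc_score

# y_test holds integer class labels; predict_proba returns an (n_samples, 3) array
y_proba = blackbox_model.predict_proba(X_test)
ovr_auc = roc_auc_score(y_test, y_proba, multi_class="ovr")
print(f"One-vs-rest ROC AUC: {ovr_auc:.3f}")
```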
Try `LimeTabular`; the `lime` and `dash` packages are used under the hood.
```
from interpret.blackbox import LimeTabular
from interpret import show
#Blackbox explainers need a predict function, and optionally a dataset
lime = LimeTabular(predict_fn=blackbox_model.predict_proba, data=X_train, random_state=1)
#Pick the instances to explain, optionally pass in labels if you have them
lime_local = lime.explain_local(X_test[:5], y_test[:5], name='LIME')
show(lime_local)
```
|
github_jupyter
|
# !pip install --upgrade lightgbm scikit-learn pandas adlfs
import mlflow
from azureml.core import Workspace
ws = Workspace.from_config()
mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri())
mlflow.set_experiment("explain-ml-exp")
data_uri = "https://azuremlexamples.blob.core.windows.net/datasets/iris.csv"
import pandas as pd
df = pd.read_csv(data_uri)
df.head()
# imports
import time
import lightgbm as lgb
from sklearn.metrics import log_loss, accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
# define functions
def preprocess_data(df):
X = df.drop(["species"], axis=1)
y = df["species"]
enc = LabelEncoder()
y = enc.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
return X_train, X_test, y_train, y_test, enc
def train_model(params, num_boost_round, X_train, X_test, y_train, y_test):
t1 = time.time()
train_data = lgb.Dataset(X_train, label=y_train)
test_data = lgb.Dataset(X_test, label=y_test)
model = lgb.train(
params,
train_data,
num_boost_round=num_boost_round,
valid_sets=[test_data],
valid_names=["test"],
)
t2 = time.time()
return model, t2 - t1
def evaluate_model(model, X_test, y_test):
y_proba = model.predict(X_test)
y_pred = y_proba.argmax(axis=1)
loss = log_loss(y_test, y_proba)
acc = accuracy_score(y_test, y_pred)
return loss, acc
# preprocess data
X_train, X_test, y_train, y_test, enc = preprocess_data(df)
# set training parameters
params = {
# "objective": "multiclass",
"objective": "multiclass_ova",
"num_class": 3,
"learning_rate": 0.1,
"metric": "multi_logloss",
"colsample_bytree": 1.0,
"subsample": 1.0,
"seed": 42,
}
num_boost_round = 32
# start run
run = mlflow.start_run()
# enable automatic logging
mlflow.lightgbm.autolog()
# train model
model, train_time = train_model(
params, num_boost_round, X_train, X_test, y_train, y_test
)
mlflow.log_metric("training_time", train_time)
# evaluate model
loss, acc = evaluate_model(model, X_test, y_test)
mlflow.log_metrics({"loss": loss, "accuracy": acc})
# end run
mlflow.end_run()
print(model.feature_name())
print(model.feature_importance())
lgb.plot_importance(model)
import matplotlib
lgb.plot_split_value_histogram(model, 0)
lgb.plot_split_value_histogram(model, 1)
lgb.plot_split_value_histogram(model, 2)
lgb.plot_split_value_histogram(model, 3)
import graphviz
# lgb.plot_tree(model)
lgb.create_tree_digraph(model)
model.params
# blackbox_model = lgb.LGBMClassifier(model)
# blackbox_model.get_params()
from sklearn.pipeline import Pipeline
params = {
# "objective": "multiclass",
"objective": "multiclass_ova",
"num_class": 3,
"learning_rate": 0.1,
"metric": "multi_logloss",
"colsample_bytree": 1.0,
"subsample": 1.0,
"seed": 42,
}
lgbm_clf = lgb.LGBMClassifier(**params)
blackbox_model = Pipeline([('lgbm_clf', lgbm_clf)])
blackbox_model.fit(X_train, y_train)
blackbox_model.get_params()
from interpret import show
from interpret.perf import ROC
blackbox_perf = ROC(blackbox_model.predict_proba).explain_perf(X_test, y_test, name='Blackbox')
show(blackbox_perf)
from interpret.blackbox import LimeTabular
from interpret import show
#Blackbox explainers need a predict function, and optionally a dataset
lime = LimeTabular(predict_fn=blackbox_model.predict_proba, data=X_train, random_state=1)
#Pick the instances to explain, optionally pass in labels if you have them
lime_local = lime.explain_local(X_test[:5], y_test[:5], name='LIME')
show(lime_local)
| 0.728362 | 0.888081 |
# Antennal neural network with extrinsic (modulated) set-point (simulation)
This notebook is a continuation of the intrinsic set-point neural network model. Here, I added interneurons that also synapse onto the motor neurons, thereby controlling the set-point. The classes are more or less the same as the ones in the intrinsic set-point notebook, with modifications that allow the set-point to be controlled by the additional interneurons.
```
# Import necessary libraries
import nest
import numpy as np
import pandas as pd
from copy import deepcopy
from pprint import pprint
from collections import defaultdict
from pprint import pprint
from itertools import cycle
# Plotting libraries
%pylab inline
from bokeh.io import output_file, show, output_notebook
from bokeh.layouts import gridplot
from bokeh.plotting import figure
from bokeh.palettes import Dark2_5 as palette
# Path libraries
from os.path import join as pathjoin
from os.path import isdir
from os import makedirs as mkdir
from os import getcwd
# Switch on notebook output
output_notebook()
```
## Bristle field neural network
A class that defines the underlying connectivity of a single bristle field. Sensory neurons underlying the bristle field are either active or inactive, as determined by the cuticle position. These neurons feed onto the antennal motor neurons (along with interneurons carrying the set-point), which are simply integrate-and-fire neurons (`iaf_psc_alpha`). The motor neurons drive muscle firing, which is modelled as a low-pass (exponential moving average) filter with a time constant of `mTau`. The muscle firing in turn indirectly controls the cuticle position, based on the activity of the other antennal muscles.
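The muscle signal below is computed by an `exp_moving_avg` helper that is referenced in the class but not defined in this excerpt. A minimal sketch of what such a helper might look like, assuming each motor-neuron spike contributes a weight that decays exponentially with time constant `mTau` (in ms):
```
def exp_moving_avg(t_now, spike_times, tau):
    """Hypothetical helper: exponentially weighted sum of past spikes.

    Each spike at time t_s contributes exp(-(t_now - t_s) / tau), so recent
    spikes dominate and older spikes decay with time constant tau."""
    spike_times = np.asarray(spike_times)
    past = spike_times[spike_times <= t_now]
    if past.size == 0:
        return 0.0
    return float(np.sum(np.exp(-(t_now - past) / tau)))
```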
```
class bbnn():
'''The bbnn class defines the Bohm's bristle neural network.
It initializes sensory and motor neurons, and changes its rate
based on bristle activation. It saves all the data in a dict'''
def __init__(self, sim_t, Bnum, delay, r_act,
r_inact, r_in, weight, in_weight,
mTau, CutPos):
'''
sim_t - Simulation step (default = 1 ms);
Bnum - Bristle number per field (default = 100 bristles);
delay - Synaptic delay in transmission (default = 1 ms);
r_act - Activated sensory neuron firing rate (default = 50.0);
r_inact - Inactivated sensory neuron firing rate (default = 10.0);
r_in - Interneuron firing rate;
weight - synaptic weight between sensory and motor neurons;
in_weight - synaptic weight between interneuron and motor neurons;
mTau - integration time of the muscle;
CutPos - starting cuticle (/antenna) position;
'''
# Save constants
self.sim_t = sim_t
self.Bnum = Bnum
self.weight = weight
self.delay = delay
self.in_weight = in_weight
self.mTau = mTau
# Initialize network
self.initNetwork(CutPos, r_act, r_inact, r_in)
def initNetwork(self, CutPos, r_act, r_inact, r_in):
'''Set up the network in NEST
CutPos - starting cuticle (/antenna) position;
r_act - firing rate of activated bristles;
r_inact - firing rate of inactivated bristles
r_in - firing rate of set-point interneuron'''
# Save act, inact rates
self.r_act = r_act
self.r_inact = r_inact
self.r_in = r_in
# Obtain bristle activity rate
sn_rate = self.getFieldRate(CutPos, r_act, r_inact)
# Declare Nodes
## Initialize poisson generators
self.sn_pgs = nest.Create("poisson_generator", self.Bnum, sn_rate)
self.jo = nest.Create("poisson_generator", 1,
{"rate": float(self.r_in)})
## Initialize neurons
self.sn = nest.Create("parrot_neuron", self.Bnum) # sensory neuron
self.mn = nest.Create("iaf_psc_alpha") # motor neuron
self.ms_in = nest.Create("parrot_neuron", 1) # interneuron
## Intialize spike detector
self.sn_sd = nest.Create("spike_detector", 1)
self.in_sd = nest.Create("spike_detector", 1)
self.mn_sd = nest.Create("spike_detector", 1)
# Setup Connections
## Poisson generators to parrot neurons
nest.Connect(self.sn_pgs, self.sn, 'one_to_one')
nest.Connect(self.jo, self.ms_in, 'one_to_one')
## Sensory to motor neurons
nest.Connect(self.sn, self.mn, 'all_to_all', {
"model": "static_synapse",
"weight": self.weight,
"delay": self.delay
})
nest.Connect(self.ms_in, self.mn, 'all_to_all', {
"model": "static_synapse",
"weight": self.in_weight * self.weight,
"delay": self.delay
})
## Neurons to spike detectors
nest.Connect(self.sn, self.sn_sd)
nest.Connect(self.ms_in, self.in_sd)
nest.Connect(self.mn, self.mn_sd)
nest.PrintNetwork() # Not working for some reason
# NEST network setup complete
# Muscle activity related datastructure
self.time = 0.0
self.curr_ind = 0
self.muscle_activity = []
self.muscle_activity.append((self.time, 0.0))
# Initialization complete
def getMuscleActivity(self):
''' Obtain muscle firing rate based on the simulation time '''
self.time += self.sim_t
self.curr_ind += 1
calcium_signal = exp_moving_avg(
self.time,
nest.GetStatus(self.mn_sd)[0]['events']['times'], self.mTau)
self.muscle_activity.append((self.time, calcium_signal))
return calcium_signal
def changeRates(self, CutPos):
''' Change the firing rate of the poisson generators
based on bristle activity (cuticle position)'''
# Get Rate
rate = self.getFieldRate(CutPos, self.r_act, self.r_inact)
# Set rate
nest.SetStatus(self.sn_pgs, rate)
def getFieldRate(self, CutPos, r_act, r_inact):
''' Obtain activity rate of each of the sensory neurons underneath the bristles '''
Bfield_closed = np.concatenate(
(np.zeros(self.Bnum - CutPos),
np.ones(CutPos))) # 1 to CutPos are active bristles
Bfield_open = np.concatenate(
(np.ones(self.Bnum - CutPos),
np.zeros(CutPos))) # CutPos to end are inactive bristles
        # Arbitrary definition of active and inactive. Simply swap r_act and r_inact to invert it
field = ((Bfield_open * r_inact) + (Bfield_closed * r_act))
return [{"rate": m} for m in field]
def exp_moving_avg(t, spike_times, tau):
''' Find activity at time t based on tau
assuming an exponential decay of calcium
after every spike with a time-constant of tau'''
ind = len(spike_times) - 1
calcium_signal = 0
# Sum up spikes based on calcium decay
while ((ind >= 0) and (t - spike_times[ind] <= 10.0 * tau)):
calcium_signal += np.exp(-(t - spike_times[ind]) / tau)
ind -= 1
return calcium_signal
```
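As a quick sanity check (a sketch, not part of the original analysis), the calcium low-pass filter `exp_moving_avg` defined above can be exercised on a toy spike train; the signal jumps at each spike and then decays with time constant `tau`:
```
# Toy spike train: three spikes at 10, 20 and 30 ms, evaluated at later times
tau = 50.0
toy_spikes = np.array([10.0, 20.0, 30.0])
for t in [30.0, 60.0, 120.0, 300.0]:
    # decays towards zero as t moves away from the last spike
    print("t = %g ms, calcium = %.3f" % (t, exp_moving_avg(t, toy_spikes, tau)))
```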
## Antennal neural network
An overarching class representing the antennal neural network. It creates and connects bristle field neural network objects (two in this simulation) and updates the cuticle/antennal position based on the muscle activity of each bristle field network.
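The position update implemented in `Simulate` below can be summarised as follows, with $m_1$, $m_2$ the two muscle activities, $K_p$ the scaling factor `mKp`, $N$ the number of bristles, and $x_t$ the cuticle position:

$$x_{t+\Delta t} = \min\!\left(N,\; \max\!\left(0,\; x_t - \mathrm{round}\!\left(\frac{m_1(t) - m_2(t)}{K_p}\right)\right)\right)$$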
```
class ann():
'''Antennal neural network class.'''
def __init__(self,
CutLoc,
sim_t=10.0,
Bnum=100,
weight=50.0,
delay=1.0,
in_weight=1.0,
in_asym=1.0,
mTau=50.0,
mKp=50.0,
r_act=50.0,
r_inact=5.0,
r_in=50.0):
'''Set defaults for the antennal neural network
and initialize the network.
Inputs:
CutLoc - starting cuticle location
sim_t - Simulation step (default = 1 ms);
Bnum - Bristle number per field (default = 100 bristles);
weight - synaptic weight between sensory and motor neurons;
delay - Synaptic delay in transmission (default = 1 ms);
in_weight - synaptic weight between interneuron and motor neurons;
in_asym - asymmetry of synaptic inputs before the two motor neurons;
mTau - integration time of the muscle;
mKp - scaling factor for the difference in muscle activity
r_act - Activated sensory neuron firing rate (default = 50.0);
r_inact - Inactivated sensory neuron firing rate (default = 10.0);
r_in - Interneuron firing rate;'''
# Set defaults
self.sim_t = sim_t
self.Bnum = Bnum
self.weight = weight
self.delay = delay
self.in_weight = in_weight
self.in_asym = in_asym
# muscle properties
self.mTau = mTau
self.mKp = mKp
# neuron characteristics
self.r_act = r_act
self.r_inact = r_inact
self.r_in = r_in
# start time
self.time_start = 0
# cuticle position
self.CutLoc = CutLoc
self.cuticle_position = []
self.cuticle_position.append((self.time_start, CutLoc))
# Initialize field 1
self.field1 = bbnn(self.sim_t, self.Bnum, self.delay, self.r_act,
self.r_inact, self.r_in * self.in_asym,
self.weight, self.in_weight,
self.mTau, self.CutLoc)
        # Initialize field 2 (flipped activation and inactivation rates)
self.field2 = bbnn(self.sim_t, self.Bnum, self.delay, self.r_inact,
self.r_act, self.r_in / self.in_asym,
self.weight, self.in_weight,
self.mTau, self.CutLoc)
def Simulate(self, simulation_time, perturbed=False):
'''Simulate the neural network.'''
time = np.arange(self.time_start, self.time_start + simulation_time,
self.sim_t)
CutLoc = self.CutLoc
# Simulate till end of time (literally :P)
for t in time:
# Simulate all networks
nest.Simulate(self.sim_t)
# Obtain muscle activity
firing1 = self.field1.getMuscleActivity()
firing2 = self.field2.getMuscleActivity()
# Update position based on activity and perturbation flag
if not perturbed:
CutShift = np.round((firing1 - firing2) / self.mKp)
CutLoc = int(CutLoc - CutShift)
if CutLoc < 0: CutLoc = 0
if CutLoc > self.Bnum: CutLoc = self.Bnum
# Change rates
self.field1.changeRates(CutLoc)
self.field2.changeRates(CutLoc)
# Append cuticle location
self.cuticle_position.append((t + self.sim_t, CutLoc))
# Continue simulation
# Simulation done; change start time, CutLoc
self.CutLoc = CutLoc
self.time_start = self.time_start + simulation_time
# Done
def getSimulatedData(self):
"""Extract simulated data, package cleanly and send back"""
# Extract data;
sn_spikes1, mn_spikes1, in_spikes1, muscle_activity1 = self.getBristleFieldActivity(
self.field1)
sn_spikes2, mn_spikes2, in_spikes2, muscle_activity2 = self.getBristleFieldActivity(
self.field2)
# Package spike data cleanly
neuron_spikes = {
'sn1_spikes': sn_spikes1,
'mn1_spikes': mn_spikes1,
'in1_spikes': in_spikes1,
'sn2_spikes': sn_spikes2,
'mn2_spikes': mn_spikes2,
'in2_spikes': in_spikes2
}
# Package muscle data cleanly
cuticle_position = zip(*self.cuticle_position)
muscle_activity = pd.DataFrame({
'time':
cuticle_position[0],
'position':
cuticle_position[1],
'muscle1_activity':
muscle_activity1[1],
'muscle2_activity':
muscle_activity2[1]
})
return neuron_spikes, muscle_activity
def getBristleFieldActivity(self, field):
'''Extract relevant activity details from the field'''
# Extract sensory neuron spike trains
sn_spikes = defaultdict(list)
sn_spikes_raw = zip(
nest.GetStatus(field.sn_sd)[0]['events']['times'],
nest.GetStatus(field.sn_sd)[0]['events']['senders'])
for time, sender in sn_spikes_raw:
sn_spikes[sender].append(time)
sn_spikes_df = pd.DataFrame(
{key: pd.Series(value)
for key, value in sn_spikes.iteritems()})
# Extract motor neuron spike trains
mn_spikes = defaultdict(list)
mn_spikes_raw = zip(
nest.GetStatus(field.mn_sd)[0]['events']['times'],
nest.GetStatus(field.mn_sd)[0]['events']['senders'])
for time, sender in mn_spikes_raw:
mn_spikes[sender].append(time)
mn_spikes_df = pd.DataFrame(
{key: pd.Series(value)
for key, value in mn_spikes.iteritems()})
# Extract interneuron spike trains
in_spikes = defaultdict(list)
in_spikes_raw = zip(
nest.GetStatus(field.in_sd)[0]['events']['times'],
nest.GetStatus(field.in_sd)[0]['events']['senders'])
for time, sender in in_spikes_raw:
in_spikes[sender].append(time)
in_spikes_df = pd.DataFrame(
{key: pd.Series(value)
for key, value in in_spikes.iteritems()})
# Extract muscle activity
muscle_activity = zip(*field.muscle_activity)
return (sn_spikes_df, mn_spikes_df, in_spikes_df, muscle_activity)
```
## Simulated antennal deployment
Test the above classes with a simple case in which the antenna is brought from position 0 to the set-point. This additionally provides the time it takes for the simulated antenna to settle into steady state.
```
nest.ResetKernel()
network_1 = ann(0, sim_t=10.0, mTau=50.0, mKp=3.0, in_asym=50.0)
network_1.Simulate(500)
neuron_spikes, muscle_activity = network_1.getSimulatedData()
subplot(311)
plot(muscle_activity.time, muscle_activity.position)
subplot(312)
plot(muscle_activity.time, muscle_activity.muscle1_activity)
plot(muscle_activity.time, muscle_activity.muscle2_activity)
subplot(313)
plot(muscle_activity.time, muscle_activity.muscle2_activity - muscle_activity.muscle1_activity)
```
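To put a number on the settling time mentioned above, a rough estimate (a sketch, using an arbitrary ±2-bristle band) is the last time the position lies outside a small band around its final value:
```
# Rough settling-time estimate: last time the position is more than 2 bristles
# away from its final value (band width chosen arbitrarily for illustration)
final_pos = muscle_activity.position.iloc[-1]
outside = muscle_activity[(muscle_activity.position - final_pos).abs() > 2]
settle_time = outside.time.max() if len(outside) else muscle_activity.time.iloc[0]
print("approximate settling time: %g ms" % settle_time)
```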
## Perturbation test
Perturb the antenna and simulate its response: first simulate for 1000 ms so that the antenna settles at its set-point, then hold it at the perturbed position for 500 ms, release it, and simulate its return for another 1000 ms.
```
mKp = 3
nest.ResetKernel()
network_1 = ann(0, sim_t=10.0, mTau=50.0, mKp=mKp, in_asym=1/40.0)
# Simulate for 1000 milliseconds (resting)
network_1.Simulate(1000)
# Perturb the position for 500 milliseconds and simulate for 1000 milliseconds more
network_1.CutLoc = 100
network_1.Simulate(500, perturbed=True)
network_1.Simulate(1000)
neuron_spikes, muscle_activity = network_1.getSimulatedData()
subplot(311)
plot(muscle_activity.time, muscle_activity.position)
subplot(312)
plot(muscle_activity.time, muscle_activity.muscle1_activity)
plot(muscle_activity.time, muscle_activity.muscle2_activity)
subplot(313)
plot(muscle_activity.time, muscle_activity.muscle2_activity - muscle_activity.muscle1_activity)
network_1.field1.time
```
## Test random seed (effect on NEST simulation)
NEST uses a fixed default seed, so repeated runs produce identical spike trains. To get different random realizations, it is best to rotate the seed on every run (while keeping a note of it for debugging purposes). Here we check that simulations with different random seeds run as expected.
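To avoid repeating the reset-and-seed boilerplate, a small helper along these lines could be used (a sketch; the cells below keep the explicit form so each seed stays visible):
```
def fresh_kernel(seed=None):
    '''Reset the NEST kernel and seed its RNG; returns the seed that was used.'''
    nest.ResetKernel()
    if seed is None:
        seed = np.random.randint(1, 99999999)
    nest.SetKernelStatus({'rng_seeds': [seed]})
    return seed
```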
```
mKp = 3
nest.ResetKernel()
seed1 = np.random.randint(1,99999999)
nest.SetKernelStatus({'rng_seeds': [seed1]})
network_1 = ann(0, sim_t=10.0, mTau=50.0, mKp=mKp, in_asym=40.0)
# Simulate for 1000 milliseconds (resting)
network_1.Simulate(1000)
# Perturb the position for 500 ms, then simulate the release for 1000 ms more
network_1.CutLoc = 100
network_1.Simulate(500, perturbed=True)
network_1.Simulate(1000)
neuron_spikes1, muscle_activity1 = network_1.getSimulatedData()
nest.ResetKernel()
seed2 = np.random.randint(1,99999999)
nest.SetKernelStatus({'rng_seeds': [seed2]})
network_2 = ann(0, sim_t=10.0, mTau=50.0, mKp=mKp, in_asym=40.0)
# Simulate for 1000 milliseconds (resting)
network_2.Simulate(1000)
# Perturb the position for 500 ms, then simulate the release for 1000 ms more
network_2.CutLoc = 100
network_2.Simulate(500, perturbed=True)
network_2.Simulate(1000)
neuron_spikes2, muscle_activity2 = network_2.getSimulatedData()
print("seed1:%d; seed2:%d" % (seed1, seed2))
subplot(311)
plot(muscle_activity1.time, muscle_activity1.position)
plot(muscle_activity2.time, muscle_activity2.position)
subplot(312)
plot(muscle_activity1.time, muscle_activity1.muscle1_activity)
plot(muscle_activity1.time, muscle_activity1.muscle2_activity)
plot(muscle_activity2.time, muscle_activity2.muscle1_activity)
plot(muscle_activity2.time, muscle_activity2.muscle2_activity)
subplot(313)
plot(muscle_activity1.time,
muscle_activity1.muscle2_activity - muscle_activity1.muscle1_activity)
plot(muscle_activity2.time,
muscle_activity2.muscle2_activity - muscle_activity2.muscle1_activity)
# An interactive plot to check details
color = cycle(palette)
# create a new plot
s1 = figure(plot_width=500, plot_height=200)
s1.line(muscle_activity1.time, muscle_activity1.position, color=color.next())
s1.line(muscle_activity2.time, muscle_activity2.position, color=color.next())
s2 = figure(plot_width=500, plot_height=200, x_range=s1.x_range)
s2.line(
muscle_activity1.time,
muscle_activity1.muscle1_activity,
color=color.next())
s2.line(
muscle_activity1.time,
muscle_activity1.muscle2_activity,
color=color.next())
s2.line(
muscle_activity2.time,
muscle_activity2.muscle1_activity,
color=color.next())
s2.line(
muscle_activity2.time,
muscle_activity2.muscle2_activity,
color=color.next())
s3 = figure(plot_width=500, plot_height=200, x_range=s1.x_range)
s3.line(
muscle_activity1.time,
muscle_activity1.muscle2_activity - muscle_activity1.muscle1_activity,
color=color.next())
s3.line(
muscle_activity2.time,
muscle_activity2.muscle2_activity - muscle_activity2.muscle1_activity,
color=color.next())
p = gridplot([[s1], [s2], [s3]])
show(p)
```
## Save perturbation simulation data
Perform multiple such perturbations and save the data for further analysis/plotting in MATLAB.
```
interneuron_asyms = [1/40.0, 1/20.0, 20., 40] # Just four for now
perturbation = 100
trials = 5 # number of trials per perturbation
seed_store = []
mKp = 3.0
save_folder = pathjoin(getcwd(), 'Simulated-Data', 'Extrinsic-setpoint')
# Find save folder
curr_save_folder = pathjoin(save_folder, "mKp-%g" % (mKp))
spike_folder = pathjoin(curr_save_folder, 'Spike-data')
if not isdir(spike_folder):
mkdir(spike_folder)
for in_asym in interneuron_asyms:
for trial in range(trials):
# Perform simulation
nest.ResetKernel()
curr_seed = np.random.randint(1, 99999999)
nest.SetKernelStatus({'rng_seeds': [curr_seed]})
seed_store.append((in_asym, trial, curr_seed))
network = ann(0, sim_t=10.0, mTau=50.0, mKp=mKp, in_asym=in_asym)
        # Simulate for 1000 milliseconds (resting)
network.Simulate(1000, perturbed=False)
# 5-Steps and associated releases
for i in range(5):
network.CutLoc = perturbation
network.Simulate(500, perturbed=True)
network.Simulate(500, perturbed=False)
        # Simulate ~1122 ms more to reach the trial end time
network.Simulate(1122, perturbed=False)
neuron_spikes, muscle_activity = network.getSimulatedData()
# Save muscle_activity as csv
muscle_activity.to_csv(
pathjoin(curr_save_folder, "In-%g_T-%d.csv" % (in_asym, trial + 1)),
columns=[
'time', 'position', 'muscle1_activity', 'muscle2_activity'
],
index=False)
# Save spike activity as csv
for key in neuron_spikes.keys():
curr_folder = pathjoin(spike_folder, "In-%g_T-%d" % (in_asym,
trial + 1))
if not isdir(curr_folder):
mkdir(curr_folder)
neuron_spikes[key].to_csv(
pathjoin(curr_folder, "In-%g_T-%d_%s.csv" % (in_asym, trial + 1,
key)),
index=False)
# Save seeds
seeds = pd.DataFrame(seed_store, columns=['In_asym', 'Trial', 'Seed'])
seeds.to_csv(pathjoin(curr_save_folder, "Simulation-details.csv"), index=False, mode='w')
# Plot the data (to ensure it looks as expected)
# An interactive plot to check details
color = cycle(palette)
# create a new plot
s1 = figure(plot_width=500, plot_height=200)
s1.line(muscle_activity.time, muscle_activity.position, color=color.next())
s2 = figure(plot_width=500, plot_height=200, x_range=s1.x_range)
s2.line(
muscle_activity.time,
muscle_activity.muscle1_activity,
color=color.next())
s2.line(
muscle_activity.time,
muscle_activity.muscle2_activity,
color=color.next())
s3 = figure(plot_width=500, plot_height=200, x_range=s1.x_range)
s3.line(
muscle_activity.time,
muscle_activity.muscle2_activity - muscle_activity.muscle1_activity,
color=color.next())
p = gridplot([[s1], [s2], [s3]])
show(p)
```
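As a quick check that the files were written as intended (a sketch reusing the names defined above):
```
# Read back one of the saved muscle-activity files and inspect it
check = pd.read_csv(pathjoin(curr_save_folder, "In-%g_T-%d.csv" % (interneuron_asyms[0], 1)))
print(check.columns.tolist())
check.head()
```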
## Control theoretic analysis
With this, the simulation part of the neural network is done. Next, the network needs to be analyzed with control-theoretic models to ascertain whether the simulated behavior is similar to that of real antennae. This will be done in MATLAB (building on previous code).
##### Copyright 2018 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License").
# Convolutional VAE: An example with tf.keras and eager
<table class="tfo-notebook-buttons" align="left"><td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/examples/generative_examples/cvae.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td><td>
<a target="_blank" href="https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/examples/generative_examples/cvae.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a></td></table>
This notebook demonstrates how to generate images of handwritten digits using [tf.keras](https://www.tensorflow.org/programmers_guide/keras) and [eager execution](https://www.tensorflow.org/programmers_guide/eager) by training a Variational Autoencoder. (VAE, [[1]](https://arxiv.org/abs/1312.6114), [[2]](https://arxiv.org/abs/1401.4082)).
```
# to generate gifs
!pip install imageio
```
## Import TensorFlow and enable Eager execution
```
from __future__ import absolute_import, division, print_function
# Import TensorFlow >= 1.9 and enable eager execution
import tensorflow as tf
tfe = tf.contrib.eager
tf.enable_eager_execution()
import os
import time
import numpy as np
import glob
import matplotlib.pyplot as plt
import PIL
import imageio
from IPython import display
```
## Load the MNIST dataset
Each MNIST image is originally a vector of 784 integers, each between 0 and 255, representing the intensity of a pixel. We model each pixel with a Bernoulli distribution, and we statically binarize the dataset.
```
(train_images, _), (test_images, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1).astype('float32')
# Normalizing the images to the range of [0., 1.]
train_images /= 255.
test_images /= 255.
# Binarization
train_images[train_images >= .5] = 1.
train_images[train_images < .5] = 0.
test_images[test_images >= .5] = 1.
test_images[test_images < .5] = 0.
TRAIN_BUF = 60000
BATCH_SIZE = 100
TEST_BUF = 10000
```
## Use *tf.data* to create batches and shuffle the dataset
```
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(TRAIN_BUF).batch(BATCH_SIZE)
test_dataset = tf.data.Dataset.from_tensor_slices(test_images).shuffle(TEST_BUF).batch(BATCH_SIZE)
```
## Wire up the generative and inference network with *tf.keras.Sequential*
In our VAE example, we use two small ConvNets for the generative and inference network. Since these neural nets are small, we use `tf.keras.Sequential` to simplify our code. Let $x$ and $z$ denote the observation and latent variable respectively in the following descriptions.
### Generative Network
This defines the generative model which takes a latent encoding as input, and outputs the parameters for a conditional distribution of the observation, i.e. $p(x|z)$. Additionally, we use a unit Gaussian prior $p(z)$ for the latent variable.
### Inference Network
This defines an approximate posterior distribution $q(z|x)$, which takes as input an observation and outputs a set of parameters for the conditional distribution of the latent representation. In this example, we simply model this distribution as a diagonal Gaussian: the inference network outputs the mean and log-variance parameters of a factorized Gaussian (outputting the log-variance rather than the variance directly improves numerical stability).
### Reparameterization Trick
During optimization, we can sample from $q(z|x)$ by first sampling from a unit Gaussian, and then multiplying by the standard deviation and adding the mean. This ensures that gradients can pass through the sample to the inference network parameters.
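In isolation, the trick looks like this (a minimal sketch mirroring the `reparameterize` method defined below):
```
# Toy example: sample z = mean + eps * sigma in a differentiable way
mean = tf.zeros([1, 2])
logvar = tf.zeros([1, 2])
eps = tf.random_normal(shape=mean.shape)
z = mean + eps * tf.exp(logvar * .5)  # gradients flow into mean and logvar
```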
### Network architecture
For the inference network, we use two convolutional layers followed by a fully-connected layer. In the generative network, we mirror this architecture by using a fully-connected layer followed by three convolution transpose layers (a.k.a. deconvolutional layers in some contexts). Note, it's common practice to avoid using batch normalization when training VAEs, since the additional stochasticity due to using mini-batches may aggravate instability on top of the stochasticity from sampling.
```
class CVAE(tf.keras.Model):
def __init__(self, latent_dim):
super(CVAE, self).__init__()
self.latent_dim = latent_dim
self.inference_net = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation=tf.nn.relu),
tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation=tf.nn.relu),
tf.keras.layers.Flatten(),
# No activation
tf.keras.layers.Dense(latent_dim + latent_dim),
]
)
self.generative_net = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(latent_dim,)),
tf.keras.layers.Dense(units=7*7*32, activation=tf.nn.relu),
tf.keras.layers.Reshape(target_shape=(7, 7, 32)),
tf.keras.layers.Conv2DTranspose(
filters=64,
kernel_size=3,
strides=(2, 2),
padding="SAME",
activation=tf.nn.relu),
tf.keras.layers.Conv2DTranspose(
filters=32,
kernel_size=3,
strides=(2, 2),
padding="SAME",
activation=tf.nn.relu),
# No activation
tf.keras.layers.Conv2DTranspose(
filters=1, kernel_size=3, strides=(1, 1), padding="SAME"),
]
)
def sample(self, eps=None):
if eps is None:
eps = tf.random_normal(shape=(100, self.latent_dim))
return self.decode(eps, apply_sigmoid=True)
def encode(self, x):
mean, logvar = tf.split(self.inference_net(x), num_or_size_splits=2, axis=1)
return mean, logvar
def reparameterize(self, mean, logvar):
eps = tf.random_normal(shape=mean.shape)
return eps * tf.exp(logvar * .5) + mean
def decode(self, z, apply_sigmoid=False):
logits = self.generative_net(z)
if apply_sigmoid:
probs = tf.sigmoid(logits)
return probs
return logits
```
## Define the loss function and the optimizer
VAEs train by maximizing the evidence lower bound (ELBO) on the marginal log-likelihood:
$$\log p(x) \ge \text{ELBO} = \mathbb{E}_{q(z|x)}\left[\log \frac{p(x, z)}{q(z|x)}\right].$$
In practice, we optimize the single sample Monte Carlo estimate of this expectation:
$$\log p(x| z) + \log p(z) - \log q(z|x),$$
where $z$ is sampled from $q(z|x)$.
**Note**: we could also analytically compute the KL term, but here we incorporate all three terms in the Monte Carlo estimator for simplicity.
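For reference (not used in the code below), this KL term has a closed form for a diagonal Gaussian posterior $q(z|x)=\mathcal{N}(\mu, \sigma^2 I)$ against the unit Gaussian prior:

$$D_{KL}\big(q(z|x)\,\|\,p(z)\big) = -\frac{1}{2}\sum_{j=1}^{d}\left(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\right),$$

which could replace the $\log p(z) - \log q(z|x)$ terms in the single-sample estimate.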
```
def log_normal_pdf(sample, mean, logvar, raxis=1):
log2pi = tf.log(2. * np.pi)
return tf.reduce_sum(
-.5 * ((sample - mean) ** 2. * tf.exp(-logvar) + logvar + log2pi),
axis=raxis)
def compute_loss(model, x):
mean, logvar = model.encode(x)
z = model.reparameterize(mean, logvar)
x_logit = model.decode(z)
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(logits=x_logit, labels=x)
logpx_z = -tf.reduce_sum(cross_ent, axis=[1, 2, 3])
logpz = log_normal_pdf(z, 0., 0.)
logqz_x = log_normal_pdf(z, mean, logvar)
return -tf.reduce_mean(logpx_z + logpz - logqz_x)
def compute_gradients(model, x):
with tf.GradientTape() as tape:
loss = compute_loss(model, x)
return tape.gradient(loss, model.trainable_variables), loss
optimizer = tf.train.AdamOptimizer(1e-4)
def apply_gradients(optimizer, gradients, variables, global_step=None):
optimizer.apply_gradients(zip(gradients, variables), global_step=global_step)
```
## Training
* We start by iterating over the dataset
* During each iteration, we pass the image to the encoder to obtain a set of mean and log-variance parameters of the approximate posterior $q(z|x)$
* We then apply the *reparameterization trick* to sample from $q(z|x)$
* Finally, we pass the reparameterized samples to the decoder to obtain the logits of the generative distribution $p(x|z)$
* **Note:** Since we use the dataset loaded by Keras, with 60k datapoints in the training set and 10k datapoints in the test set, our resulting ELBO on the test set is slightly higher than results reported in the literature, which use dynamic binarization of Larochelle's MNIST.
## Generate Images
* After training, it is time to generate some images
* We start by sampling a set of latent vectors from the unit Gaussian prior distribution $p(z)$
* The generator will then convert the latent sample $z$ to logits of the observation, giving a distribution $p(x|z)$
* Here we plot the probabilities of Bernoulli distributions
```
epochs = 100
latent_dim = 50
num_examples_to_generate = 100
# keeping the random vector constant for generation (prediction) so
# it will be easier to see the improvement.
random_vector_for_generation = tf.random_normal(
shape=[num_examples_to_generate, latent_dim])
model = CVAE(latent_dim)
def generate_and_save_images(model, epoch, test_input):
predictions = model.sample(test_input)
fig = plt.figure(figsize=(10,10))
for i in range(predictions.shape[0]):
plt.subplot(10, 10, i+1)
plt.imshow(predictions[i, :, :, 0], cmap='gray')
plt.axis('off')
# tight_layout minimizes the overlap between 2 sub-plots
plt.tight_layout()
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
generate_and_save_images(model, 0, random_vector_for_generation)
for epoch in range(1, epochs + 1):
start_time = time.time()
for train_x in train_dataset:
gradients, loss = compute_gradients(model, train_x)
apply_gradients(optimizer, gradients, model.trainable_variables)
end_time = time.time()
if epoch % 5 == 0:
loss = tfe.metrics.Mean()
for test_x in test_dataset.make_one_shot_iterator():
loss(compute_loss(model, test_x))
elbo = -loss.result()
display.clear_output(wait=False)
print('Epoch: {}, Test set ELBO: {}, '
'time elapse for current epoch {}'.format(epoch,
elbo,
end_time - start_time))
generate_and_save_images(
model, epoch, random_vector_for_generation)
```
### Display an image using the epoch number
```
def display_image(epoch_no):
plt.figure(figsize=(15,15))
plt.imshow(np.array(PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))))
plt.axis('off')
display_image(epochs) # Display images
```
### Generate a GIF of all the saved images.
```
with imageio.get_writer('cvae.gif', mode='I') as writer:
filenames = glob.glob('image*.png')
filenames = sorted(filenames)
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
# this is a hack to display the gif inside the notebook
os.system('mv cvae.gif cvae.gif.png')
display.Image(filename="cvae.gif.png")
```
<a href="https://colab.research.google.com/github/always-newbie161/pyprobml/blob/hermissue122/notebooks/clip_make_dataset_tpu_jax.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### Required Installations and Environment
```
import os
assert os.environ["COLAB_TPU_ADDR"], "Make sure to select TPU from Edit > Notebook settings > Hardware accelerator"
import os
if "google.colab" in str(get_ipython()) and "COLAB_TPU_ADDR" in os.environ:
import jax
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
print("Connected to TPU.")
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
import jax
print("jax version {}".format(jax.__version__))
print("jax backend {}".format(jax.lib.xla_bridge.get_backend().platform))
print(jax.lib.xla_bridge.device_count())
print(jax.local_device_count())
import jax.numpy as jnp
devices = jax.local_devices()
print(f"jax devices:")
devices
```
### Cloning Clip_jax
and loading the JAX version of the CLIP model.
```
%cd /content/
!git clone https://github.com/kingoflolz/CLIP_JAX.git
%cd /content/CLIP_JAX
!pip install ftfy regex tqdm dm-haiku
import numpy as np
from PIL import Image
import time
import clip_jax
image_fn, text_fn, jax_params, jax_preprocess = clip_jax.load("ViT-B/32", "cpu", jit=True)
```
We `pmap` the image-encoding function and replicate the parameters across the local devices.
```
jax_params_repl = jax.device_put_replicated(jax_params, devices)
image_fn_pmapped = jax.pmap(image_fn)
```
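A quick way to confirm the replication (a sketch): every leaf of the replicated parameter tree should have gained a leading axis equal to the number of local devices.
```
# Each replicated leaf carries one copy of the parameters per device
leaf = jax.tree_util.tree_leaves(jax_params_repl)[0]
print(leaf.shape[0] == len(devices))
```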
## Dataset
**Download the dataset used here first**, so that later calls simply load the already-downloaded copy.
Change `ds_name` to the dataset required.
```
ds_name = "imagenette/160px-v2"
data_dir = "/root/tensorflow_datasets"
# @title Choose whether you want to make a copy of the dataset in the drive
# @markdown Drive can be mounted to download the tfds into the drive for future uses,
# @markdown downloaded ds can be found in `your_drive_path/MyDrive/$ds_name`
to_load_into_drive = False # @param ["False", "True"] {type:"raw"}
if to_load_into_drive:
from google.colab import drive
drive.mount("/content/drive")
!mkdir /content/drive/MyDrive/$ds_name # your_drive_path
data_dir = f"/content/drive/MyDrive/{ds_name}"
```
### Loading tfds
```
import tensorflow as tf
import tensorflow_datasets as tfds
try:
tfds.load(ds_name, data_dir=data_dir)
except:
tfds.load(ds_name, data_dir=data_dir)
```
## Model
```
len(devices)
```
A data module that builds NumPy data loaders whose batches have a leading dimension equal to `len(devices)`, so they can be fed directly to the `pmap`-ed encoding function (a quick shape check follows once the loaders are built below).
```
class Tpu_data_loader:
def __init__(self, loader, split, batch_per_core, no_of_cores):
self.loader = loader
self.split = split
self.batch_size = batch_per_core * no_of_cores
class NumpyDataModule:
def __init__(self, ds_name: str, data_dir: str):
self.ds_name = ds_name
self.data_dir = data_dir
self.image_size = 224
self.mean = [0.48145466, 0.4578275, 0.40821073]
        self.std = [0.26862954, 0.26130258, 0.27577711]  # CLIP's normalization std (the original cell repeated the mean here)
self.ds = None
def preprocess(self, sample):
image = sample["image"]
""" `uint8` -> `float32`."""
image = tf.cast(image, tf.float32)
image = tf.image.resize_with_crop_or_pad(image, self.image_size, self.image_size)
image = (image - self.mean) / (self.std)
image = tf.transpose(image, perm=[2, 0, 1])
return image
def make_dataset(self, split, batch_per_core, no_of_cores):
ds = self.ds[split]
ds = ds.map(self.preprocess, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.batch(batch_per_core).batch(no_of_cores)
return Tpu_data_loader(
tfds.as_numpy(ds.prefetch(tf.data.experimental.AUTOTUNE)), split, batch_per_core, no_of_cores
)
def prepare_data(self):
self.ds, ds_info = tfds.load(
self.ds_name,
with_info=True,
data_dir=self.data_dir,
)
return ds_info
dm = NumpyDataModule(ds_name=ds_name, data_dir=data_dir)
ds_info = dm.prepare_data()
```
`batch_per_core` should be chosen such that `(n_examples // batch_per_core) % no_of_cores == 0`, so that all full per-core batches group evenly across the cores and only a single leftover batch remains; that final partial batch is handled below without `pmap`.
```
train_loader = dm.make_dataset("train", batch_per_core=62, no_of_cores=len(devices))
test_loader = dm.make_dataset("validation", batch_per_core=61, no_of_cores=len(devices))
print(ds_info.splits[train_loader.split].num_examples)
print(ds_info.splits[test_loader.split].num_examples)
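# Sanity check (not in the original notebook): each batch should have a leading
# dimension equal to the number of local devices, as pmap expects.
first_batch = next(iter(train_loader.loader))
print(first_batch.shape)  # expected: (len(devices), batch_per_core, 3, 224, 224)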
import tqdm
def clip_extract(tpu_loader):
clip_features = []
steps = (ds_info.splits[tpu_loader.split].num_examples // tpu_loader.batch_size) + 1
for i, batch in zip(tqdm.trange(steps), tpu_loader.loader):
        # the last (partial) batch is not parallelised; it is encoded without pmap
if i == steps - 1:
clip_encoded_batch = image_fn(jax_params, np.squeeze(batch, axis=0))
else:
clip_encoded_batch = image_fn_pmapped(jax_params_repl, batch)
clip_encoded_batch = jax.device_get(clip_encoded_batch)
clip_features.append(clip_encoded_batch)
clip_flattened_features = [fea.reshape(-1, 512) for fea in clip_features]
coco_clip = np.concatenate(clip_flattened_features)
return coco_clip
clip_train = clip_extract(train_loader)
clip_eval = clip_extract(test_loader)
def make_tfds_and_save(numpy_data, name):
tf_ds = tf.data.Dataset.from_tensor_slices(numpy_data)
tf.data.experimental.save(tf_ds, f"/content/{name}")
return tf_ds
clip_train_ds = make_tfds_and_save(clip_train, "clip_train_ds")
clip_test_ds = make_tfds_and_save(clip_eval, "clip_test_ds")
```
```
import math
import numpy as np
import pandas as pd
def load_day(day):
header = ['timestamp', 'line_id', 'direction', 'jrny_patt_id', 'time_frame', 'journey_id', 'operator',
'congestion', 'lon', 'lat', 'delay', 'block_id', 'vehicle_id', 'stop_id', 'at_stop']
types = {'timestamp': np.int64,
'journey_id': np.int32,
'congestion': np.int8,
'lon': np.float64,
'lat': np.float64,
'delay': np.int8,
'vehicle_id': np.int32,
'at_stop': np.int8}
file_name = 'data/siri.201301{0:02d}.csv'.format(day)
df = pd.read_csv(file_name, header=None, names=header, dtype=types, parse_dates=['time_frame'],
infer_datetime_format=True)
null_replacements = {'line_id': 0, 'stop_id': 0}
df = df.fillna(value=null_replacements)
df['line_id'] = df['line_id'].astype(np.int32)
df['stop_id'] = df['stop_id'].astype(np.int32)
df['timestamp'] = pd.to_datetime(df['timestamp'], unit='us')
return df
def haversine_np(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
All args must be of equal length.
Taken from here: https://stackoverflow.com/questions/29545704/fast-haversine-approximation-python-pandas#29546836
"""
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1.0 - a))
meters = 6378137.0 * c
return meters
def calculate_durations(data_frame, vehicle_id):
one_second = np.timedelta64(1000000000, 'ns')
dv = data_frame[data_frame['vehicle_id'] == vehicle_id]
ts = dv.timestamp.values
dtd = ts[1:] - ts[:-1]
dt = np.zeros(len(dtd) + 1)
dt[1:] = dtd / one_second
return dt
def calculate_distances(data_frame, vehicle_id):
dv = data_frame[data_frame['vehicle_id'] == vehicle_id]
lat = dv.lat.values
lon = dv.lon.values
dxm = haversine_np(lon[1:], lat[1:], lon[:-1], lat[:-1])
dx = np.zeros(len(dxm) + 1)
dx[1:] = dxm
return dx
def delta_location(lat, lon, bearing, meters):
"""
Calculates a destination location from a starting location, a bearing and a distance in meters.
:param lat: Start latitude
:param lon: Start longitude
:param bearing: Bearing (North is zero degrees, measured clockwise)
:param meters: Distance to displace from the starting point
:return: Tuple with the new latitude and longitude
"""
delta = meters / 6378137.0
theta = math.radians(bearing)
lat_r = math.radians(lat)
lon_r = math.radians(lon)
lat_r2 = math.asin(math.sin(lat_r) * math.cos(delta) + math.cos(lat_r) * math.sin(delta) * math.cos(theta))
lon_r2 = lon_r + math.atan2(math.sin(theta) * math.sin(delta) * math.cos(lat_r),
math.cos(delta) - math.sin(lat_r) * math.sin(lat_r2))
return math.degrees(lat_r2), math.degrees(lon_r2)
def delta_degree_to_meters(lat, lon, delta_lat=0, delta_lon=0):
return haversine_np(lon, lat, lon + delta_lon, lat + delta_lat)
def x_meters_to_degrees(meters, lat, lon):
_, lon2 = delta_location(lat, lon, 90, meters)
return abs(lon - lon2)
def y_meters_to_degrees(meters, lat, lon):
lat2, _ = delta_location(lat, lon, 0, meters)
return abs(lat - lat2)
```
Calculate the $Q$ (process-noise) matrix. Only the velocity components of the state carry process noise:

$$Q = \begin{bmatrix} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & \sigma_{v,x}^{2} & 0 \\ 0 & 0 & 0 & \sigma_{v,y}^{2} \end{bmatrix}$$

where $\sigma_{v,x}$ and $\sigma_{v,y}$ are the speed noise converted from meters to degrees of longitude and latitude at the current position.
```
def calculate_q(lat, lon, sigma_speed):
    q = np.zeros((4, 4), dtype=float)
    q[2, 2] = x_meters_to_degrees(sigma_speed, lat, lon) ** 2
    q[3, 3] = y_meters_to_degrees(sigma_speed, lat, lon) ** 2
    return q
def calculate_r(lat, lon, sigma):
    # Measurement-noise covariance in degrees squared, built like Q and P
    r = np.zeros((2, 2), dtype=float)
    r[0, 0] = x_meters_to_degrees(sigma, lat, lon) ** 2
    r[1, 1] = y_meters_to_degrees(sigma, lat, lon) ** 2
    return r
```
```
def calculate_p(lat, lon, sigma, sigma_speed):
    p = np.zeros((4, 4), dtype=float)
    p[0, 0] = x_meters_to_degrees(sigma, lat, lon) ** 2
    p[1, 1] = y_meters_to_degrees(sigma, lat, lon) ** 2
    p[2, 2] = x_meters_to_degrees(sigma_speed, lat, lon) ** 2
    p[3, 3] = y_meters_to_degrees(sigma_speed, lat, lon) ** 2
    return p
def calculate_phi(dt):
"""
Calculates the Φ matrix
:param dt: Δtᵢ
:return: The Φ matrix
"""
phi = np.eye(4)
phi[0, 2] = dt
phi[1, 3] = dt
return phi
def calculate_kalman_gain(p, c, r):
num = np.matmul(p, np.transpose(c))
den = np.matmul(c, num) + r
return np.matmul(num, np.linalg.pinv(den))
def predict_step(prev_x, prev_p, phi, sigma_speed):
lon = prev_x[0, 0]
lat = prev_x[1, 0]
next_x = np.matmul(phi, prev_x)
next_p = np.matmul(np.matmul(phi, prev_p), np.transpose(phi)) + calculate_q(lat, lon, sigma_speed)
return next_x, next_p
def update_step(predicted_x, predicted_p, c, y, sigma_x):
lon = predicted_x[0, 0]
lat = predicted_x[1, 0]
r = calculate_r(lat, lon, sigma_x)
k = calculate_kalman_gain(predicted_p, c, r)
updated_x = predicted_x + np.matmul(k, y - np.matmul(c, predicted_x))
identity = np.eye(4)
updated_p = np.matmul(identity - np.matmul(k, c), predicted_p)
return updated_x, updated_p
```
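The prediction and update helpers above are never chained together in this notebook, so the sketch below shows how they could be wired up for a single vehicle. The observation matrix `c`, the noise levels `sigma_x` and `sigma_speed`, and the `[lon, lat, v_lon, v_lat]` state layout are illustrative assumptions, not values taken from the original analysis.
```
def run_filter(lons, lats, dts, sigma_x=10.0, sigma_speed=2.0):
    # Hypothetical driver: filter one vehicle's lon/lat track with the helpers above.
    c = np.array([[1.0, 0.0, 0.0, 0.0],   # we observe position (lon, lat) ...
                  [0.0, 1.0, 0.0, 0.0]])  # ... but not velocity
    x = np.array([[lons[0]], [lats[0]], [0.0], [0.0]])       # initial state
    p = calculate_p(lats[0], lons[0], sigma_x, sigma_speed)  # initial covariance
    filtered = [(lons[0], lats[0])]
    for lon, lat, dt in zip(lons[1:], lats[1:], dts[1:]):
        x, p = predict_step(x, p, calculate_phi(dt), sigma_speed)
        x, p = update_step(x, p, c, np.array([[lon], [lat]]), sigma_x)
        filtered.append((x[0, 0], x[1, 0]))
    return filtered
```
Once `day` and `vehicles` are loaded below, the inputs for one vehicle could come from its `lon` and `lat` columns together with `calculate_durations(day, vehicle_id)`.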
Explore the data set.
```
day = load_day(2)
vehicles = day['vehicle_id'].unique()
```

---
___
# Support Vector Machines Project
___
Welcome to my Support Vector Machine Project! We will be analyzing the famous iris data set!
## The Data
For this series, we will be using the famous [Iris flower data set](http://en.wikipedia.org/wiki/Iris_flower_data_set).
The Iris flower data set or Fisher's Iris data set is a multivariate data set introduced by Sir Ronald Fisher in 1936 as an example of discriminant analysis.
The data set consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor), so 150 total samples. Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters.
Here's a picture of the three different Iris types:
```
# The Iris Setosa
from IPython.display import Image
path = '/Users/archismanchakraborti/Downloads/Irissetosa1.jpg'
Image(path,width=300, height=300)
# The Iris Versicolor
from IPython.display import Image
path = '/Users/archismanchakraborti/Downloads/440px-Blue_Flag,_Ottawa.jpg'
Image(path,width=300, height=300)
# The Iris Virginica
from IPython.display import Image
path = '/Users/archismanchakraborti/Downloads/440px-Iris_virginica_2.jpg'
Image(path,width=300, height=300)
```
The iris dataset contains measurements for 150 iris flowers from three different species.
The three classes in the Iris dataset:

- Iris-setosa (n=50)
- Iris-versicolor (n=50)
- Iris-virginica (n=50)

The four features of the Iris dataset:

- sepal length in cm
- sepal width in cm
- petal length in cm
- petal width in cm
## Getting the data
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_theme(style = 'darkgrid', context = 'talk')
iris = pd.read_csv('/Users/archismanchakraborti/Desktop/iris.csv')
```
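The CSV path above is specific to the author's machine. If that file is not available, the same table ships with seaborn (already imported above), so an equivalent load would be:
```
# Same 150-row iris table, with columns sepal_length, sepal_width,
# petal_length, petal_width and species.
iris = sns.load_dataset('iris')
```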
Let's visualize the data.
## Exploratory Data Analysis
**Which flower species seems to be the most separable?**
```
iris.head()
sns.pairplot(data = iris, hue = 'species', height = 4)
```
#### The setosa variety seems to be the most separable
**We'll create a KDE plot of sepal length versus sepal width, colored by species.**
```
sns.jointplot(kind = 'kde', data = iris, x = 'sepal_width', y = 'sepal_length', fill = True, cbar = True,
height = 14, hue = 'species')
```
# Train Test Split
```
from sklearn.model_selection import train_test_split
X = iris.drop('species', axis = 1)
y = iris['species']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
```
# Train a Model
Now it's time to train a Support Vector Machine classifier.
**We'll call the SVC() model from sklearn and fit the model to the training data.**
```
from sklearn.svm import SVC
svm = SVC()
svm.fit(X_train, y_train)
```
## Model Evaluation
**Now we'll get predictions from the model and create a confusion matrix and a classification report.**
```
from sklearn.metrics import classification_report, confusion_matrix
y_pred = svm.predict(X_test)
confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred))
```
Our model was pretty good! Let's see if we can tune the parameters to do even better (unlikely, and we'd probably be satisfied with these results in real life because the data set is quite small, but I just want to practice using GridSearch).
## Gridsearch Practice
```
from sklearn.model_selection import GridSearchCV
```
**We'll create a dictionary called param_grid and fill out some parameters for C and gamma.**
```
param_grid = {'C':[1,2,3,4,5,6],
'kernel':['linear', 'poly', 'rbf'],
'gamma':[1, 0.1, 0.2, 0.01, 0.02, 0.001, 0.002, 0.0001, 0.0002],
}
```
**We'll create a GridSearchCV object and fit it to the training data.**
```
grid = GridSearchCV(SVC(), param_grid, verbose = 8)
grid.fit(X_train, y_train)
```
**Now let's take that grid model, create predictions using the test set, and build a classification report and confusion matrix for them.**
```
grid.best_params_
grid_preds = grid.predict(X_test)
confusion_matrix(y_test, grid_preds)
print(classification_report(y_test, grid_preds))
```
### Our model is 100% accurate on this test set!
---
END
---
```
from selenium import webdriver
import time
import re
from bs4 import BeautifulSoup
import pandas as pd
from twitter import *
from typing import List
import shutil
import json
from tqdm import tqdm
import os
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("TOKEN")
TOKEN_SECRET = os.getenv("TOKEN_SECRET")
CONSUMER_KEY = os.getenv("CONSUMER_KEY")
CONSUMER_SECRET = os.getenv("CONSUMER_SECRET")
def get_driver():
# chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument('--headless')
# chrome_options.add_argument('--no-sandbox')
# chrome_options.add_argument('--disable-dev-shm-usage')
# wd = webdriver.Chrome('chromedriver', options=chrome_options)
options = webdriver.FirefoxOptions()
options.add_argument("--headless")
wd = webdriver.Firefox(options=options)
return wd
def get_coalition_links():
wd = get_driver()
wd.get("https://www.sejm.gov.pl/sejm9.nsf/kluby.xsp")
links_soup = BeautifulSoup(wd.page_source, 'lxml')
coalition_links = [l['href'] for l in links_soup.find_all('a') if l['href'] is not None and "klubposlowie" in l['href']]
return coalition_links
def get_coalition_politicians(coalition_link):
whole_link = "https://www.sejm.gov.pl" + coalition_link
wd = get_driver()
wd.get(whole_link)
soup = BeautifulSoup(wd.page_source, 'lxml')
people = [d.text for d in soup.find_all("div", class_="deputyName")]
return people
coalition_links = get_coalition_links()
coalition_links
def get_possible_accounts(person_name: str, coalition_name: str, n_first=3) -> List:
twitter = Twitter(auth=OAuth(TOKEN, TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
results = twitter.users.search(q = person_name)
name = person_name.replace(' ', '_')
path = f"users/{coalition_name}/{name}"
if os.path.exists(path):
shutil.rmtree(path)
os.mkdir(path)
possible_accounts = []
for i, user in enumerate(results):
if i >= n_first:
break
possible_accounts.append((user['screen_name'], user['verified']))
with open(os.path.join(path, f"{name}_{i}.json"), "w", encoding='utf-8') as f:
json.dump(user, f, ensure_ascii=False)
return possible_accounts
n_first = 3
results = []
base_href = "https://twitter.com/"
for link in coalition_links:
coalition_name = link.split("=")[-1]
print(coalition_name)
coalition_path = f"users/{coalition_name}"
if os.path.exists(coalition_path):
shutil.rmtree(coalition_path)
os.mkdir(coalition_path)
politicians = get_coalition_politicians(link)
for politician_name in tqdm(politicians):
possible_accounts = get_possible_accounts(politician_name, coalition_name, n_first)
possible_links = [(base_href + name, verified) for name, verified in possible_accounts]
data_flat = [item for t in possible_links for item in t]
res = (coalition_name, politician_name, *data_flat)
results.append(res)
columns=["coalition", "name"]
for i in range(n_first):
columns.append(f"acc_{i}")
columns.append(f"ver_{i}")
result_df = pd.DataFrame(results, columns=columns)
result_df.to_csv("res.csv", index=False)
result_df
```

---
```
#!/usr/bin/env python
import os, random, argparse
import numpy as np
from PIL import Image, ImageDraw, ImageFont, ImageOps, ImageChops
IMG_PATH = r'E:\Documents\GitHub\Photo_Mosaic\data\images'  # raw string avoids invalid escape sequences
try:
    os.mkdir(IMG_PATH)
except OSError as error:
    print(error)
FONT_PATH = 'C:\\Windows\\Fonts\\'
pallet = []
#Basic
pallet.append([(204, 0, 51),(255, 255, 255),'Red', 'White'])
pallet.append([(95, 106, 114),(255, 255, 255),'Grey', 'White'])
#pallet.append([(0, 0, 0),(255, 255, 255),'Black', 'White'])
#Bright
pallet.append([(158, 169, 0),(255, 255, 255),'Green', 'White'])
pallet.append([(235, 182, 0),(0, 0, 0),'Yellow', 'Black'])
pallet.append([(231, 111, 0),(255, 255, 255),'Orange', 'White'])
pallet.append([(0, 127, 172),(255, 255, 255),'Blue', 'White'])
#Muted
pallet.append([(223, 210, 179),(0, 0, 0),'Tan', 'Black'])
pallet.append([(0, 98, 109),(255, 255, 255),'Green_Blue', 'White'])
pallet.append([(112, 50, 33),(255, 255, 255),'Brown', 'White'])
pallet.append([(193, 187, 171),(0, 0, 0),'Dark_Tan', 'Black'])
pallet
foreground_or_background = random.randint(0, 1)
pallet_selection = random.randint(0, len(pallet)-1)
letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
fonts = []
fonts.append('palabi')
fonts.append('AGENCYR')
fonts.append('ALGER')
fonts.append('ANTQUAI')
fonts.append('arial')
fonts.append('ARLRDBD')
fonts.append('bahnschrift')
fonts.append('BASKVILL')
fonts.append('BAUHS93')
fonts.append('BELL')
fonts.append('BERNHC')
fonts.append('BKANT')
fonts.append('BOOKOS')
fonts.append('BRADHITC')
fonts.append('BRITANIC')
fonts.append('BRLNSB')
fonts.append('BRLNSDB')
fonts.append('BRLNSR')
fonts.append('BROADW')
fonts.append('BRUSHSCI')
fonts.append('calibri')
fonts.append('CALIFB')
fonts.append('cambriab')
fonts.append('Candara')
fonts.append('CascadiaCode')
fonts.append('CascadiaMono')
fonts.append('CASTELAR')
fonts.append('CENSCBK')
fonts.append('CENTAUR')
fonts.append('CENTURY')
fonts.append('CHILLER')
fonts.append('COLONNA')
fonts.append('comic')
fonts.append('consola')
fonts.append('constan')
fonts.append('COOPBL')
fonts.append('corbel')
fonts.append('cour')
fonts.append('couri')
fonts.append('DUBAI-REGULAR')
fonts.append('ebrima')
fonts.append('ebrimabd')
fonts.append('ELEPHNT')
fonts.append('ENGR')
fonts.append('ERASBD')
fonts.append('ERASDEMI')
fonts.append('ERASLGHT')
fonts.append('ERASMD')
fonts.append('FELIXTI')
fonts.append('FORTE')
fonts.append('FRABK')
fonts.append('FRABKIT')
fonts.append('FRADM')
fonts.append('FRADMCN')
fonts.append('FRADMIT')
fonts.append('FRAHV')
fonts.append('FRAHVIT')
fonts.append('framd')
fonts.append('FRAMDCN')
fonts.append('framdit')
fonts.append('FTLTLT')
fonts.append('Gabriola')
fonts.append('gadugi')
fonts.append('gadugib')
fonts.append('GARA')
fonts.append('GARABD')
fonts.append('GARAIT')
fonts.append('georgia')
fonts.append('GIGI')
fonts.append('GILLUBCD')
fonts.append('GILSANUB')
fonts.append('GLECB')
fonts.append('GLSNECB')
fonts.append('GOTHIC')
fonts.append('GOUDOS')
fonts.append('GOUDYSTO')
fonts.append('HARLOWSI')
fonts.append('HARNGTON')
fonts.append('HATTEN')
fonts.append('himalaya')
fonts.append('HTOWERT')
fonts.append('impact')
fonts.append('IMPRISHA')
fonts.append('INFROMAN')
fonts.append('Inkfree')
fonts.append('ITCBLKAD')
fonts.append('javatext')
fonts.append('JOKERMAN')
fonts.append('KUNSTLER')
fonts.append('LATINWD')
fonts.append('LBRITE')
fonts.append('LCALLIG')
fonts.append('LeelaUIb')
fonts.append('LEELAWAD')
fonts.append('LeelawUI')
fonts.append('LeelUIsl')
fonts.append('LFAX')
fonts.append('LHANDW')
fonts.append('LSANS')
fonts.append('LTYPE')
fonts.append('LTYPEB')
fonts.append('LTYPEBO')
fonts.append('LTYPEO')
fonts.append('lucon')
fonts.append('MAGNETOB')
fonts.append('MAIAN')
fonts.append('malgun')
fonts.append('malgunbd')
fonts.append('malgunsl')
fonts.append('MATURASC')
fonts.append('micross')
fonts.append('MISTRAL')
fonts.append('MOD20')
fonts.append('monbaiti')
fonts.append('MSUIGHUR')
fonts.append('msyi')
fonts.append('MTCORSVA')
fonts.append('mvboli')
fonts.append('NIAGENG')
fonts.append('NIAGSOL')
fonts.append('Nirmala')
fonts.append('ntailu')
fonts.append('ntailub')
fonts.append('OCRAEXT')
fonts.append('OLDENGL')
fonts.append('ONYX')
fonts.append('pala')
fonts.append('PALSCRI')
fonts.append('PARCHM')
fonts.append('PERTIBD')
fonts.append('phagspa')
fonts.append('PLAYBILL')
fonts.append('POORICH')
fonts.append('PRISTINA')
fonts.append('RAGE')
fonts.append('RAVIE')
fonts.append('REFSAN')
fonts.append('ROCK')
fonts.append('ROCKB')
fonts.append('ROCKI')
fonts.append('SCHLBKB')
fonts.append('SCRIPTBL')
fonts.append('segoepr')
fonts.append('SegUIVar')
fonts.append('SHOWG')
fonts.append('simsunb')
fonts.append('SitkaVF')
fonts.append('STENCIL')
fonts.append('sylfaen')
fonts.append('tahoma')
fonts.append('tahomabd')
fonts.append('taile')
fonts.append('times')
fonts.append('trebuc')
fonts.append('verdana')
fonts.append('VINERITC')
fonts.append('VIVALDII')
fonts.append('VLADIMIR')
def createLetter(letter,font,color_combo,foreground_or_background):
img = Image.new('RGB', (30, 30), color = color_combo[foreground_or_background ^ 0])
fnt = ImageFont.truetype(os.path.join(FONT_PATH, font+'.ttf'), 15)
d = ImageDraw.Draw(img)
d.text((10,10), letter, font=fnt, fill=color_combo[foreground_or_background ^ 1])
file_name = letter+'_'+font+'_'+ color_combo[(foreground_or_background ^ 0)+2] + '_' + color_combo[(foreground_or_background ^ 1)+2]+'_text_font.png'
img.save(os.path.join(IMG_PATH,file_name))
return img
for letter in letters:
for font in fonts:
for color_combo in pallet:
for foreground_or_background in range(2):
createLetter(letter,font,color_combo,foreground_or_background)
def createLetterTransparent(letter,font,color_combo):
FONT_SIZE = 60
IMG_SIZE = int(FONT_SIZE*3)
IMG_SPACE = int(FONT_SIZE/2)
img = Image.new('RGBA', (IMG_SIZE, IMG_SIZE), color = (0,0,0,0))
fnt = ImageFont.truetype(os.path.join(FONT_PATH, font+'.ttf'), FONT_SIZE)
d = ImageDraw.Draw(img)
d.text((IMG_SPACE,IMG_SPACE), letter, font=fnt, fill=color_combo[0])
file_name = letter+'_'+font+'_'+ color_combo[2] +'_text_font.png'
img = autoCrop(img)
img.save(os.path.join(IMG_PATH,file_name))
return img
def autoCrop(img):
bg = Image.new(img.mode, img.size, img.getpixel((0,0)))
diff = ImageChops.difference(img, bg)
diff = ImageChops.add(diff, diff, 2.0, -100)
bbox = diff.getbbox()
cropped = img.crop(bbox)
return cropped
for letter in letters:
for font in fonts:
for color_combo in pallet:
createLetterTransparent(letter,font,color_combo)
FONT_SIZE = 60
IMG_SIZE = int(FONT_SIZE*1.3)
img = Image.new('RGBA', (IMG_SIZE, IMG_SIZE), color = (100, 100, 100, 100))
fnt = ImageFont.truetype(os.path.join(FONT_PATH, 'palabi.ttf'), 15)
d = ImageDraw.Draw(img)
d.text((10,10), "H", font=fnt, fill=pallet[pallet_selection][foreground_or_background ^ 1])
im2 = ImageOps.fit(img, (100, 100), method = 0, bleed = 0.0, centering =(0.5, 0.5))
#img.save(os.path.join(IMG_PATH,letter+'pil_text_font.png'))
im2
os.makedirs(os.path.join(IMG_PATH, 'test'), exist_ok=True)  # make sure the output folder exists
for letter in letters:
    for font in fonts:
        img = Image.new('RGBA', (IMG_SIZE, IMG_SIZE), color = (0, 0, 0, 0))
        fnt = ImageFont.truetype(os.path.join(FONT_PATH, font+'.ttf'), FONT_SIZE)
        d = ImageDraw.Draw(img)
        d.text((0,0), letter, font=fnt, fill=pallet[pallet_selection][foreground_or_background ^ 1])
        img.save(os.path.join(IMG_PATH,'test',letter+font+'test.png'),'PNG')
```

---
<a href="https://colab.research.google.com/github/samuelgh15/daa_2021_1/blob/master/28Octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
h1 = 0
h2 = 0
m1 = 0
m2 = 0 # 1440 + 24 *6
contador = 0 # 5 + (1440 + ?) * 2 + 144 + 24 + 2= 3057
while [h1, h2, m1, m2] != [2,3,5,9]:
if [h1, h2] == [m2, m1]:
print(h1, h2,":", m1, m2)
m2 = m2 + 1
if m2 == 10:
m2 = 0
m1 = m1 + 1
if m1 == 6:
h2 = h2 + 1
m2 = 0
contador = contador + 1
m2 = m2 + 1
if m2 == 10:
m2 = 0
m1 = m1 + 1
if m1 == 6:
m1 = 0
h2 = h2 +1
if h2 == 10:
h2 = 0
h1 = h1 +1
print("Number of palindromes: ",contador)
horario="0000"
contador=0
while horario!="2359":
inv=horario[::-1]
if horario==inv:
contador+=1
print(horario[0:2],":",horario[2:4])
new=int(horario)
new+=1
horario=str(new).zfill(4)
print("There are",contador,"palindromes")
# 2 + (2360 * 4 ) + 24
lista=[]
for i in range(0,24,1): # 24
for j in range(0,60,1): # 60 1440
if i<10:
if j<10:
lista.append("0"+str(i)+":"+"0"+str(j))
elif j>=10:
lista.append("0"+str(i)+":"+str(j))
else:
if i>=10:
if j<10:
lista.append(str(i)+":"+"0"+str(j))
elif j>=10:
lista.append(str(i)+":"+str(j))
# 1440 + 2 + 1440 + 16 * 2 = 2900
lista2=[]
contador=0
for i in range(len(lista)): # 1440
x=lista[i][::-1]
if x==lista[i]:
lista2.append(x)
contador=contador+1
print(contador)
for j in (lista2):
print(j)
for x in range (0,24,1):
for y in range(0,60,1): #1440 * 3 +13 = 4333
hora=str(x)+":"+str(y)
if x<10:
hora="0"+str(x)+":"+str(y)
if y<10:
hora=str(x)+"0"+":"+str(y)
p=hora[::-1]
if p==hora:
            print(f"{hora} is a palindrome")
# Solution:
total = int(0)  # Counter for the number of palindromes
for hor in range(0,24):  # Nested for loops to step through the hours and the minutes together
    for min in range(0,60):
        hor_n = str(hor)  # Variables
        min_n = str(min)
        if (hor<10):  # Pad with a leading zero so hours and minutes keep the HH:MM format
            hor_n = ("0"+hor_n)
        if (min<10):
            min_n = ("0"+ min_n)
        if (hor_n[::-1] == min_n):  # Slicing reverses the hour string so it is read from the right
            print("{}:{}".format(hor_n,min_n))
            total += 1
#1 + 1440 * 5 =7201
palindronum= int(0)
for hor in range(0,24):
for min in range(0,60): # 1440
principio= str(hor)
final= str(min)
if (hor<10):
principio=("0"+principio)
if (min<10):
final=("0"+final)
if (principio[::-1]==final):
print(principio +":"+final)
palindronum= palindronum+1
print(palindronum)
# 1 + 1440 * 5 = 7201
```

---
# Example: CanvasXpress violin Chart No. 8
This example page demonstrates how to use the Python package to create a chart that matches the CanvasXpress online example located at:
https://www.canvasxpress.org/examples/violin-8.html
This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function.
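A minimal sketch of that generation step is shown below; the JSON file name is hypothetical, and it is assumed here that the function takes the path of the downloaded reproducible JSON and returns the generated Python source as a string.
```
from canvasxpress.util.generator import generate_canvasxpress_code_from_json_file

# "violin-8.json" is a placeholder for the reproducible JSON saved from the example page.
generated_code = generate_canvasxpress_code_from_json_file("violin-8.json")
print(generated_code)
```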
Everything required for the chart to render is included in the code below. Simply run the code block.
```
from canvasxpress.canvas import CanvasXpress
from canvasxpress.js.collection import CXEvents
from canvasxpress.render.jupyter import CXNoteBook
cx = CanvasXpress(
render_to="violin8",
data={
"y": {
"smps": [
"Var1",
"Var2",
"Var3",
"Var4",
"Var5",
"Var6",
"Var7",
"Var8",
"Var9",
"Var10",
"Var11",
"Var12",
"Var13",
"Var14",
"Var15",
"Var16",
"Var17",
"Var18",
"Var19",
"Var20",
"Var21",
"Var22",
"Var23",
"Var24",
"Var25",
"Var26",
"Var27",
"Var28",
"Var29",
"Var30",
"Var31",
"Var32",
"Var33",
"Var34",
"Var35",
"Var36",
"Var37",
"Var38",
"Var39",
"Var40",
"Var41",
"Var42",
"Var43",
"Var44",
"Var45",
"Var46",
"Var47",
"Var48",
"Var49",
"Var50",
"Var51",
"Var52",
"Var53",
"Var54",
"Var55",
"Var56",
"Var57",
"Var58",
"Var59",
"Var60"
],
"data": [
[
4.2,
11.5,
7.3,
5.8,
6.4,
10,
11.2,
11.2,
5.2,
7,
16.5,
16.5,
15.2,
17.3,
22.5,
17.3,
13.6,
14.5,
18.8,
15.5,
23.6,
18.5,
33.9,
25.5,
26.4,
32.5,
26.7,
21.5,
23.3,
29.5,
15.2,
21.5,
17.6,
9.7,
14.5,
10,
8.2,
9.4,
16.5,
9.7,
19.7,
23.3,
23.6,
26.4,
20,
25.2,
25.8,
21.2,
14.5,
27.3,
25.5,
26.4,
22.4,
24.5,
24.8,
30.9,
26.4,
27.3,
29.4,
23
]
],
"vars": [
"len"
]
},
"x": {
"supp": [
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"VC",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ",
"OJ"
],
"order": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10
],
"dose": [
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
0.5,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2
]
}
},
config={
"axisAlgorithm": "rPretty",
"axisTickScaleFontFactor": 1.8,
"axisTitleFontStyle": "bold",
"axisTitleScaleFontFactor": 1.8,
"background": "white",
"backgroundType": "window",
"backgroundWindow": "#E5E5E5",
"boxplotMedianColor": "red",
"boxplotMedianWidth": 5,
"boxplotNotched": True,
"boxplotWishkersType": "single",
"graphOrientation": "vertical",
"graphType": "Boxplot",
"groupingFactors": [
"dose"
],
"guides": "solid",
"guidesColor": "white",
"showBoxplotIfViolin": True,
"showLegend": False,
"showViolinBoxplot": True,
"smpLabelRotate": 90,
"smpLabelScaleFontFactor": 1.8,
"smpTitle": "dose",
"smpTitleFontStyle": "bold",
"smpTitleScaleFontFactor": 1.8,
"theme": "CanvasXpress",
"title": "The Effect of Vitamin C on Tooth Growth in Guinea Pigs",
"xAxis2Show": False,
"xAxisMinorTicks": False,
"xAxisTickColor": "white",
"xAxisTitle": "len"
},
width=613,
height=613,
events=CXEvents(),
after_render=[],
other_init_params={
"version": 35,
"events": False,
"info": False,
"afterRenderInit": False,
"noValidate": True
}
)
display = CXNoteBook(cx)
display.render(output_file="violin_8.html")
```

---
# Clustering with categorical data
## Import libraries
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.cluster import KMeans
```
## Data
```
data = pd.read_csv('Country clusters.csv')
data
```
## Mapping the data
```
# Make a copy of the original data
data_mapped = data.copy()
# Map each language to 0, 1, 2.
data_mapped['Language']=data_mapped['Language'].map({'English':0,'French':1,'German':2})
data_mapped
```
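A note on the mapping above: encoding the languages as 0, 1 and 2 imposes an artificial ordering on a purely categorical variable, and distance-based methods such as k-means will treat that ordering as meaningful. One-hot encoding is a common alternative; a quick sketch, not used in the rest of this notebook:
```
# Each language becomes its own 0/1 column, so no ordering is implied.
language_dummies = pd.get_dummies(data['Language'], prefix='Language')
data_onehot = pd.concat([data.drop(columns='Language'), language_dummies], axis=1)
data_onehot.head()
```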
## Select the features
```
x = data_mapped.iloc[:,3:4]
x
```
## Clustering
```
# Create a KMeans object, passing the number of clusters we want as a parameter
kmeans = KMeans(3)
# Fit the data
kmeans.fit(x)
```
k-means++ is the initialization method used to address the sensitivity of k-means to the choice of the initial seeds (centroids).
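In scikit-learn, k-means++ is already the default initialization; it can also be requested explicitly. A small illustration (not part of the original flow; `n_init` and `random_state` are added here only for repeatability):
```
# 'k-means++' spreads the initial centroids apart; n_init repeats the whole
# procedure several times and keeps the run with the lowest WCSS (inertia).
kmeans_pp = KMeans(n_clusters=3, init='k-means++', n_init=10, random_state=42)
kmeans_pp.fit(x)
```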
## Clustering results
```
# Store the cluster found for each observation
identified_clusters = kmeans.fit_predict(x)
identified_clusters
# Copy the data
data_with_clusters = data_mapped.copy()
# ...and add a column with the identified clusters
data_with_clusters['Cluster'] = identified_clusters
data_with_clusters
plt.scatter(data_with_clusters['Longitude'],data_with_clusters['Latitude'],
c=data_with_clusters['Cluster'],cmap='rainbow')
plt.show()
```
# Choosing the number of clusters
### WCSS (within-cluster sum of squares)
It measures the distance between the points within a cluster.
It provides a rule for deciding on an appropriate number of clusters.
We want to choose the solution with the minimum WCSS. <br>
But if every observation sits in its own cluster, i.e. #observations = #clusters, then WCSS = 0, its minimum. <br>
The same problem occurs if all the observations are in a single cluster: WCSS is at its maximum. <br>
So we want a WCSS value that is as low as possible while still using a sensible number of clusters.
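In symbols (notation introduced here for reference): for a partition of the data into clusters $C_1,\dots,C_K$ with centroids $\mu_1,\dots,\mu_K$,

$$\mathrm{WCSS} = \sum_{k=1}^{K} \sum_{x_i \in C_k} \lVert x_i - \mu_k \rVert^2$$

This is exactly the quantity scikit-learn exposes as `kmeans.inertia_`, which the next cell uses.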
```
# Let's try it choosing all the columns
# x = data_mapped.iloc[:,1:4]
# WCSS for the current solution
kmeans.inertia_
# Build a list with the WCSS of every candidate solution
wcss=[]
for i in range(1,7):
    # Solution with i clusters
    kmeans = KMeans(i)
    # Fit the clusters
    kmeans.fit(x)
    # Compute WCSS for this solution
    wcss_iter = kmeans.inertia_
    # Store it in the list
    wcss.append(wcss_iter)
wcss
```
### The Elbow Method
With k-means, minimizing the distance between points within a cluster is equivalent to maximizing the distance between clusters. The method is named for the 'elbow' shape of the clusters-vs-WCSS curve: beyond the elbow, adding clusters barely reduces WCSS.
```
number_clusters = range(1,7)
# Plot the number of clusters vs WCSS
plt.plot(number_clusters,wcss)
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
```
There comes a point where increasing the number of clusters no longer reduces the WCSS significantly. <br>
We take the largest number of clusters for which there is still a significant reduction in WCSS.
In this case it makes sense to take 3 clusters.
# Other types of clustering
## Dendrograms

In a dendrogram, the distance between the horizontal lines shows how similar the observations are.
<br>
According to the dendrogram, Germany and France are very similar in terms of longitude and latitude.
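The dendrogram itself is not produced anywhere in this notebook. A minimal sketch of how one could be built from the same latitude/longitude features is shown below; the Ward linkage and the `'Country'` label column are assumptions made for illustration:
```
from scipy.cluster.hierarchy import dendrogram, linkage

# Agglomerative (hierarchical) clustering on the geographic coordinates.
geo = data_mapped[['Latitude', 'Longitude']].values
linkage_matrix = linkage(geo, method='ward')
plt.figure(figsize=(8, 4))
dendrogram(linkage_matrix, labels=data_mapped['Country'].values)
plt.ylabel('Distance')
plt.show()
```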

---
```
import os, sys
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
import numpy as np
import matplotlib.pyplot as plt
from load_blender import load_blender_data
# Load input images
data = np.load('tiny_nerf_data.npz')
images = data['images']
poses = data['poses']
focal = data['focal']
H, W = images.shape[1:3]
print(images.shape, poses.shape, focal)
testimg, testpose = images[101], poses[101]
images = images[:100,...,:3]
poses = poses[:100]
plt.imshow(testimg)
plt.show()
def init_model(D=8, W=256):
relu = tf.keras.layers.ReLU()
dense = lambda W=W, act=relu : tf.keras.layers.Dense(W, activation=act)
    inputs = tf.keras.Input(shape=(3,))
outputs = inputs
for i in range(D):
outputs = dense()(outputs)
if i%4==0 and i>0:
outputs = tf.concat([outputs, inputs], -1)
outputs = dense(4, act=None)(outputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
return model
def get_rays(H, W, focal, c2w):
i, j = tf.meshgrid(tf.range(W, dtype=tf.float32), tf.range(H, dtype=tf.float32), indexing='xy')
dirs = tf.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -tf.ones_like(i)], -1)
rays_d = tf.reduce_sum(dirs[..., np.newaxis, :] * c2w[:3,:3], -1)
rays_o = tf.broadcast_to(c2w[:3,-1], tf.shape(rays_d))
return rays_o, rays_d
def render_rays(network_fn, rays_o, rays_d, near, far, N_samples):
# Compute 3D query points
z_vals = tf.linspace(near, far, N_samples)
# z_vals = tf.sort(tf.random.uniform([N_samples], near, far))
pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None]
# Run network
pts_flat = tf.reshape(pts, [-1,3])
raw = network_fn(pts_flat)
raw = tf.reshape(raw, list(pts.shape[:-1]) + [4])
# Compute opacities and colors
sigma_a = tf.nn.relu(raw[...,3])
rgb = tf.math.sigmoid(raw[...,:3])
# Do volume rendering
dists = tf.concat([z_vals[1:] - z_vals[:-1], [1e10]], -1)
alpha = 1.-tf.exp(-sigma_a * dists)
weights = alpha * tf.math.cumprod(1.-alpha + 1e-10, -1, exclusive=True)
rgb_map = tf.reduce_sum(weights[...,None] * rgb, -2)
depth_map = tf.reduce_sum(weights * z_vals, -1)
acc_map = tf.reduce_sum(weights, -1)
return rgb_map, depth_map, acc_map
model = init_model()
optimizer = tf.keras.optimizers.Adam(5e-4)
N_samples = 32
N_iters = 10000
psnrs = []
for i in range(N_iters):
img_i = np.random.randint(images.shape[0])
target = images[img_i]
pose = poses[img_i]
rays_o, rays_d = get_rays(H, W, focal, pose)
with tf.GradientTape() as tape:
rgb, depth, acc = render_rays(model, rays_o, rays_d, near=2., far=6., N_samples=N_samples)
loss = tf.reduce_mean(tf.square(rgb - target))
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    if i%25==0:
        # Render the held-out test view, compute its PSNR, then report it
        rays_o, rays_d = get_rays(H, W, focal, testpose)
        rgb, depth, acc = render_rays(model, rays_o, rays_d, near=2., far=6., N_samples=N_samples)
        loss = tf.reduce_mean(tf.square(rgb - testimg))
        psnr = -10. * tf.math.log(loss) / tf.math.log(10.)
        psnrs.append(psnr.numpy())
        print(i, psnr.numpy())
plt.figure(figsize=(10,4))
plt.subplot(121)
plt.imshow(rgb)
plt.subplot(122)
plt.plot(psnrs)
plt.show()
%matplotlib inline
from ipywidgets import interactive, widgets
trans_t = lambda t : tf.convert_to_tensor([
[1,0,0,0],
[0,1,0,0],
[0,0,1,t],
[0,0,0,1],
], dtype=tf.float32)
rot_phi = lambda phi : tf.convert_to_tensor([
[1,0,0,0],
[0,tf.cos(phi),-tf.sin(phi),0],
[0,tf.sin(phi), tf.cos(phi),0],
[0,0,0,1],
], dtype=tf.float32)
rot_theta = lambda th : tf.convert_to_tensor([
[tf.cos(th),0,-tf.sin(th),0],
[0,1,0,0],
[tf.sin(th),0, tf.cos(th),0],
[0,0,0,1],
], dtype=tf.float32)
def pose_spherical(theta, phi, radius):
c2w = trans_t(radius)
c2w = rot_phi(phi/180.*np.pi) @ c2w
c2w = rot_theta(theta/180.*np.pi) @ c2w
c2w = np.array([[-1,0,0,0],[0,0,1,0],[0,1,0,0],[0,0,0,1]]) @ c2w
return c2w
# pose_spherical(angle, -30.0, 4.0)
def f(**kwargs):
c2w = pose_spherical(**kwargs)
rays_o, rays_d = get_rays(H, W, focal, c2w[:3,:4])
rgb, depth, acc = render_rays(model, rays_o, rays_d, near=2., far=6., N_samples=N_samples)
img = np.clip(rgb,0,1)
plt.figure(2, figsize=(20,6))
plt.imshow(img)
plt.show()
sldr = lambda v, mi, ma: widgets.FloatSlider(
value=v,
min=mi,
max=ma,
step=.01,
)
names = [
['theta', [100., 0., 360]],
['phi', [-30., -90, 0]],
['radius', [4., 3., 5.]],
]
interactive_plot = interactive(f, **{s[0] : sldr(*s[1]) for s in names})
interactive_plot
```
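This minimal variant feeds raw 3D points straight into the MLP. The full tiny-NeRF adds a positional encoding of the inputs before the network; a sketch of such an encoding is below (`L_embed` and the widened input shape are assumptions, not taken from the code above).

```python
L_embed = 6  # assumed number of frequency bands

def posenc(x):
    # Map each coordinate x to [x, sin(2^0 x), cos(2^0 x), ..., sin(2^(L-1) x), cos(2^(L-1) x)]
    rets = [x]
    for i in range(L_embed):
        for fn in [tf.sin, tf.cos]:
            rets.append(fn(2.0**i * x))
    return tf.concat(rets, -1)

# If used, the MLP input widens to 3 + 3*2*L_embed features, e.g.
# inputs = tf.keras.Input(shape=(3 + 3*2*L_embed,)),
# and render_rays would call network_fn(posenc(pts_flat)) instead of network_fn(pts_flat).
```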
```
import pandas as pd
import requests
import unicodedata
from bs4 import BeautifulSoup as soup
raw_path = '../raw'
df = pd.read_csv(raw_path + '/' + 'ehrenamtskartei-gnd.csv', usecols=['nachname','vorname','umfang','geburt','tod','idns'] )
df
```
# GND Search
```
base_url = 'https://services.dnb.de/sru/authorities?'
params = {'recordSchema' : 'MARC21-xml',
'operation': 'searchRetrieve',
'version': '1.1',
'maximumRecords': '100',
}
def gnd_abfrage(row):
    params.update({'query': f'PER = "{row.nachname}, {row.vorname}" AND BBG=Tp*'})
    response = requests.get(base_url, params=params)
    response_xml = soup(response.content)
    try:
        matches = list()
        for record in response_xml.find_all('record', {'type':'Authority'}):
            record_match = dict()
            # determine the idn
            try:
                record_match['idn'] = record.find('controlfield', {'tag': '001'}).string.strip()
            except:
                record_match['idn'] = None
            # find the exact date (datx)
            try:
                datx = record.find('subfield', {'code': '4'}, string="datx").parent.find('subfield', {'code': 'a'}).string
            except:
                datx = ''
            # find the year dates in field 100
            try:
                datl_100 = record.find('datafield', {'tag': '100'}).find('subfield', {'code': 'd'}).string
            except:
                datl_100 = ''
            # find the year dates (datl) in field 548
            try:
                datl_548 = record.find('subfield', {'code': '4'}, string="datl").parent.find('subfield', {'code': 'a'}).string
            except:
                datl_548 = ''
            # set datum_match when the recorded dates agree
            # (compare against the four-digit years, hence the [-4:] slices)
            if datx == row.lebensdaten:
                record_match['datum_match'] = True
            elif datl_100.split('-')[0] == row.geburt[-4:] and datl_100.split('-')[1] == row.tod[-4:]:
                record_match['datum_match'] = True
            elif datl_548.split('-')[0] == row.geburt[-4:] and datl_548.split('-')[1] == row.tod[-4:]:
                record_match['datum_match'] = True
            else:
                record_match['datum_match'] = False
            matches.append(record_match)
        return matches
    except Exception as e:
        return "error %r" % e
def gnd_auswertung(matches):
return [match['idn'] for match in matches if match['datum_match'] == True]
# run the idn query and write the results to a new column 'matches'
df['matches'] = df.apply(gnd_abfrage, axis=1)
# evaluate the matches and write the matching idns to a new column 'idns'
df['idns'] = df.matches.apply(gnd_auswertung)
```
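To keep the enriched table for later use, it can be written back to disk. A minimal sketch (the output file name is an assumption; the `matches` column is dropped because it holds raw Python dicts):

```python
df.drop(columns=['matches']).to_csv(raw_path + '/' + 'ehrenamtskartei-gnd-matched.csv', index=False)
```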
```
# Library imports
import scipy
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def rk4(t,dt,y,evaluate): #Used to evaluate the first n steps
k1 = dt * evaluate(t, y)
k2 = dt * evaluate(t + 0.5*dt, y + 0.5*k1)
k3 = dt * evaluate(t + 0.5*dt, y + 0.5*k2)
k4 = dt * evaluate(t + dt, y + k3)
y_new = y + (1/6.)*(k1+ 2*k2 + 2*k3 + k4)
return y_new
def evaluate(t, y):
NBodies = int(len(y)/6)
solved_vector = np.zeros(y.size)
for i in range(NBodies):
ioffset = i*6
for j in range(NBodies):
joffset = j*6
solved_vector[ioffset:ioffset+3] = y[ioffset+3:ioffset+6]
if i != j:
d = y[ioffset:ioffset+3] - y[joffset:joffset+3]
r = np.sqrt(np.sum(d**2))
a = d*G*masses[j]/(r**3)
solved_vector[ioffset+3:ioffset+6] += a
return solved_vector
def AdamsMoulton(t,dt,y,evaluate):
w1 = y[1]
w2 = y[2]
w3 = y[3]
w4 = AdamsBashforth(t, dt, y, evaluate)
t1 = t - 3*dt
t2 = t - 2*dt
t3 = t - 1*dt
t4 = t
w4 = w3 + (dt/24)*(9*evaluate(t4, w4) + 19*evaluate(t3, w3) - 5*evaluate(t2, w2) + evaluate(t1, w1))
return w4
def AdamsBashforth(t,dt,y,evaluate):
w0 = y[0]
w1 = y[1]
w2 = y[2]
w3 = y[3]
t0 = t - 4*dt
t1 = t - 3*dt
t2 = t - 2*dt
t3 = t - dt
w = w3 + (dt/24)*(55*evaluate(t3, w3) - 59*evaluate(t2, w2) + 37*evaluate(t1, w1) - 9*evaluate(t0, w0))
return w
def runAB(T, dt, y0, masses, evaluate, t0 = 0):
nsteps = int((T-t0)/dt)
history = np.empty((nsteps+1, len(y0)))
history[0, :] = y0
t = t0
for i in range(3):
history[i+1] = rk4(t, dt, history[i,:], evaluate)
t += dt
for i in range(3, nsteps):
history[i+1] = AdamsBashforth(t, dt, history[i-3:i+1,:], evaluate)
t += dt
return history
def runAM(T, dt, y0, masses, evaluate, historyAux, t0 = 0):
nsteps = int((T-t0)/dt)
history = np.empty((nsteps+1, len(y0)))
history[0, :] = y0
t = t0
for i in range(3):
history[i+1] = rk4(t, dt, history[i,:], evaluate)
t += dt
for i in range(3, nsteps):
history[i+1] = AdamsMoulton(t, dt, historyAux[i-3:i+2,:], evaluate)
t += dt
return history
def runPredictorCorrector(T, dt, y0, masses, evaluate, t0 = 0):
historyAB = runAB(T+dt, dt, y0, masses, evaluate)
history = runAM(T, dt, y0, masses, evaluate, historyAB)
return history
# Problem setup: Moon-Earth two-body system in SI units
# (G carries a negative sign because `evaluate` uses d = r_i - r_j, which points
# away from the attracting body; the sign flips the acceleration toward it)
G = -6.67*(10**-11)
t0 = 0
T = 6.221e6
dt = 3600
y0 = np.array([3.84e8, 0, 0, 0, 1000, 0,
0, 0, 0, 0, 0, 0])
masses = np.array([7.347e22, 5.97e24])
# Integrate the system with the Adams-Bashforth-Moulton predictor-corrector
history = runPredictorCorrector(T, dt, y0, masses, evaluate)
# Plot the results
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z');
ax.plot3D(history[:,0], history[:,1], history[:,2])
ax.plot3D(history[:,6], history[:,7], history[:,8])
```
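As a rough sanity check on the integration (not part of the original code), the Earth-Moon separation can be plotted over time; for this near-circular setup it should stay roughly near the initial 3.84e8 m:

```python
# Earth-Moon separation over time (columns 0-2: Moon position, 6-8: Earth position)
sep = np.linalg.norm(history[:, 0:3] - history[:, 6:9], axis=1)
plt.figure()
plt.plot(np.arange(len(sep)) * dt / 86400, sep)
plt.xlabel('time [days]')
plt.ylabel('Earth-Moon separation [m]')
plt.show()
```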
**Chapter 13 – Loading and Preprocessing Data with TensorFlow**
_This notebook contains all the sample code and solutions to the exercises in chapter 13._
# Setup
First, let's import a few common modules, ensure Matplotlib plots figures inline, and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated, so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0-preview.
```
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# TensorFlow ≥2.0-preview is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "data"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
path = os.path.join(IMAGES_PATH, fig_id + "." + fig_extension)
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format=fig_extension, dpi=resolution)
```
## Datasets
```
X = tf.range(10)
dataset = tf.data.Dataset.from_tensor_slices(X)
dataset
```
Equivalently:
```
dataset = tf.data.Dataset.range(10)
for item in dataset:
print(item)
dataset = dataset.repeat(3).batch(7)
for item in dataset:
print(item)
dataset = dataset.map(lambda x: x * 2)
for item in dataset:
print(item)
dataset = dataset.apply(tf.data.experimental.unbatch())
dataset = dataset.filter(lambda x: x < 10) # keep only items < 10
for item in dataset.take(3):
print(item)
dataset = tf.data.Dataset.range(10).repeat(3)
dataset = dataset.shuffle(buffer_size=3, seed=42).batch(7)
for item in dataset:
print(item)
```
## Split the California dataset to multiple CSV files
Let's start by loading and preparing the California housing dataset. We first load it, then split it into a training set, a validation set and a test set, and finally we scale it:
```
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
housing = fetch_california_housing()
X_train_full, X_test, y_train_full, y_test = train_test_split(
housing.data, housing.target.reshape(-1, 1), random_state=42)
X_train, X_valid, y_train, y_valid = train_test_split(
X_train_full, y_train_full, random_state=42)
scaler = StandardScaler()
scaler.fit(X_train)
X_mean = scaler.mean_
X_std = scaler.scale_
```
For a very large dataset that does not fit in memory, you will typically want to split it into many files first, then have TensorFlow read these files in parallel. To demonstrate this, let's start by splitting the housing dataset and save it to 20 CSV files:
```
def save_to_multiple_csv_files(data, name_prefix, header=None, n_parts=10):
housing_dir = os.path.join("datasets", "housing")
os.makedirs(housing_dir, exist_ok=True)
path_format = os.path.join(housing_dir, "my_{}_{:02d}.csv")
filepaths = []
m = len(data)
for file_idx, row_indices in enumerate(np.array_split(np.arange(m), n_parts)):
part_csv = path_format.format(name_prefix, file_idx)
filepaths.append(part_csv)
with open(part_csv, "wt", encoding="utf-8") as f:
if header is not None:
f.write(header)
f.write("\n")
for row_idx in row_indices:
f.write(",".join([repr(col) for col in data[row_idx]]))
f.write("\n")
return filepaths
train_data = np.c_[X_train, y_train]
valid_data = np.c_[X_valid, y_valid]
test_data = np.c_[X_test, y_test]
header_cols = housing.feature_names + ["MedianHouseValue"]
header = ",".join(header_cols)
train_filepaths = save_to_multiple_csv_files(train_data, "train", header, n_parts=20)
valid_filepaths = save_to_multiple_csv_files(valid_data, "valid", header, n_parts=10)
test_filepaths = save_to_multiple_csv_files(test_data, "test", header, n_parts=10)
```
Okay, now let's take a peek at the first few lines of one of these CSV files:
```
import pandas as pd
pd.read_csv(train_filepaths[0]).head()
```
Or in text mode:
```
with open(train_filepaths[0]) as f:
for i in range(5):
print(f.readline(), end="")
train_filepaths
```
## Building an Input Pipeline
```
filepath_dataset = tf.data.Dataset.list_files(train_filepaths, seed=42)
for filepath in filepath_dataset:
print(filepath)
n_readers = 5
dataset = filepath_dataset.interleave(
lambda filepath: tf.data.TextLineDataset(filepath).skip(1),
cycle_length=n_readers)
for line in dataset.take(5):
print(line.numpy())
```
Notice that field 4 is interpreted as a string.
```
record_defaults=[0, np.nan, tf.constant(np.nan, dtype=tf.float64), "Hello", tf.constant([])]
parsed_fields = tf.io.decode_csv('1,2,3,4,5', record_defaults)
parsed_fields
```
Notice that all missing fields are replaced with their default value, when provided:
```
parsed_fields = tf.io.decode_csv(',,,,5', record_defaults)
parsed_fields
```
The 5th field is compulsory (since we provided `tf.constant([])` as the "default value"), so we get an exception if we do not provide it:
```
try:
parsed_fields = tf.io.decode_csv(',,,,', record_defaults)
except tf.errors.InvalidArgumentError as ex:
print(ex)
```
The number of fields should match exactly the number of fields in the `record_defaults`:
```
try:
parsed_fields = tf.io.decode_csv('1,2,3,4,5,6,7', record_defaults)
except tf.errors.InvalidArgumentError as ex:
print(ex)
n_inputs = 8 # X_train.shape[-1]
@tf.function
def preprocess(line):
defs = [0.] * n_inputs + [tf.constant([], dtype=tf.float32)]
fields = tf.io.decode_csv(line, record_defaults=defs)
x = tf.stack(fields[:-1])
y = tf.stack(fields[-1:])
return (x - X_mean) / X_std, y
preprocess(b'4.2083,44.0,5.3232,0.9171,846.0,2.3370,37.47,-122.2,2.782')
def csv_reader_dataset(filepaths, repeat=1, n_readers=5,
n_read_threads=None, shuffle_buffer_size=10000,
n_parse_threads=5, batch_size=32):
dataset = tf.data.Dataset.list_files(filepaths).repeat(repeat)
dataset = dataset.interleave(
lambda filepath: tf.data.TextLineDataset(filepath).skip(1),
cycle_length=n_readers, num_parallel_calls=n_read_threads)
dataset = dataset.shuffle(shuffle_buffer_size)
dataset = dataset.map(preprocess, num_parallel_calls=n_parse_threads)
dataset = dataset.batch(batch_size)
return dataset.prefetch(1)
train_set = csv_reader_dataset(train_filepaths, batch_size=3)
for X_batch, y_batch in train_set.take(2):
print("X =", X_batch)
print("y =", y_batch)
print()
train_set = csv_reader_dataset(train_filepaths, repeat=None)
valid_set = csv_reader_dataset(valid_filepaths)
test_set = csv_reader_dataset(test_filepaths)
model = keras.models.Sequential([
keras.layers.Dense(30, activation="relu", input_shape=X_train.shape[1:]),
keras.layers.Dense(1),
])
model.compile(loss="mse", optimizer="sgd")
batch_size = 32
model.fit(train_set, steps_per_epoch=len(X_train) // batch_size, epochs=10,
validation_data=valid_set)
model.evaluate(test_set, steps=len(X_test) // batch_size)
new_set = test_set.map(lambda X, y: X) # we could instead just pass test_set, Keras would ignore the labels
X_new = X_test
model.predict(new_set, steps=len(X_new) // batch_size)
optimizer = keras.optimizers.Nadam(lr=0.01)
loss_fn = keras.losses.mean_squared_error
n_epochs = 5
batch_size = 32
n_steps_per_epoch = len(X_train) // batch_size
total_steps = n_epochs * n_steps_per_epoch
global_step = 0
for X_batch, y_batch in train_set.take(total_steps):
global_step += 1
print("\rGlobal step {}/{}".format(global_step, total_steps), end="")
with tf.GradientTape() as tape:
y_pred = model(X_batch)
main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
loss = tf.add_n([main_loss] + model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
optimizer = keras.optimizers.Nadam(lr=0.01)
loss_fn = keras.losses.mean_squared_error
@tf.function
def train(model, n_epochs, batch_size=32,
n_readers=5, n_read_threads=5, shuffle_buffer_size=10000, n_parse_threads=5):
train_set = csv_reader_dataset(train_filepaths, repeat=n_epochs, n_readers=n_readers,
n_read_threads=n_read_threads, shuffle_buffer_size=shuffle_buffer_size,
n_parse_threads=n_parse_threads, batch_size=batch_size)
for X_batch, y_batch in train_set:
with tf.GradientTape() as tape:
y_pred = model(X_batch)
main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
loss = tf.add_n([main_loss] + model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train(model, 5)
optimizer = keras.optimizers.Nadam(lr=0.01)
loss_fn = keras.losses.mean_squared_error
@tf.function
def train(model, n_epochs, batch_size=32,
n_readers=5, n_read_threads=5, shuffle_buffer_size=10000, n_parse_threads=5):
train_set = csv_reader_dataset(train_filepaths, repeat=n_epochs, n_readers=n_readers,
n_read_threads=n_read_threads, shuffle_buffer_size=shuffle_buffer_size,
n_parse_threads=n_parse_threads, batch_size=batch_size)
n_steps_per_epoch = len(X_train) // batch_size
total_steps = n_epochs * n_steps_per_epoch
global_step = 0
for X_batch, y_batch in train_set.take(total_steps):
global_step += 1
if tf.equal(global_step % 100, 0):
tf.print("\rGlobal step", global_step, "/", total_steps)
with tf.GradientTape() as tape:
y_pred = model(X_batch)
main_loss = tf.reduce_mean(loss_fn(y_batch, y_pred))
loss = tf.add_n([main_loss] + model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train(model, 5)
```
Here is a short description of each method in the `Dataset` class:
```
for m in dir(tf.data.Dataset):
if not (m.startswith("_") or m.endswith("_")):
func = getattr(tf.data.Dataset, m)
if hasattr(func, "__doc__"):
print("● {:21s}{}".format(m + "()", func.__doc__.split("\n")[0]))
```
## The `TFRecord` binary format
A TFRecord file is just a list of binary records. You can create one using a `tf.io.TFRecordWriter`:
```
with tf.io.TFRecordWriter("my_data.tfrecord") as f:
f.write(b"This is the first record")
f.write(b"And this is the second record")
```
And you can read it using a `tf.data.TFRecordDataset`:
```
filepaths = ["my_data.tfrecord"]
dataset = tf.data.TFRecordDataset(filepaths)
for item in dataset:
print(item)
```
You can read multiple TFRecord files with just one `TFRecordDataset`. By default it will read them one at a time, but if you set `num_parallel_reads=3`, it will read 3 at a time in parallel and interleave their records:
```
filepaths = ["my_test_{}.tfrecord".format(i) for i in range(5)]
for i, filepath in enumerate(filepaths):
with tf.io.TFRecordWriter(filepath) as f:
for j in range(3):
f.write("File {} record {}".format(i, j).encode("utf-8"))
dataset = tf.data.TFRecordDataset(filepaths, num_parallel_reads=3)
for item in dataset:
print(item)
options = tf.io.TFRecordOptions(compression_type="GZIP")
with tf.io.TFRecordWriter("my_compressed.tfrecord", options) as f:
f.write(b"This is the first record")
f.write(b"And this is the second record")
dataset = tf.data.TFRecordDataset(["my_compressed.tfrecord"],
compression_type="GZIP")
for item in dataset:
print(item)
```
### A Brief Intro to Protocol Buffers
```
from homl.person_pb2 import Person
person = Person(name="Al", id=123, email=["[email protected]"]) # create a Person
print(person) # display the Person
person.name # read a field
person.name = "Alice" # modify a field
person.email[0] # repeated fields can be accessed like arrays
person.email.append("[email protected]") # add an email address
s = person.SerializeToString() # serialize to a byte string
s
person2 = Person() # create a new Person
person2.ParseFromString(s) # parse the byte string (27 bytes)
person == person2 # now they are equal
```
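The `homl.person_pb2` import above assumes a `person.proto` file that was compiled with `protoc` into a Python module. A plausible sketch of that definition (an assumption; the actual file is not shown in this notebook):

```proto
syntax = "proto3";
message Person {
  string name = 1;
  int32 id = 2;
  repeated string email = 3;
}
```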
### TensorFlow Protobufs
Here is the definition of the tf.train.Example protobuf:
```proto
syntax = "proto3";
message BytesList { repeated bytes value = 1; }
message FloatList { repeated float value = 1 [packed = true]; }
message Int64List { repeated int64 value = 1 [packed = true]; }
message Feature {
oneof kind {
BytesList bytes_list = 1;
FloatList float_list = 2;
Int64List int64_list = 3;
}
};
message Features { map<string, Feature> feature = 1; };
message Example { Features features = 1; };
```
```
from tensorflow.train import BytesList, FloatList, Int64List
from tensorflow.train import Feature, Features, Example
person_example = Example(
features=Features(
feature={
"name": Feature(bytes_list=BytesList(value=[b"Alice"])),
"id": Feature(int64_list=Int64List(value=[123])),
"emails": Feature(bytes_list=BytesList(value=[b"[email protected]", b"[email protected]"]))
}))
with tf.io.TFRecordWriter("my_contacts.tfrecord") as f:
f.write(person_example.SerializeToString())
feature_description = {
"name": tf.io.FixedLenFeature([], tf.string, default_value=""),
"id": tf.io.FixedLenFeature([], tf.int64, default_value=0),
"emails": tf.io.VarLenFeature(tf.string),
}
for serialized_example in tf.data.TFRecordDataset(["my_contacts.tfrecord"]):
parsed_example = tf.io.parse_single_example(serialized_example,
feature_description)
parsed_example
parsed_example
parsed_example["emails"].values[0]
tf.sparse.to_dense(parsed_example["emails"], default_value=b"")
parsed_example["emails"].values
```
### Putting Images in TFRecords
```
from sklearn.datasets import load_sample_images
img = load_sample_images()["images"][0]
plt.imshow(img)
plt.axis("off")
plt.title("Original Image")
plt.show()
data = tf.io.encode_jpeg(img)
example_with_image = Example(features=Features(feature={
"image": Feature(bytes_list=BytesList(value=[data.numpy()]))}))
serialized_example = example_with_image.SerializeToString()
# then save to TFRecord
feature_description = { "image": tf.io.VarLenFeature(tf.string) }
example_with_image = tf.io.parse_single_example(serialized_example, feature_description)
decoded_img = tf.io.decode_jpeg(example_with_image["image"].values[0])
```
Or use `decode_image()` which supports BMP, GIF, JPEG and PNG formats:
```
decoded_img = tf.io.decode_image(example_with_image["image"].values[0])
plt.imshow(decoded_img)
plt.title("Decoded Image")
plt.axis("off")
plt.show()
```
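The `# then save to TFRecord` comment above leaves the write and read-back steps implicit. A minimal sketch of that round trip (the file name `my_image.tfrecord` is illustrative):

```python
with tf.io.TFRecordWriter("my_image.tfrecord") as f:
    f.write(serialized_example)

dataset = tf.data.TFRecordDataset(["my_image.tfrecord"])
for serialized in dataset:
    parsed = tf.io.parse_single_example(serialized, feature_description)
    img = tf.io.decode_jpeg(parsed["image"].values[0])
```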
### Putting Tensors and Sparse Tensors in TFRecords
Tensors can be serialized and parsed easily using `tf.io.serialize_tensor()` and `tf.io.parse_tensor()`:
```
t = tf.constant([[0., 1.], [2., 3.], [4., 5.]])
s = tf.io.serialize_tensor(t)
s
tf.io.parse_tensor(s, out_type=tf.float32)
serialized_sparse = tf.io.serialize_sparse(parsed_example["emails"])
serialized_sparse
BytesList(value=serialized_sparse.numpy())
dataset = tf.data.TFRecordDataset(["my_contacts.tfrecord"]).batch(10)
for serialized_examples in dataset:
parsed_examples = tf.io.parse_example(serialized_examples,
feature_description)
parsed_examples
```
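To actually store such a serialized tensor in a TFRecord, the byte string can be wrapped in a `BytesList` feature, as sketched below (the feature name `my_tensor` is illustrative):

```python
tensor_example = Example(features=Features(feature={
    "my_tensor": Feature(bytes_list=BytesList(value=[s.numpy()]))}))
# After parsing this back with a tf.io.FixedLenFeature([], tf.string) description,
# tf.io.parse_tensor(..., out_type=tf.float32) recovers the original tensor.
```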
## Handling Sequential Data Using `SequenceExample`
```proto
syntax = "proto3";
message FeatureList { repeated Feature feature = 1; };
message FeatureLists { map<string, FeatureList> feature_list = 1; };
message SequenceExample {
Features context = 1;
FeatureLists feature_lists = 2;
};
```
```
from tensorflow.train import FeatureList, FeatureLists, SequenceExample
context = Features(feature={
"author_id": Feature(int64_list=Int64List(value=[123])),
"title": Feature(bytes_list=BytesList(value=[b"A", b"desert", b"place", b"."])),
"pub_date": Feature(int64_list=Int64List(value=[1623, 12, 25]))
})
content = [["When", "shall", "we", "three", "meet", "again", "?"],
["In", "thunder", ",", "lightning", ",", "or", "in", "rain", "?"]]
comments = [["When", "the", "hurlyburly", "'s", "done", "."],
["When", "the", "battle", "'s", "lost", "and", "won", "."]]
def words_to_feature(words):
return Feature(bytes_list=BytesList(value=[word.encode("utf-8")
for word in words]))
content_features = [words_to_feature(sentence) for sentence in content]
comments_features = [words_to_feature(comment) for comment in comments]
sequence_example = SequenceExample(
context=context,
feature_lists=FeatureLists(feature_list={
"content": FeatureList(feature=content_features),
"comments": FeatureList(feature=comments_features)
}))
sequence_example
serialized_sequence_example = sequence_example.SerializeToString()
context_feature_descriptions = {
"author_id": tf.io.FixedLenFeature([], tf.int64, default_value=0),
"title": tf.io.VarLenFeature(tf.string),
"pub_date": tf.io.FixedLenFeature([3], tf.int64, default_value=[0, 0, 0]),
}
sequence_feature_descriptions = {
"content": tf.io.VarLenFeature(tf.string),
"comments": tf.io.VarLenFeature(tf.string),
}
parsed_context, parsed_feature_lists = tf.io.parse_single_sequence_example(
serialized_sequence_example, context_feature_descriptions,
sequence_feature_descriptions)
parsed_context
parsed_context["title"].values
parsed_feature_lists
print(tf.RaggedTensor.from_sparse(parsed_feature_lists["content"]))
```
# The Features API
Let's use the variant of the California housing dataset that we used in Chapter 2, since it contains categorical features and missing values:
```
import os
import tarfile
from six.moves import urllib
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
fetch_housing_data()
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
csv_path = os.path.join(housing_path, "housing.csv")
return pd.read_csv(csv_path)
housing = load_housing_data()
housing.head()
housing_median_age = tf.feature_column.numeric_column("housing_median_age")
age_mean, age_std = X_mean[1], X_std[1] # The median age is column 1
housing_median_age = tf.feature_column.numeric_column(
"housing_median_age", normalizer_fn=lambda x: (x - age_mean) / age_std)
median_income = tf.feature_column.numeric_column("median_income")
bucketized_income = tf.feature_column.bucketized_column(
median_income, boundaries=[1.5, 3., 4.5, 6.])
bucketized_income
ocean_prox_vocab = ['<1H OCEAN', 'INLAND', 'ISLAND', 'NEAR BAY', 'NEAR OCEAN']
ocean_proximity = tf.feature_column.categorical_column_with_vocabulary_list(
"ocean_proximity", ocean_prox_vocab)
ocean_proximity
# Just an example, it's not used later on
city_hash = tf.feature_column.categorical_column_with_hash_bucket(
"city", hash_bucket_size=1000)
city_hash
bucketized_age = tf.feature_column.bucketized_column(
housing_median_age, boundaries=[-1., -0.5, 0., 0.5, 1.]) # age was scaled
age_and_ocean_proximity = tf.feature_column.crossed_column(
[bucketized_age, ocean_proximity], hash_bucket_size=100)
latitude = tf.feature_column.numeric_column("latitude")
longitude = tf.feature_column.numeric_column("longitude")
bucketized_latitude = tf.feature_column.bucketized_column(
latitude, boundaries=list(np.linspace(32., 42., 20 - 1)))
bucketized_longitude = tf.feature_column.bucketized_column(
longitude, boundaries=list(np.linspace(-125., -114., 20 - 1)))
location = tf.feature_column.crossed_column(
[bucketized_latitude, bucketized_longitude], hash_bucket_size=1000)
ocean_proximity_one_hot = tf.feature_column.indicator_column(ocean_proximity)
ocean_proximity_embed = tf.feature_column.embedding_column(ocean_proximity,
dimension=2)
```
### Using Feature Columns for Parsing
```
median_house_value = tf.feature_column.numeric_column("median_house_value")
columns = [housing_median_age, median_house_value]
feature_descriptions = tf.feature_column.make_parse_example_spec(columns)
feature_descriptions
with tf.io.TFRecordWriter("my_data_with_features.tfrecords") as f:
for x, y in zip(X_train[:, 1:2], y_train):
example = Example(features=Features(feature={
"housing_median_age": Feature(float_list=FloatList(value=[x])),
"median_house_value": Feature(float_list=FloatList(value=[y]))
}))
f.write(example.SerializeToString())
def parse_examples(serialized_examples):
examples = tf.io.parse_example(serialized_examples, feature_descriptions)
targets = examples.pop("median_house_value") # separate the targets
return examples, targets
batch_size = 32
dataset = tf.data.TFRecordDataset(["my_data_with_features.tfrecords"])
dataset = dataset.repeat().shuffle(10000).batch(batch_size).map(parse_examples)
columns_without_target = columns[:-1]
model = keras.models.Sequential([
keras.layers.DenseFeatures(feature_columns=columns_without_target),
keras.layers.Dense(1)
])
model.compile(loss="mse", optimizer="sgd", metrics=["accuracy"])
model.fit(dataset, steps_per_epoch=len(X_train) // batch_size, epochs=5)
some_columns = [ocean_proximity_embed, bucketized_income]
dense_features = keras.layers.DenseFeatures(some_columns)
dense_features({
"ocean_proximity": [["NEAR OCEAN"], ["INLAND"], ["INLAND"]],
"median_income": [[3.], [7.2], [1.]]
})
```
# TF Transform
```
try:
import tensorflow_transform as tft
def preprocess(inputs): # inputs is a batch of input features
median_age = inputs["housing_median_age"]
ocean_proximity = inputs["ocean_proximity"]
standardized_age = tft.scale_to_z_score(median_age - tft.mean(median_age))
ocean_proximity_id = tft.compute_and_apply_vocabulary(ocean_proximity)
return {
"standardized_median_age": standardized_age,
"ocean_proximity_id": ocean_proximity_id
}
except ImportError:
print("TF Transform is not installed. Try running: pip3 install -U tensorflow-transform")
```
# TensorFlow Datasets
```
import tensorflow_datasets as tfds
datasets = tfds.load(name="mnist")
mnist_train, mnist_test = datasets["train"], datasets["test"]
print(tfds.list_builders())
plt.figure(figsize=(6,3))
mnist_train = mnist_train.repeat(5).batch(32).prefetch(1)
for item in mnist_train:
images = item["image"]
labels = item["label"]
for index in range(5):
plt.subplot(1, 5, index + 1)
image = images[index, ..., 0]
label = labels[index].numpy()
plt.imshow(image, cmap="binary")
plt.title(label)
plt.axis("off")
break # just showing part of the first batch
datasets = tfds.load(name="mnist")
mnist_train, mnist_test = datasets["train"], datasets["test"]
mnist_train = mnist_train.repeat(5).batch(32)
mnist_train = mnist_train.map(lambda items: (items["image"], items["label"]))
mnist_train = mnist_train.prefetch(1)
for images, labels in mnist_train.take(1):
print(images.shape)
print(labels.numpy())
datasets = tfds.load(name="mnist", batch_size=32, as_supervised=True)
mnist_train = datasets["train"].repeat().prefetch(1)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28, 1]),
keras.layers.Lambda(lambda images: tf.cast(images, tf.float32)),
keras.layers.Dense(10, activation="softmax")])
model.compile(loss="sparse_categorical_crossentropy", optimizer="sgd", metrics=["accuracy"])
model.fit(mnist_train, steps_per_epoch=60000 // 32, epochs=5)
try:
datasets = tfds.load("imagenet2012", split=["train", "test"])
except AssertionError as ex:
print(ex)
```
# TensorFlow Hub
```
import tensorflow_hub as hub
hub_layer = hub.KerasLayer("https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1",
output_shape=[50], input_shape=[], dtype=tf.string)
model = keras.Sequential()
model.add(hub_layer)
model.add(keras.layers.Dense(16, activation='relu'))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary()
sentences = tf.constant(["It was a great movie", "The actors were amazing"])
embeddings = hub_layer(sentences)
embeddings
```
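The model above is built but never compiled or trained in this notebook. A hedged sketch of the remaining steps (the training data names are placeholders, not variables defined here):

```python
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# model.fit(train_sentences, train_labels, epochs=5, validation_split=0.2)
```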
```
import os
import folium
print(folium.__version__)
```
## ColorLine
```
import numpy as np
x = np.linspace(0, 2*np.pi, 300)
lats = 20 * np.cos(x)
lons = 20 * np.sin(x)
colors = np.sin(5 * x)
# FIXME: This example is broken!!!
from folium import features
m = folium.Map([0, 0], zoom_start=3)
color_line = features.ColorLine(
list(zip(lats, lons)),
colors=colors,
colormap=['y', 'orange', 'r'],
weight=10)
color_line.add_to(m)
m.save(os.path.join('results', 'Features_0.html'))
m
```
### WMS
```
m = folium.Map([40, -100], zoom_start=4)
w = features.WmsTileLayer(
"http://mesonet.agron.iastate.edu/cgi-bin/wms/nexrad/n0r.cgi",
name='test',
format='image/png',
layers='nexrad-n0r-900913',
attr=u"Weather data © 2012 IEM Nexrad",
transparent=True
)
w.add_to(m)
m.save(os.path.join('results', 'Features_1.html'))
m
```
### Marker, Icon, Popup
```
import branca
f = branca.element.Figure(figsize=(8, 8))
m = folium.Map([0, 0], zoom_start=1)
mk = features.Marker([0, 0])
pp = features.Popup('hello')
ic = features.Icon(color='red')
f.add_child(m)
mk.add_child(ic)
mk.add_child(pp)
m.add_child(mk)
f.save(os.path.join('results', 'Features_2.html'))
f
```
### RegularPolygonMarker
```
f = branca.element.Figure()
m = folium.Map([0, 0], zoom_start=1)
mk = features.RegularPolygonMarker([0, 0])
mk2 = features.RegularPolygonMarker([0, 45])
f.add_child(m)
m.add_child(mk)
m.add_child(mk2)
f.save(os.path.join('results', 'Features_3.html'))
f
```
### Vega stuff
```
# FIXME: This example is broken!!!
import json
import vincent
N = 100
multi_iter2 = {
'x': np.random.uniform(size=(N,)),
'y': np.random.uniform(size=(N,)),
}
scatter = vincent.Scatter(multi_iter2, iter_idx='x', height=100, width=200)
data = json.loads(scatter.to_json())
f = branca.element.Figure()
m = folium.Map([0, 0], zoom_start=1)
mk = features.Marker([0, 0])
p = features.Popup('Hello')
v = features.Vega(data, width='100%', height='100%')
f.add_child(m)
mk.add_child(p)
p.add_child(v)
m.add_child(mk)
f.save(os.path.join('results', 'Features_4.html'))
f
```
### Vega div
```
N = 100
multi_iter2 = {
'x': np.random.uniform(size=(N,)),
'y': np.random.uniform(size=(N,)),
}
scatter = vincent.Scatter(multi_iter2, iter_idx='x', height=400, width=600)
data = json.loads(scatter.to_json())
f = branca.element.Figure()
v = features.Vega(data, height=40, width=600)
f.add_child(v)
f.save(os.path.join('results', 'Features_5.html'))
f
```
### A div and a Map
```
N = 100
multi_iter2 = {
'x': np.random.uniform(size=(N,)),
'y': np.random.uniform(size=(N,)),
}
scatter = vincent.Scatter(multi_iter2, iter_idx='x', height=250, width=420)
data = json.loads(scatter.to_json())
f = branca.element.Figure()
# Create two maps.
m = folium.Map(location=[0, 0],
tiles='stamenwatercolor',
zoom_start=1,
position='absolute',
left='0%',
width='50%',
height='50%')
m2 = folium.Map(location=[46, 3],
tiles='OpenStreetMap',
zoom_start=4,
position='absolute',
left='50%',
width='50%',
height='50%',
top='50%')
# Create two Vega.
v = features.Vega(data, position='absolute', left='50%', width='50%', height='50%')
v2 = features.Vega(data, position='absolute', left='0%', width='50%', height='50%', top='50%')
f.add_child(m)
f.add_child(m2)
f.add_child(v)
f.add_child(v2)
f.save(os.path.join('results', 'Features_6.html'))
f
```
### GeoJson
```
N = 1000
lons = +5 - np.random.normal(size=N)
lats = 48 - np.random.normal(size=N)
data = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"geometry": {
"type": "MultiPoint",
"coordinates": [[lon, lat] for (lat, lon) in zip(lats, lons)],
},
"properties": {"prop0": "value0"}
},
],
}
m = folium.Map([48, 5], zoom_start=6)
m.add_child(features.GeoJson(data))
m.save(os.path.join('results', 'Features_7.html'))
m
```
### Marker Cluster
```
N = 100
data = np.array(
[
np.random.uniform(low=35, high=60, size=N), # Random latitudes in Europe.
np.random.uniform(low=-12, high=30, size=N), # Random longitudes in Europe.
range(N), # Popups text will be simple numbers .
]
).T
m = folium.Map([45, 3], zoom_start=4)
mc = features.MarkerCluster()
for k in range(N):
mk = features.Marker([data[k][0], data[k][1]])
p = features.Popup(str(data[k][2]))
mk.add_child(p)
mc.add_child(mk)
m.add_child(mc)
m.save(os.path.join('results', 'Features_8.html'))
m
```
### Div
```
N = 100
multi_iter2 = {
'x': np.random.uniform(size=(N,)),
'y': np.random.uniform(size=(N,)),
}
scatter = vincent.Scatter(multi_iter2, iter_idx='x', height=250, width=420)
data = json.loads(scatter.to_json())
f = branca.element.Figure()
d1 = f.add_subplot(1, 2, 1)
d2 = f.add_subplot(1, 2, 2)
d1.add_child(folium.Map([0, 0], tiles='stamenwatercolor', zoom_start=1))
d2.add_child(folium.Map([46, 3], tiles='OpenStreetMap', zoom_start=5))
f.save(os.path.join('results', 'Features_9.html'))
f
```
### LayerControl
```
m = folium.Map(tiles=None)
folium.TileLayer('OpenStreetMap').add_to(m)
folium.TileLayer('stamentoner').add_to(m)
folium.LayerControl().add_to(m)
m.save(os.path.join('results', 'Features_10.html'))
m
```
## Line example
```
# Coordinates are 15 points on the great circle from Boston to
# San Francisco.
# Reference: http://williams.best.vwh.net/avform.htm#Intermediate
coordinates = [
[42.3581, -71.0636],
[42.82995815, -74.78991444],
[43.17929819, -78.56603306],
[43.40320216, -82.37774519],
[43.49975489, -86.20965845],
[43.46811941, -90.04569087],
[43.30857071, -93.86961818],
[43.02248456, -97.66563267],
[42.61228259, -101.41886832],
[42.08133868, -105.11585198],
[41.4338549, -108.74485069],
[40.67471747, -112.29609954],
[39.8093434, -115.76190821],
[38.84352776, -119.13665678],
[37.7833, -122.4167]]
# Create the map and add the line
m = folium.Map(location=[41.9, -97.3], zoom_start=4)
folium.PolyLine(coordinates, color='#FF0000', weight=5).add_to(m)
m.save(os.path.join('results', 'Features_11.html'))
m
```
# Convolutional Neural Network
In this exercise we will build a convolutional neural network with two convolutional layers plus a fully connected layer, using TensorFlow.
## CNN Overview

## MNIST Dataset Overview
The dataset for this exercise is MNIST, which consists of images of handwritten digits.
As we already know, the data are 28x28-pixel images and there are 10 digits to classify, from 0 to 9. For simplicity we "flatten" the images into 1-D numpy arrays of dimension 784.

More information about the dataset: http://yann.lecun.com/exdb/mnist/
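To make the flattening concrete, here is a tiny sketch (a random array stands in for a real digit) showing how a 28x28 picture and its 784-element input vector relate:
```
import numpy as np

image = np.random.rand(28, 28)   # stands in for one MNIST digit
flat = image.reshape(-1)         # 1-D vector of length 784, the network input
restored = flat.reshape(28, 28)  # back to the original picture format
print(flat.shape, restored.shape)
```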
```
from __future__ import division, print_function, absolute_import
# Importamos el dataset
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("C:/Users/Javie/Downloads/", one_hot=False)
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# Parámetros a configurar
learning_rate = 0.001
num_steps = 2000
batch_size = 128
# Parámetros de la red
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.25 # Dropout, probability to drop a unit
# Creamos la red neuronal
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
with tf.variable_scope('ConvNet', reuse=reuse):
# La entrada TF Estimator es un diccionario. Esto se hace así por si la entrada es más de una
x = x_dict['images']
# MNIST data input is a 1-D vector of 784 features (28*28 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Capa convolucional de 32 filtros y un kernel de 5x5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Capa convolucional de 64 filtros y un kernel de 3x3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Salida capa fully connected con 10 neuronas (1 por cada clase posible, números del 0-9)
fc1 = tf.contrib.layers.flatten(conv2)
# Capa Fully connected
fc1 = tf.layers.dense(fc1, 1024)
# Aplicamos Dropout
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Capa de salida
out = tf.layers.dense(fc1, n_classes)
return out
# Definimos la función del modelo
def model_fn(features, labels, mode):
# Constrimos la red neuronal
# Debido al Dropout, creamos 2 redes una para entrenamiento y otra para test.
# No obstante, ambas redes comparten los mismos pesos
logits_train = conv_net(features, num_classes, dropout, reuse=False, is_training=True)
logits_test = conv_net(features, num_classes, dropout, reuse=True, is_training=False)
# Predicciones
pred_classes = tf.argmax(logits_test, axis=1)
pred_probas = tf.nn.softmax(logits_test)
# Si estamos prediciendo, devolvemos el estimador
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
# Definimos la función de pérdida y el optimizador (Define loss and optimizer)
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step())
# Evaluamos la precisión del modelo
acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
# Con el fin de poder entrenar el modelo, devolvemos un estimador
estim_specs = tf.estimator.EstimatorSpec(
mode=mode,
predictions=pred_classes,
loss=loss_op,
train_op=train_op,
eval_metric_ops={'accuracy': acc_op})
return estim_specs
# Construimos el estimador
model = tf.estimator.Estimator(model_fn)
# Define the input function for training
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.train.images}, y=mnist.train.labels,
batch_size=batch_size, num_epochs=None, shuffle=True)
# Train the Model
model.train(input_fn, steps=num_steps)
# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.test.images}, y=mnist.test.labels,
batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
model.evaluate(input_fn)
# Predicción de imágenes simples
n_images = 4
# Del conjunto de test
test_images = mnist.test.images[:n_images]
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': test_images}, shuffle=False)
# Usamos el modelo para predecir los ejemplos
preds = list(model.predict(input_fn))
# Las dibujamos
for i in range(n_images):
plt.imshow(np.reshape(test_images[i], [28, 28]), cmap='gray')
plt.show()
print("Model prediction:", preds[i])
```
# Assignment 2: Helping the Tanzanian Ministry of Water provide water to their people
#### By: Sara Krumpak, Jessica Matta, Ryan Chiu, and Carolin Kroeger
#### Team name: Pump it up MBD
##### Final score: 0.8201
Tanzania is facing a central problem regarding low access to water. Water is crucial to the development of a country, primarily as a critical requirement for survival, and additionally for agriculture and industrial development. Even though Tanzania is surrounded by numerous water sources of different types, the millions living below the poverty line cannot access them easily, often spending hours walking to reach the closest functional water pump. Thus, the task is to predict which water pumps are going to continue working, which are going to need repairs and which are going to fail.
#### Problem description and data download: https://www.drivendata.org/competitions/7/pump-it-up-data-mining-the-water-table/page/25/
### Import Libraries
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
### Import Datasets
```
train_set = pd.read_csv(
"/Users/carolinkroeger/Desktop/Train.csv"
)
test_set = pd.read_csv(
"/Users/carolinkroeger/Desktop/Test.csv"
)
train_labels = pd.read_csv(
"/Users/carolinkroeger/Desktop/Train_Labels.csv",
index_col="id",
)
```
### Check datasets
```
train_set.head()
test_set.head()
train_labels.head()
```
### Combine the datasets
```
dataset = pd.concat([train_set, test_set], sort=False)
```
### Check dataset
```
dataset.head()
```
#### Here we can see how many functional pumps there are, how many non-functional, and how many need repairs.
```
palette=[sns.color_palette()[0],sns.color_palette()[2],sns.color_palette()[1]]
train_labels.status_group.value_counts().plot(kind='barh', color=palette)
```
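#### For reference, the same class balance can be read off as proportions, which makes the imbalance easier to quantify (a small sketch using the training labels loaded above).
```
# Share of each pump status in the training labels
print(train_labels["status_group"].value_counts(normalize=True).round(3))
```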
## First we will drop all the variables we believe add no value to the dataset
#### If we look at the number of unique values in wpt_name, we can see that there are 45,684 different ones. As there are many different values, we will drop this column.
```
dataset.drop(columns=["id"], inplace=True)
dataset["wpt_name"].value_counts()
len(dataset["wpt_name"].unique())
dataset.drop(columns=["wpt_name"], inplace=True)
```
#### We will drop num_private because it provides no significant information. There are a lot of 0's and we do not get information about what this variable means to begin with.
```
dataset["num_private"].value_counts()
len(dataset["num_private"].unique())
dataset.drop(columns=["num_private"], inplace=True)
```
#### We will drop subvillage as we have other variables with location names. Subvillage also has some values such as "M" or "1" which do not make sense. Additionally, there are too many unique subvillage names.
```
dataset["subvillage"].value_counts()
len(dataset["subvillage"].unique())
dataset.drop(columns=["subvillage"], inplace=True)
```
#### We will be dropping region code as it does not match the corresponding region variable and the numbers provide no value.
```
dataset["region_code"].value_counts()
len(dataset["region_code"].unique())
dataset.drop(columns=["region_code"], inplace=True)
```
#### We will be dropping district code as it does not provide additional value. The numbers carry no significance.
```
dataset["district_code"].value_counts()
len(dataset["district_code"].unique())
dataset.drop(columns=["district_code"], inplace=True)
```
#### We will be dropping the LGA variable as it adds no additional value, and we have other location attributes like longitude and latitude that we will keep.
```
dataset["lga"].value_counts()
len(dataset["lga"].unique())
dataset.drop(columns=["lga"], inplace=True)
```
#### We will be deleting the ward variable as it provides no additional value, and we have other location attributes like longitude and latitude that we will keep.
```
dataset["ward"].value_counts()
len(dataset["ward"].unique())
dataset.drop(columns=["ward"], inplace=True)
```
#### We will drop the variable recorded_by as it is all the same one value.
```
dataset["recorded_by"].value_counts()
dataset.drop(columns=["recorded_by"], inplace=True)
```
#### We will drop scheme_name as there are 2869 unique values, which is too many to provide additional information.
```
dataset["scheme_name"].value_counts()
len(dataset["scheme_name"].unique()) #2869 distinct values
dataset.drop(columns=["scheme_name"], inplace=True)
```
#### We will drop extraction type group and extraction type class as they are very similar to extraction type. As extraction type has more information, we will drop the other two.
```
dataset["extraction_type_group"].value_counts()
len(dataset["extraction_type_group"].unique())
dataset["extraction_type_class"].value_counts()
len(dataset["extraction_type_class"].unique())
dataset["extraction_type"].value_counts()
len(dataset["extraction_type"].unique())
dataset.drop(columns=["extraction_type_group"], inplace=True)
dataset.drop(columns=["extraction_type_class"], inplace=True)
```
#### Similarly to extraction, we will drop management group and keep management as they are very similar, however, management has more details and information.
```
dataset["management"].value_counts()
len(dataset["management"].unique())
dataset["management_group"].value_counts()
len(dataset["management_group"].unique())
dataset.drop(columns=["management_group"], inplace=True)
```
#### We will be dropping payment and keeping payment type as they are the same variable but with different wordings.
```
dataset["payment"].value_counts()
len(dataset["payment"].unique())
dataset["payment_type"].value_counts()
len(dataset["payment_type"].unique())
dataset.drop(columns=["payment"], inplace=True)
```
#### Water quality and quality group are once again very similar; therefore, we will be dropping quality group, as water quality has some additional detail that might be important.
```
dataset["quality_group"].value_counts()
len(dataset["quality_group"].unique())
dataset["water_quality"].value_counts()
len(dataset["water_quality"].unique())
dataset.drop(columns=["quality_group"], inplace=True)
```
#### We will drop quantity group as it is identical to the quantity variable
```
dataset["quantity"].value_counts()
dataset["quantity_group"].value_counts()
dataset.drop(columns=["quantity_group"], inplace=True)
```
#### Similarly to before, source class, source type, and source are all very similar. Therefore, we will be keeping source and dropping source class and source type.
```
dataset["source_class"].value_counts()
len(dataset["source_class"].unique())
dataset["source_type"].value_counts()
len(dataset["source_type"].unique())
dataset["source"].value_counts()
len(dataset["source"].unique())
dataset.drop(columns=["source_class"], inplace=True)
dataset.drop(columns=["source_type"], inplace=True)
```
#### We will be dropping water point type group and keeping water point type as they are very similar.
```
dataset["waterpoint_type"].value_counts()
len(dataset["waterpoint_type"].unique())
dataset["waterpoint_type_group"].value_counts()
len(dataset["waterpoint_type_group"].unique())
dataset.drop(columns=["waterpoint_type_group"], inplace=True)
```
## Next we will be imputing variables that have NA's, 0, None, or Other.
#### We will check which variables have NA's in order to deal with them.
```
dataset.isnull().sum()
```
#### We will also check how many variables have zeros and which ones we need to deal with.
```
(dataset == 0).sum()
```
#### From all the variables that have 0's, we will have to do something about the ones in construction year. The 0's in gps_height and longitude make sense as the pumps can be located at sea level. The 0's in public_meeting and permit represent the "false" boolean values.
#### Funder: We will fill in nulls with "other"
```
dataset["funder"].fillna(value="Other", inplace=True)
type("funder")
```
#### We will also change the values with count less than 100 to other in order to decrease the number of unique values.
```
funder_count = dataset["funder"].value_counts()
replace_funder = funder_count[funder_count <= 100].index
dataset["funder"].replace(replace_funder, "Other", inplace=True)
funder_count = dataset["funder"].value_counts()
funder_count
replace_zeros = dataset["funder"].loc[dataset["funder"] == "0"]
replace_zeros
dataset["funder"].replace(replace_zeros, "Other", inplace=True)
funder_count = dataset["funder"].value_counts()
funder_count
```
#### Installer: We will change the NA's and 0 to Unknown
```
dataset["installer"].fillna(value="Unknown", inplace=True)
dataset["installer"].replace("0", "Unknown", inplace=True)
```
#### We will also change the values with count less than 100 to other in order to decrease the number of unique values.
```
installer_count = dataset["installer"].value_counts()
replace_installer = installer_count[installer_count <= 100].index
dataset["installer"].replace(replace_installer, "Other", inplace=True)
```
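#### Since the same rare-category collapse is applied to both funder and installer, it could be written once as a small helper. This is only a sketch of that refactoring; the column-by-column code above is what was actually used.
```
def collapse_rare(df, column, min_count=100, fill="Other"):
    # Replace categories that occur min_count times or fewer with a single label
    counts = df[column].value_counts()
    rare = counts[counts <= min_count].index
    df[column] = df[column].replace(rare, fill)

# e.g. collapse_rare(dataset, "funder"); collapse_rare(dataset, "installer")
```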
#### Public Meeting: We will fill the NA's with the most frequent value (the mode), as we do not have information on whether the public meeting took place or not.
```
dataset["public_meeting"].fillna(value=dataset["public_meeting"].mode()[0], inplace = True)
```
#### Permit: We will fill the NA's with the most frequent value (the mode), as we do not know whether these pumps have a permit or not.
```
dataset["permit"].fillna(value=dataset["permit"].mode()[0], inplace = True)
```
#### We will be keeping the scheme management attribute because who operates the water point could be significant, as it can indicate who can take better care of it.
```
dataset["scheme_management"].fillna(value="Unknown", inplace=True)
dataset["scheme_management"].replace("None","Unknown", inplace=True)
```
#### For the 0's in the construction year we will compute the mean year, without taking the 0's into account.
```
dataset["construction_year"].value_counts()
dataset["construction_year"].replace(0, np.nan, inplace=True)
dataset["construction_year"].value_counts()
```
#### We tested other options as well, but the mean gave a better score.
```
dataset["construction_year"].fillna(value=dataset["construction_year"].mean(), inplace=True)
dataset["construction_year"].value_counts()
```
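#### For reference, a median-based fill is a common alternative when the distribution is skewed; a sketch is shown below, although the mean fill above is what the final model uses.
```
# Median-based alternative to the mean imputation above
dataset["construction_year"].fillna(value=dataset["construction_year"].median(), inplace=True)
```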
#### Population: we will check the skewness as there are some very large numbers and some very small ones.
```
plt.boxplot(dataset["population"])
```
#### As the population variable is very skewed, we will cap the outliers
```
dataset["population"].clip(upper=12000, inplace=True)
```
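#### If one prefers not to hard-code the cap, the cut-off can also be derived from the data, for example from a high quantile. This is only an illustrative sketch; the fixed cap of 12000 above is what was submitted.
```
# Alternative: cap population at its 99th percentile instead of a fixed value
upper = dataset["population"].quantile(0.99)
capped_population = dataset["population"].clip(upper=upper)
```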
## Next we will be Feature Engineering
#### We first split date_recorded into year and month and then drop the original column. This will capture any possible trend there may be over the years or within a year in general.
```
from datetime import datetime
dataset["month_recorded"] = (dataset["date_recorded"].apply(lambda x: (datetime.strptime(x, "%Y-%m-%d").month))).apply(str)
dataset["month_recorded"].head()
dataset["year_recorded"] = (dataset["date_recorded"].apply(lambda x: (datetime.strptime(x, "%Y-%m-%d").year))).apply(str)
dataset["year_recorded"].head()
```
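#### The same split can also be done with pandas' datetime accessor, which avoids parsing each string manually; this equivalent sketch produces the same month and year strings.
```
recorded = pd.to_datetime(dataset["date_recorded"])
dataset["month_recorded"] = recorded.dt.month.astype(str)
dataset["year_recorded"] = recorded.dt.year.astype(str)
```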
#### We will be deleting the date_recorded variable as we separated it into month and year columns, so it is now redundant.
```
dataset.drop(columns=["date_recorded"], inplace=True)
```
#### Here we check whether the funder and the installer of the respective pump are one and the same. We are looking to capture any trend there may be depending on whether they match or not.
```
dataset['funder_installer_equal'] = (dataset['funder']==dataset['installer']).astype(int)
dataset.funder_installer_equal.head()
```
#### In order to capture the number of years the pump has existed, we create a variable that subtracts construction_year from the current year, 2019.
```
dataset["well_existence"] = 2019 - dataset["construction_year"]
dataset["well_existence"].head()
```
## Separate train and test and add the labels
```
dataset = pd.get_dummies(dataset)
train_clean = dataset[0:59400]
train_clean.head()
#train_both = pd.concat([train_clean, train_labels], sort = False)
#train_both.head()
test_clean = dataset[59400::]
test_clean.head()
```
## Machine Learning Models
### Logistic Regression Model
```
from sklearn import datasets, linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
logreg = LogisticRegression()
logreg.fit(train_clean, train_labels)
param_grid = {"C": [0.001, 0.01, 0.1, 1, 10, 100, 1000]}
clf = GridSearchCV(LogisticRegression(), param_grid)
gridsearch = GridSearchCV(
cv=5,
estimator=LogisticRegression(
C=1.0,
intercept_scaling=1,
dual=False,
fit_intercept=True,
tol=0.0001,
),
param_grid={"C": [0.001, 0.01, 0.1, 1, 10, 100, 1000]},
)
regression = gridsearch.fit(train_clean, train_labels)
optimized_regression = LogisticRegression(C=regression.best_estimator_.get_params()['C'])
optimized_regression.fit(train_clean, train_labels)
predictions = optimized_regression.predict(test_clean)
print(optimized_regression)
```
#### After making a submission from the logistic regression model, our score was 0.726, which was not our best score. Therefore, we will not be using logistic regression as our final model.
### Gradient Boosting Model
```
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
idtest = test_set["id"]
baseline = GradientBoostingClassifier(
learning_rate=0.1,
n_estimators=100,
max_depth=3,
min_samples_split=2,
min_samples_leaf=1,
subsample=1,
max_features="sqrt",
random_state=10,
)
baseline.fit(train_clean, train_labels)
predictors = list(train_clean)
feat_imp = pd.Series(baseline.feature_importances_, predictors).sort_values(
ascending=False
)
preds = baseline.predict(test_clean)
```
#### After running this model we got a score of 0.7056, which is not the best score; therefore, we will not be using this model.
### Random Forest Model
```
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
RFCparams = {
"n_estimators": [600, 700, 900],
"max_depth": [30, 31, 32],
}
mod = RandomForestClassifier()
RFCmod = GridSearchCV(mod, RFCparams, cv=3, n_jobs=-1)
RFCmod.fit(train_clean, train_labels)
RFCmod.best_params_
preds = RFCmod.predict(test_clean)
idtest = test_set["id"]
submission = pd.DataFrame(data=preds, index=idtest, columns=["status_group"])
submission.head()
```
#### Write the submission file
```
#submission.to_csv("submission_final.csv")
```
#### After running the different models, the best score we got was from the random forest, which was 0.8201.
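#### As a follow-up, the fitted random forest can also indicate which features drove its predictions. A minimal sketch, assuming the grid search above has been fitted (`best_estimator_` is the refitted forest):
```
best_rf = RFCmod.best_estimator_
importances = pd.Series(best_rf.feature_importances_, index=train_clean.columns)
print(importances.sort_values(ascending=False).head(15))
```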
# Transaction Amt Dec
```
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
%matplotlib inline
train = pd.read_parquet('../../data/train_FE003.parquet')
test = pd.read_parquet('../../data/test_FE003.parquet')
train['TransactionAmt_Dec'] = train['TransactionAmt'] % 1
test['TransactionAmt_Dec'] = test['TransactionAmt'] % 1
train['TransactionAmt_Dec'] = train['TransactionAmt'] % 1
for i, d in train.query("ProductCD == 4").groupby('isFraud'):
d.set_index('TransactionDT')['TransactionAmt_Dec'].plot(figsize=(15, 5), style='.')
train['TransactionAmt_2Dec'] = train['TransactionAmt_Dec'].round(2)
test['TransactionAmt_2Dec'] = test['TransactionAmt_Dec'].round(2)
train.query("ProductCD == 4 and isFraud == True")['TransactionAmt_Dec'].value_counts()
train.query("ProductCD == 4 and isFraud == True")['TransactionAmt_2Dec'].value_counts()
train['ProductCD_W_95cents'] = False
train.loc[(train['ProductCD'] == 4) & (train['TransactionAmt_2Dec'] == 0.95),'ProductCD_W_95cents'] = True
test['ProductCD_W_95cents'] = False
test.loc[(test['ProductCD'] == 4) & (test['TransactionAmt_2Dec'] == 0.95),'ProductCD_W_95cents'] = True
train['ProductCD_W_00cents'] = False
train.loc[(train['ProductCD'] == 4) & (train['TransactionAmt_2Dec'] == 0.00),'ProductCD_W_00cents'] = True
test['ProductCD_W_00cents'] = False
test.loc[(test['ProductCD'] == 4) & (test['TransactionAmt_2Dec'] == 0.00),'ProductCD_W_00cents'] = True
train['ProductCD_W_50cents'] = False
train.loc[(train['ProductCD'] == 4) & (train['TransactionAmt_2Dec'] == 0.50),'ProductCD_W_50cents'] = True
test['ProductCD_W_50cents'] = False
test.loc[(test['ProductCD'] == 4) & (test['TransactionAmt_2Dec'] == 0.50),'ProductCD_W_50cents'] = True
train['ProductCD_W_50_95_0_cents'] = False
# The decimal part should match ANY of .95, .50 or .00 (logical OR), not all three at once
train.loc[(train['ProductCD'] == 4) &
          ((train['TransactionAmt_2Dec'] == 0.95) |
           (train['TransactionAmt_2Dec'] == 0.50) |
           (train['TransactionAmt_2Dec'] == 0.00)), 'ProductCD_W_50_95_0_cents'] = True
test['ProductCD_W_50_95_0_cents'] = False
test.loc[(test['ProductCD'] == 4) &
         ((test['TransactionAmt_2Dec'] == 0.95) |
          (test['TransactionAmt_2Dec'] == 0.50) |
          (test['TransactionAmt_2Dec'] == 0.00)), 'ProductCD_W_50_95_0_cents'] = True
train['ProductCD_W_NOT_50_95_0_cents'] = False
train.loc[(train['ProductCD'] == 4) &
~(train['TransactionAmt_2Dec'] == 0.95) &
~(train['TransactionAmt_2Dec'] == 0.50) &
~(train['TransactionAmt_2Dec'] == 0.00),'ProductCD_W_NOT_50_95_0_cents'] = True
test['ProductCD_W_NOT_50_95_0_cents'] = False
test.loc[(test['ProductCD'] == 4) &
~(test['TransactionAmt_2Dec'] == 0.95) &
~(test['TransactionAmt_2Dec'] == 0.50) &
~(test['TransactionAmt_2Dec'] == 0.00),'ProductCD_W_NOT_50_95_0_cents'] = True
train.groupby('ProductCD_W_NOT_50_95_0_cents')['isFraud'].count()
train.to_parquet('../../data/train_FE004.parquet')
test.to_parquet('../../data/test_FE004.parquet')
train.dtypes
```
# Anna KaRNNa
In this notebook, I'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based off of Andrej Karpathy's [post on RNNs](http://karpathy.github.io/2015/05/21/rnn-effectiveness/) and [implementation in Torch](https://github.com/karpathy/char-rnn). Also, some information [here at r2rt](http://r2rt.com/recurrent-neural-networks-in-tensorflow-ii.html) and from [Sherjil Ozair](https://github.com/sherjilozair/char-rnn-tensorflow) on GitHub. Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
```
import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
```
First we'll load the text file and convert it into integers for our network to use.
```
with open('anna.txt', 'r') as f:
text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
```
Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
Here I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches.
The idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the `split_frac` keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set.
```
def split_data(chars, batch_size, num_steps, split_frac=0.9):
"""
Split character data into training and validation sets, inputs and targets for each set.
Arguments
---------
chars: character array
batch_size: Size of examples in each of batch
num_steps: Number of sequence steps to keep in the input and pass to the network
split_frac: Fraction of batches to keep in the training set
Returns train_x, train_y, val_x, val_y
"""
slice_size = batch_size * num_steps
n_batches = int(len(chars) / slice_size)
# Drop the last few characters to make only full batches
x = chars[: n_batches*slice_size]
y = chars[1: n_batches*slice_size + 1]
# Split the data into batch_size slices, then stack them into a 2D matrix
x = np.stack(np.split(x, batch_size))
y = np.stack(np.split(y, batch_size))
# Now x and y are arrays with dimensions batch_size x n_batches*num_steps
    # Split into training and validation sets, keep the first split_frac batches for training
split_idx = int(n_batches*split_frac)
train_x, train_y= x[:, :split_idx*num_steps], y[:, :split_idx*num_steps]
val_x, val_y = x[:, split_idx*num_steps:], y[:, split_idx*num_steps:]
return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape
train_x[:,:10]
```
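As a quick sanity check that the targets really are the inputs shifted by one character, the first few entries of the arrays created above can be compared (a minimal sketch):
```
# The target at position t should equal the input at position t + 1
print(train_x[0, 1:11])
print(train_y[0, :10])  # should match the line above
```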
I'll write another function to grab batches out of the arrays made by `split_data`. Here each batch will be a sliding window on these arrays with size `batch_size X num_steps`. For example, if we want our network to train on a sequence of 100 characters, `num_steps = 100`. For the next batch, we'll shift this window to the next sequence of `num_steps` characters. In this way we can feed batches to the network and the cell states will continue through on each batch. A short usage example of `get_batch` follows the next code block.
```
def get_batch(arrs, num_steps):
batch_size, slice_size = arrs[0].shape
n_batches = int(slice_size/num_steps)
for b in range(n_batches):
yield [x[:, b*num_steps: (b+1)*num_steps] for x in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
learning_rate=0.001, grad_clip=5, sampling=False):
if sampling == True:
batch_size, num_steps = 1, 1
tf.reset_default_graph()
# Declare placeholders we'll feed into the graph
inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
# Build the RNN layers
lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
initial_state = cell.zero_state(batch_size, tf.float32)
# Run the data through the RNN layers
rnn_inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(x_one_hot, num_steps, 1)]
outputs, state = tf.contrib.rnn.static_rnn(cell, rnn_inputs, initial_state=initial_state)
final_state = tf.identity(state, name='final_state')
# Reshape output so it's a bunch of rows, one row for each cell output
seq_output = tf.concat(outputs, axis=1,name='seq_output')
output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')
    # Now connect the RNN outputs to a softmax layer and calculate the cost
softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
name='softmax_w')
softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
logits = tf.matmul(output, softmax_w) + softmax_b
preds = tf.nn.softmax(logits, name='predictions')
loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
cost = tf.reduce_mean(loss, name='cost')
# Optimizer for training, using gradient clipping to control exploding gradients
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
train_op = tf.train.AdamOptimizer(learning_rate)
optimizer = train_op.apply_gradients(zip(grads, tvars))
# Export the nodes
export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
'keep_prob', 'cost', 'preds', 'optimizer']
Graph = namedtuple('Graph', export_nodes)
local_dict = locals()
graph = Graph(*[local_dict[each] for each in export_nodes])
return graph
```
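To see what `get_batch` yields, here is a minimal sketch that pulls a single window from the small split created above; the shapes are what `split_data(chars, 10, 200)` implies, not measured output:
```
# Grab just the first training window and inspect its shape
for bx, by in get_batch([train_x, train_y], 200):
    print(bx.shape, by.shape)  # expected: (10, 200) (10, 200)
    break
```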
## Hyperparameters
Here I'm defining the hyperparameters for the network. The two you probably haven't seen before are `lstm_size` and `num_layers`. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability.
```
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
```
## Write out the graph for TensorBoard
```
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
file_writer = tf.summary.FileWriter('./logs/1', sess.graph)
```
## Training
Time for training, which is pretty straightforward. Here I pass in some data and get an LSTM state back. Then I pass that state back into the network so the next batch can continue the state from the previous batch. Every so often (set by `save_every_n`) I calculate the validation loss and save a checkpoint.
```
!mkdir -p checkpoints/anna
epochs = 1
save_every_n = 200
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
batch_size=batch_size,
num_steps=num_steps,
learning_rate=learning_rate,
lstm_size=lstm_size,
num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Use the line below to load a checkpoint and resume training
#saver.restore(sess, 'checkpoints/anna20.ckpt')
n_batches = int(train_x.shape[1]/num_steps)
iterations = n_batches * epochs
for e in range(epochs):
# Train network
new_state = sess.run(model.initial_state)
loss = 0
for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
iteration = e*n_batches + b
start = time.time()
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 0.5,
model.initial_state: new_state}
batch_loss, new_state, _ = sess.run([model.cost, model.final_state, model.optimizer],
feed_dict=feed)
loss += batch_loss
end = time.time()
print('Epoch {}/{} '.format(e+1, epochs),
'Iteration {}/{}'.format(iteration, iterations),
'Training loss: {:.4f}'.format(loss/b),
'{:.4f} sec/batch'.format((end-start)))
if (iteration%save_every_n == 0) or (iteration == iterations):
# Check performance, notice dropout has been set to 1
val_loss = []
new_state = sess.run(model.initial_state)
for x, y in get_batch([val_x, val_y], num_steps):
feed = {model.inputs: x,
model.targets: y,
model.keep_prob: 1.,
model.initial_state: new_state}
batch_loss, new_state = sess.run([model.cost, model.final_state], feed_dict=feed)
val_loss.append(batch_loss)
print('Validation loss:', np.mean(val_loss),
'Saving checkpoint!')
saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints/anna')
```
## Sampling
Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character and the network predicts the next one. We then feed that new character back in to predict the one after it, and keep going to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
```
def pick_top_n(preds, vocab_size, top_n=5):
p = np.squeeze(preds)
p[np.argsort(p)[:-top_n]] = 0
p = p / np.sum(p)
c = np.random.choice(vocab_size, 1, p=p)[0]
return c
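# Quick illustration of pick_top_n on a made-up distribution (the numbers and vocab_size=6 are
# hypothetical): everything but the 3 most likely entries is zeroed out, renormalized, and sampled.
toy_preds = np.array([[0.05, 0.40, 0.25, 0.15, 0.10, 0.05]])
print(pick_top_n(toy_preds, vocab_size=6, top_n=3))  # prints 1, 2, or 3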
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
samples = [c for c in prime]
model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, checkpoint)
new_state = sess.run(model.initial_state)
for c in prime:
x = np.zeros((1, 1))
x[0,0] = vocab_to_int[c]
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
for i in range(n_samples):
x[0,0] = c
feed = {model.inputs: x,
model.keep_prob: 1.,
model.initial_state: new_state}
preds, new_state = sess.run([model.preds, model.final_state],
feed_dict=feed)
c = pick_top_n(preds, len(vocab))
samples.append(int_to_vocab[c])
return ''.join(samples)
checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
```
# Getting started with DoWhy: A simple example
This is a quick introduction to the DoWhy causal inference library.
We will load in a sample dataset and estimate the causal effect of a (pre-specified) treatment variable on a (pre-specified) outcome variable.
First, let us add the required path for Python to find the DoWhy code and load all required packages.
```
import os, sys
sys.path.append(os.path.abspath("../../../"))
```
Let's check the python version.
```
print(sys.version)
import numpy as np
import pandas as pd
import dowhy
from dowhy import CausalModel
import dowhy.datasets
```
Now, let us load a dataset. For simplicity, we simulate a dataset with linear relationships between common causes and treatment, and common causes and outcome.
Beta is the true causal effect.
```
data = dowhy.datasets.linear_dataset(beta=10,
num_common_causes=5,
num_instruments = 2,
num_effect_modifiers=1,
num_samples=10000,
treatment_is_binary=True)
df = data["df"]
print(df.head())
print(data["dot_graph"])
print("\n")
print(data["gml_graph"])
```
Note that we are using a pandas dataframe to load the data. At present, DoWhy only supports pandas dataframe as input.
## Interface 1 (recommended): Input causal graph
We now input a causal graph in the GML graph format (recommended). You can also use the DOT format.
```
# With graph
model=CausalModel(
data = df,
treatment=data["treatment_name"],
outcome=data["outcome_name"],
graph=data["gml_graph"]
)
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
```
The above causal graph shows the assumptions encoded in the causal model. We can now use this graph to first identify
the causal effect (go from a causal estimand to a probability expression), and then estimate the causal effect.
**DoWhy philosophy: Keep identification and estimation separate**
Identification can be achieved without access to the data, accessing only the graph. This results in an expression to be computed. This expression can then be evaluated using the available data in the estimation step.
It is important to understand that these are orthogonal steps.
* Identification
```
identified_estimand = model.identify_effect()
print(identified_estimand)
```
If you want to disable the warning for ignoring unobserved confounders, you can add a parameter flag ( *proceed\_when\_unidentifiable* ). The same parameter can also be added when instantiating the CausalModel object.
```
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
```
* Estimation
```
causal_estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.propensity_score_stratification")
print(causal_estimate)
print("Causal Estimate is " + str(causal_estimate.value))
```
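Since the data were simulated with a known effect (`beta=10` above), a quick sanity check is to compare the estimate against the ground truth. This is a minimal sketch; it assumes the dictionary returned by `dowhy.datasets.linear_dataset` exposes the true average treatment effect under the `ate` key:
```
# Compare the estimated effect with the true simulated effect (the "ate" key is an assumption)
print("True ATE:", data["ate"])
print("Estimated ATE:", causal_estimate.value)
print("Absolute error:", abs(causal_estimate.value - data["ate"]))
```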
## Interface 2: Specify common causes and instruments
```
# Without graph
model= CausalModel(
data=df,
treatment=data["treatment_name"],
outcome=data["outcome_name"],
common_causes=data["common_causes_names"],
effect_modifiers=data["effect_modifier_names"])
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
```
We get the same causal graph. Now identification and estimation are done as before.
```
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
```
* Estimation
```
estimate = model.estimate_effect(identified_estimand,
method_name="backdoor.propensity_score_stratification")
print(estimate)
print("Causal Estimate is " + str(estimate.value))
```
## Refuting the estimate
Let us now look at ways of refuting the estimate obtained.
### Adding a random common cause variable
```
res_random=model.refute_estimate(identified_estimand, estimate, method_name="random_common_cause")
print(res_random)
```
### Adding an unobserved common cause variable
```
res_unobserved=model.refute_estimate(identified_estimand, estimate, method_name="add_unobserved_common_cause",
confounders_effect_on_treatment="binary_flip", confounders_effect_on_outcome="linear",
effect_strength_on_treatment=0.01, effect_strength_on_outcome=0.02)
print(res_unobserved)
```
### Replacing treatment with a random (placebo) variable
```
res_placebo=model.refute_estimate(identified_estimand, estimate,
method_name="placebo_treatment_refuter", placebo_type="permute")
print(res_placebo)
```
### Removing a random subset of the data
```
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9)
print(res_subset)
```
As you can see, the propensity score stratification estimator is reasonably robust to refutations.
For reproducibility, you can add a parameter "random_seed" to any refutation method, as shown below.
```
res_subset=model.refute_estimate(identified_estimand, estimate,
method_name="data_subset_refuter", subset_fraction=0.9, random_seed = 1)
print(res_subset)
```
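If you want to run several refuters in one pass, a small helper loop keeps the calls tidy. This is only a sketch: it reuses the method names from the calls above but relies on each refuter's default keyword arguments, which may differ from the explicit settings used earlier:
```
# Run a handful of refuters with their default settings (a sketch, not the exact calls above)
for method in ["random_common_cause", "placebo_treatment_refuter", "data_subset_refuter"]:
    res = model.refute_estimate(identified_estimand, estimate, method_name=method)
    print(res)
```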
```
import torch
import torch.nn as nn
import torch.nn.functional as func
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = func.relu(x)
x = self.conv2(x)
x = func.relu(x)
x = func.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = func.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = func.log_softmax(x, dim=1)
return output
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = func.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
if args.dry_run:
break
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += func.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
class InitArgs:
def __init__(self):
self.batch_size = 64 #input batch size for training
self.test_batch_size = 1000 # input batch size for testing
self.epochs = 14 # number of epochs to train
self.lr = 1.0 # learning rate
self.gamma = 0.7 # Learning rate step gamma
self.use_cuda = True # enable CUDA training
        self.dry_run = False # quickly check a single pass
self.seed = 1 # random seed
self.log_interval = 10 # how many batches to wait before logging training status
self.save_model = True # For Saving the current Model
args = InitArgs()
use_cuda = args.use_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
train_kwargs = {'batch_size': args.batch_size}
test_kwargs = {'batch_size': args.test_batch_size}
if use_cuda:
cuda_kwargs = {'num_workers': 1,
'pin_memory': True,
'shuffle': True}
train_kwargs.update(cuda_kwargs)
test_kwargs.update(cuda_kwargs)
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
dataset1 = datasets.MNIST(root = './data', train=True, download=False,
transform=transform)
dataset2 = datasets.MNIST(root = './data', train=False,
transform=transform)
train_loader = torch.utils.data.DataLoader(dataset1,**train_kwargs)
test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)
model = Net().to(device)
optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
for epoch in range(1, args.epochs + 1):
train(args, model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step()
if args.save_model:
torch.save(model.state_dict(), "mnist_cnn.pt")
```
```
import pandas as pd
from sqlalchemy import create_engine
import psycopg2
from config import db_password
import os
from os import listdir
from os.path import isfile, join
from pathlib import Path
# Make dataframes for psy2019 and psy2020 CSVs
psy2019_df = pd.read_csv(r'resources\psy2019.csv')
psy2020_df = pd.read_csv(r'resources\psy2020.csv')
# Drop rows with bugged values
psy2019_df= psy2019_df.drop(index=[52,53,54,55,56,57,58])
# Drop rows with bugged values
psy2020_df = psy2020_df.drop(index=[52,53,54,55,56,57])
# Drop unnecessary data from Area Name column
psy2019_df['Area Name'] = psy2019_df['Area Name'].str.replace(r"\(.*\)", "", regex=True)
psy2020_df['Area Name'] = psy2020_df['Area Name'].str.replace(r"\(.*\)", "", regex=True)
# Rename all column names to be more readable and workable for postgres
psy2020_df = psy2020_df.rename(columns={'Area Name' : 'State', 'Employment(1)' : 'psy2020_number_employed', 'Annual mean wage(2)' : 'psy2020_annual_mean_wage', 'Employment per 1,000 jobs' : 'psy2020_employ_per_1k_jobs' , 'Location Quotient' : 'psy2020_loc_quotient' })
# Rename all column names to be more readable and workable for postgres
psy2019_df = psy2019_df.rename(columns={'Area Name' : 'State', 'Employment(1)' : 'psy2019_number_employed', 'Employment percent relative standard error(3)' : 'psy2019_percent_relative_standard_error', 'Annual mean wage(2)' : 'psy2019_annual_mean_wage', 'Employment per 1,000 jobs' : 'psy2019_employ_per_1k_jobs' , 'Location Quotient' : 'psy2019_loc_quotient' })
# Make psyjobs dataframe
psyjobs_df = pd.read_csv(r'resources\us_psyjobs_trends.csv')
psyjobs_df.head()
# Split up psyjobs df by year
psyjobs_df2018 = psyjobs_df.groupby(psyjobs_df.Year).get_group(2018)
psyjobs_df2019 = psyjobs_df.groupby(psyjobs_df.Year).get_group(2019)
psyjobs_df2020 = psyjobs_df.groupby(psyjobs_df.Year).get_group(2020)
# Drop year column for psyjobs dataframes
psyjobs_df2018 = psyjobs_df2018.drop(columns=['Year'])
psyjobs_df2019 = psyjobs_df2019.drop(columns=['Year'])
psyjobs_df2020 = psyjobs_df2020.drop(columns=['Year'])
# Add suffix for variable names
psyjobs_df2018 = psyjobs_df2018.add_suffix('_2018')
psyjobs_df2019 = psyjobs_df2019.add_suffix('_2019')
psyjobs_df2020 = psyjobs_df2020.add_suffix('_2020')
# Standardize state column
psyjobs_df2018 = psyjobs_df2018.rename(columns={'STATE_2018' : 'State'})
# Standardize state column
psyjobs_df2019 = psyjobs_df2019.rename(columns={'STATE_2019' : 'State'})
# Standardize state column
psyjobs_df2020 = psyjobs_df2020.rename(columns={'STATE_2020' : 'State'})
# Setup postgres variables
pg_user = "postgres"
pg_pwd = db_password
pg_port = "5432"
host = 'project-vu-database-piecharts.c7rvpt2rehpr.us-east-2.rds.amazonaws.com'
# Create connection string
db_string = "postgresql://{username}:{password}@{host}:{port}/project_db".format(username=pg_user, password=pg_pwd, host = host, port=pg_port)
# Connect to the database engine
engine = create_engine(db_string)
# Load psyjobs dataframes to sql server
psyjobs_df2020.to_sql(name='psyjobs2020', if_exists='replace', con=engine, index=False)
psyjobs_df2019.to_sql(name='psyjobs2019', if_exists='replace', con=engine, index=False)
psyjobs_df2018.to_sql(name='psyjobs2018', if_exists='replace', con=engine, index=False)
# Load general psy dataframes to sql server
psy2019_df.to_sql(name='psy2019', if_exists='replace', con=engine, index=False)
psy2020_df.to_sql(name='psy2020', if_exists='replace', con=engine, index=False)
```
# Capstone - Pneumonia Detection Challenge - Modeling using Mask RCNN
### Here are the insights from the Mask R-CNN model
Results (confusion matrices on the train and validation sets, plus the Kaggle score) are reported at the end of this notebook.
Import the necessary packages:
```
%tensorflow_version 1.x
import tensorflow
tensorflow.__version__
!pip3 install -q pydicom
!pip3 install -q tqdm
!pip3 install -q imgaug
import os
import sys
import random
import math
import numpy as np
import cv2
import json
import pydicom
from imgaug import augmenters as iaa
from tqdm import tqdm
import glob
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook
from matplotlib.patches import Rectangle
import seaborn as sns
import pydicom as dcm
%matplotlib inline
import cv2
import keras
import tensorflow as tf
import tensorflow.keras
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D , MaxPool2D , Flatten , Dropout , BatchNormalization, MaxPooling2D, GlobalAveragePooling2D, ZeroPadding2D
from tensorflow.keras.applications import DenseNet201
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from keras.callbacks import ReduceLROnPlateau
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.model_selection import cross_val_score
from google.colab import drive
drive.mount('/content/drive')
ROOT_DIR = '/content/drive/MyDrive/RSNA_PneumoniaDetectionChallenge'
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, 'logs')
print(ROOT_DIR)
print(MODEL_DIR)
os.chdir(ROOT_DIR)
!git clone https://github.com/matterport/Mask_RCNN.git
os.chdir('Mask_RCNN')
!python setup.py -q install
# Import Mask RCNN
sys.path.append(os.path.join(ROOT_DIR, 'Mask_RCNN')) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
train_dicom_dir = os.path.join(ROOT_DIR, 'stage_1_train_images')
test_dicom_dir = os.path.join(ROOT_DIR, 'stage_1_test_images')
```
Some setup functions and classes for Mask-RCNN:
`dicom_fps` is a list of the DICOM image paths and filenames.
`image_annotations` is a dictionary of the annotations keyed by the filenames.
Parsing the dataset returns the list of image filenames and the annotations dictionary.
```
def get_dicom_fps(dicom_dir):
dicom_fps = glob.glob(dicom_dir+'/'+'*.dcm')
return list(set(dicom_fps))
def parse_dataset(dicom_dir, anns):
image_fps = get_dicom_fps(dicom_dir)
image_annotations = {fp: [] for fp in image_fps}
for index, row in anns.iterrows():
fp = os.path.join(dicom_dir, row['patientId']+'.dcm')
image_annotations[fp].append(row)
return image_fps, image_annotations
# The following parameters have been selected to reduce running time for demonstration purposes
# These are not optimal
class DetectorConfig(Config):
"""Configuration for training pneumonia detection on the RSNA pneumonia dataset.
Overrides values in the base Config class.
"""
# Give the configuration a recognizable name
NAME = 'pneumonia'
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 8
BACKBONE = 'resnet50'
NUM_CLASSES = 2 # background + 1 pneumonia classes
# Use small images for faster training. Set the limits of the small side
# the large side, and that determines the image shape.
IMAGE_MIN_DIM = 64
IMAGE_MAX_DIM = 64
RPN_ANCHOR_SCALES = (32, 64)
TRAIN_ROIS_PER_IMAGE = 16
MAX_GT_INSTANCES = 3
DETECTION_MAX_INSTANCES = 3
DETECTION_MIN_CONFIDENCE = 0.9
DETECTION_NMS_THRESHOLD = 0.1
RPN_TRAIN_ANCHORS_PER_IMAGE = 16
    TOP_DOWN_PYRAMID_SIZE = 32
    STEPS_PER_EPOCH = 100
config = DetectorConfig()
config.display()
class DetectorDataset(utils.Dataset):
"""Dataset class for training pneumonia detection on the RSNA pneumonia dataset.
"""
def __init__(self, image_fps, image_annotations, orig_height, orig_width):
super().__init__(self)
# Add classes
self.add_class('pneumonia', 1, 'Lung Opacity')
# add images
for i, fp in enumerate(image_fps):
annotations = image_annotations[fp]
self.add_image('pneumonia', image_id=i, path=fp,
annotations=annotations, orig_height=orig_height, orig_width=orig_width)
def image_reference(self, image_id):
info = self.image_info[image_id]
return info['path']
def load_image(self, image_id):
info = self.image_info[image_id]
fp = info['path']
ds = pydicom.read_file(fp)
image = ds.pixel_array
# If grayscale. Convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
return image
def load_mask(self, image_id):
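        # Build one binary mask channel per annotation; each positive (Target == 1) box is drawn as a filled rectangle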
info = self.image_info[image_id]
annotations = info['annotations']
count = len(annotations)
if count == 0:
mask = np.zeros((info['orig_height'], info['orig_width'], 1), dtype=np.uint8)
class_ids = np.zeros((1,), dtype=np.int32)
else:
mask = np.zeros((info['orig_height'], info['orig_width'], count), dtype=np.uint8)
class_ids = np.zeros((count,), dtype=np.int32)
for i, a in enumerate(annotations):
if a['Target'] == 1:
x = int(a['x'])
y = int(a['y'])
w = int(a['width'])
h = int(a['height'])
mask_instance = mask[:, :, i].copy()
cv2.rectangle(mask_instance, (x, y), (x+w, y+h), 255, -1)
mask[:, :, i] = mask_instance
class_ids[i] = 1
return mask.astype(np.bool), class_ids.astype(np.int32)
# training dataset
anns = pd.read_csv(os.path.join(ROOT_DIR, 'stage_1_train_labels.csv'))
anns.head(10)
image_fps, image_annotations = parse_dataset(train_dicom_dir, anns=anns)
print(len(image_fps))
image_fps[0]
print(len(image_annotations))
val = image_annotations.items()
value_iterator = iter(val)
print(next(value_iterator))
ds = pydicom.read_file(image_fps[0]) # read dicom image from filepath
image = ds.pixel_array # get image array
# show dicom fields
ds
# Original DICOM image size: 1024 x 1024
ORIG_SIZE = 1024
```
Split the data into training and validation datasets
```
######################################################################
# Modify this line to use more or fewer images for training/validation.
# To use all images, do: image_fps_list = list(image_fps)
image_fps_list = list(image_fps)
#####################################################################
# split dataset into training vs. validation dataset
# split ratio is set to 0.9 vs. 0.1 (train vs. validation, respectively)
image_fps_list = sorted(image_fps_list)
random.seed(42)
random.shuffle(image_fps_list)
validation_split = 0.1
split_index = int((1 - validation_split) * len(image_fps_list))
image_fps_train = image_fps_list[:split_index]
image_fps_val = image_fps_list[split_index:]
print(len(image_fps_train), len(image_fps_val))
```
Create and prepare the training dataset using the DetectorDataset class.
```
# prepare the training dataset
dataset_train = DetectorDataset(image_fps_train, image_annotations, ORIG_SIZE, ORIG_SIZE)
dataset_train.prepare()
dataset_train
# prepare the validation dataset
dataset_val = DetectorDataset(image_fps_val, image_annotations, ORIG_SIZE, ORIG_SIZE)
dataset_val.prepare()
```
Display a random image with bounding boxes
```
# Load and display random samples and their bounding boxes
# Suggestion: Run this a few times to see different examples.
image_id = random.choice(dataset_train.image_ids)
image_fp = dataset_train.image_reference(image_id)
image = dataset_train.load_image(image_id)
mask, class_ids = dataset_train.load_mask(image_id)
print(image.shape)
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
plt.imshow(image[:, :, 0], cmap='gray')
plt.axis('off')
plt.subplot(1, 2, 2)
masked = np.zeros(image.shape[:2])
for i in range(mask.shape[2]):
masked += image[:, :, 0] * mask[:, :, i]
plt.imshow(masked, cmap='gray')
plt.axis('off')
print(image_fp)
print(class_ids)
model = modellib.MaskRCNN(mode='training', config=config, model_dir=MODEL_DIR)
```
Image augmentation. Try fine-tuning some of these parameters to custom values.
```
# Image augmentation
augmentation = iaa.SomeOf((0, 1), [
iaa.Fliplr(0.5),
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-25, 25),
shear=(-8, 8)
),
iaa.Multiply((0.9, 1.1))
])
```
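To eyeball what this augmenter actually does, you can apply it a few times to a sample image and plot the results. This is a minimal sketch and assumes the `image` array loaded in the visual-inspection cell above is still in memory:
```
# Apply the augmentation pipeline repeatedly to the same chest X-ray and display the variants
fig = plt.figure(figsize=(12, 4))
for k in range(3):
    aug_image = augmentation.augment_image(image)
    plt.subplot(1, 3, k + 1)
    plt.imshow(aug_image[:, :, 0], cmap='gray')
    plt.axis('off')
```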
Training the model
dataset_train and dataset_val are derived from DetectorDataset
DetectorDataset loads images from image filenames and masks from the annotation data
model is Mask-RCNN
```
# Changes done in Model.py
# Change tensor_metrics to metrics (line 2199)
#!python setup.py -q install
NUM_EPOCHS = 5
# Train Mask-RCNN Model
import warnings
warnings.filterwarnings("ignore")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=NUM_EPOCHS,
layers='all'
#augmentation=augmentation
)
# select trained model
dir_names = next(os.walk(model.model_dir))[1]
key = config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
        errno.ENOENT,
        "Could not find model directory under {}".format(model.model_dir))
fps = []
# Pick last directory
for d in dir_names:
dir_name = os.path.join(model.model_dir, d)
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
print('No weight files in {}'.format(dir_name))
else:
checkpoint = os.path.join(dir_name, checkpoints[-1])
fps.append(checkpoint)
model_path = sorted(fps)[-1]
print('Found model {}'.format(model_path))
class InferenceConfig(DetectorConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode='inference',
config=inference_config,
model_dir=MODEL_DIR)
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# set color for class
def get_colors_for_class_ids(class_ids):
colors = []
for class_id in class_ids:
if class_id == 1:
colors.append((.100, .904, .204))
return colors
```
### Compare predicted box to the expected value
```
# Show few example of ground truth vs. predictions on the validation dataset
dataset = dataset_val
fig = plt.figure(figsize=(10, 30))
for i in range(4):
image_id = random.choice(dataset.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
plt.subplot(6, 2, 2*i + 1)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset.class_names,
colors=get_colors_for_class_ids(gt_class_id), ax=fig.axes[-1])
plt.subplot(6, 2, 2*i + 2)
results = model.detect([original_image]) #, verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'],
colors=get_colors_for_class_ids(r['class_ids']), ax=fig.axes[-1])
```
### Predictions on the Validation Set
```
# Make predictions on test images, write out sample submission
def predict(image_fps, filepath='output_94pc.csv', min_conf=0.94):
# assume square image
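    # Each output row has the form: patientId,<prediction string>,<flag>
    # where the prediction string lists 'confidence x y width height' for every box above min_conf
    # and the flag is 1 if any box cleared the threshold; otherwise the row ends in 'Not,0'.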
with open(filepath, 'w') as file:
for image_id in tqdm(image_fps):
ds = pydicom.read_file(image_id)
image = ds.pixel_array
# If grayscale. Convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
patient_id = os.path.splitext(os.path.basename(image_id))[0]
results = model.detect([image])
r = results[0]
#print(r)
out_str = ""
out_str += patient_id
assert( len(r['rois']) == len(r['class_ids']) == len(r['scores']) )
flags = 0
if len(r['rois']) == 0:
out_str += ",Not,0"
else:
num_instances = len(r['rois'])
out_str += ','
for i in range(num_instances):
if r['scores'][i] > min_conf:
flags = 1
out_str += str(round(r['scores'][i], 2))
out_str += ' '
# x1, y1, width, height
x1 = r['rois'][i][1]
y1 = r['rois'][i][0]
width = r['rois'][i][3] - x1
height = r['rois'][i][2] - y1
bboxes_str = "{} {} {} {}".format(x1, y1, \
width, height)
out_str += bboxes_str
if flags == 0:
out_str += "Not,0"
else:
out_str += ",1"
#print(out_str)
file.write(out_str+"\n")
# Predict on the validation set
output_file = 'output_94pc.csv'
predict(image_fps_val, filepath=output_file)
output = pd.read_csv(output_file, names = ['patientId', 'PredString', 'Prediction'])
print(output.shape)
print(output.dtypes)
output.head()
trains = pd.read_csv('train_labels.csv')
dataval = pd.DataFrame(trains.loc[trains.patientId.isin(output.patientId), ['patientId', 'Target']])
print(dataval.shape, dataval.dtypes)
dataval.head()
dataval.drop_duplicates(keep='first',inplace=True)
print(dataval.shape)
dataval.to_csv('dataval.csv')
dataval.head()
df_merge_col = pd.merge(dataval, output, on='patientId')
df_merge_col.shape
df_merge_col.head()
df_merge_col.to_csv('df_merge_col.csv')
print(df_merge_col.Prediction.value_counts())
print(df_merge_col.Target.value_counts())
trueVal = []
trueVal = df_merge_col.Target.astype('int')
print(len(trueVal))
predVal = []
predVal = df_merge_col.Prediction.astype('int')
print(len(predVal))
cm_maskrcnn = confusion_matrix(y_true=trueVal, y_pred=predVal)
print(cm_maskrcnn)
print(classification_report(trueVal, predVal))
print("Accuracy using Mask RCNN on Validation set: ", round(accuracy_score(trueVal, predVal)*100,2))
```
### Predictions on the Train Set
```
# Make predictions on test images, write out sample submission
def predict(image_fps, filepath='output.csv', min_conf=0.80):
# assume square image
with open(filepath, 'w') as file:
for image_id in tqdm(image_fps):
ds = pydicom.read_file(image_id)
image = ds.pixel_array
# If grayscale. Convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
patient_id = os.path.splitext(os.path.basename(image_id))[0]
results = model.detect([image])
r = results[0]
#print(r)
out_str = ""
out_str += patient_id
assert( len(r['rois']) == len(r['class_ids']) == len(r['scores']) )
flags = 0
if len(r['rois']) == 0:
out_str += ",Not,0"
else:
num_instances = len(r['rois'])
out_str += ','
for i in range(num_instances):
if r['scores'][i] > min_conf:
flags = 1
out_str += str(round(r['scores'][i], 2))
out_str += ' '
# x1, y1, width, height
x1 = r['rois'][i][1]
y1 = r['rois'][i][0]
width = r['rois'][i][3] - x1
height = r['rois'][i][2] - y1
bboxes_str = "{} {} {} {}".format(x1, y1, \
width, height)
out_str += bboxes_str
if flags == 0:
out_str += "Not,0"
else:
out_str += ",1"
#print(out_str)
file.write(out_str+"\n")
# Predict on the validation set
output_file = 'output_train.csv'
predict(image_fps_train[:5000], filepath=output_file)
output_train = pd.read_csv(output_file, names = ['patientId', 'PredString', 'Prediction'])
print(output_train.shape)
print(output_train.dtypes)
output_train.head(12)
trains = pd.read_csv('train_labels.csv')
trains.head()
dataval_train = pd.DataFrame(trains.loc[trains.patientId.isin(output_train.patientId), ['patientId', 'Target']])
dataval_train.head()
dataval_train.shape
dataval_train = dataval_train.astype('str')
dataval_train.head()
dataval_train.drop_duplicates(keep=False,inplace=True)
print(dataval_train.shape)
dataval_train.head()
df_merge_col_train = pd.merge(output_train, dataval_train, on='patientId')
print(df_merge_col_train.shape)
df_merge_col_train.head()
df_merge_col_train.Prediction.value_counts()
df_merge_col_train.Target.value_counts()
trueVal_train = []
trueVal_train = df_merge_col_train.Target.astype('int')
len(trueVal_train)
predVal_train = []
predVal_train = df_merge_col_train.Prediction.astype('int')
len(predVal_train)
cm_maskrcnn_train = confusion_matrix(y_true=trueVal_train, y_pred=predVal_train)
print(cm_maskrcnn_train)
print(classification_report(trueVal_train, predVal_train))
print("Accuracy using Mask RCNN on Train set: ", round(accuracy_score(trueVal_train, predVal_train)*100,2))
```
Creating the submission file and finding the score
```
# Get filenames of test dataset DICOM images
test_image_fps = get_dicom_fps(test_dicom_dir)
# Make predictions on test images, write out sample submission
def predict(image_fps, filepath='PneumoniaDetection_Submission.csv', min_conf=0.94):
# assume square image
with open(filepath, 'w') as file:
for image_id in tqdm(image_fps):
ds = pydicom.read_file(image_id)
image = ds.pixel_array
# If grayscale. Convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
patient_id = os.path.splitext(os.path.basename(image_id))[0]
results = model.detect([image])
r = results[0]
out_str = ""
out_str += patient_id
assert( len(r['rois']) == len(r['class_ids']) == len(r['scores']) )
if len(r['rois']) == 0:
pass
else:
num_instances = len(r['rois'])
out_str += ","
for i in range(num_instances):
if r['scores'][i] > min_conf:
out_str += ' '
out_str += str(round(r['scores'][i], 2))
out_str += ' '
# x1, y1, width, height
x1 = r['rois'][i][1]
y1 = r['rois'][i][0]
width = r['rois'][i][3] - x1
height = r['rois'][i][2] - y1
bboxes_str = "{} {} {} {}".format(x1, y1, \
width, height)
out_str += bboxes_str
file.write(out_str+"\n")
# Predict on the full test set and write out the submission file
sample_submission_fp = 'PneumoniaDetection_Submission_.94pc.csv'
predict(test_image_fps, filepath=sample_submission_fp)
```
Score on Kaggle based on this submission file: 0.03125
print(image.shape)
plt.figure(figsize=(10, 10))
plt.subplot(1, 2, 1)
plt.imshow(image[:, :, 0], cmap='gray')
plt.axis('off')
plt.subplot(1, 2, 2)
masked = np.zeros(image.shape[:2])
for i in range(mask.shape[2]):
masked += image[:, :, 0] * mask[:, :, i]
plt.imshow(masked, cmap='gray')
plt.axis('off')
print(image_fp)
print(class_ids)
model = modellib.MaskRCNN(mode='training', config=config, model_dir=MODEL_DIR)
# Image augmentation
augmentation = iaa.SomeOf((0, 1), [
iaa.Fliplr(0.5),
iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
rotate=(-25, 25),
shear=(-8, 8)
),
iaa.Multiply((0.9, 1.1))
])
# Changes done in Model.py
# Change tensor_metrics to metrics (line 2199)
#!python setup.py -q install
NUM_EPOCHS = 5
# Train Mask-RCNN Model
import warnings
warnings.filterwarnings("ignore")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=NUM_EPOCHS,
layers='all'
#augmentation=augmentation
)
# select trained model
dir_names = next(os.walk(model.model_dir))[1]
key = config.NAME.lower()
dir_names = filter(lambda f: f.startswith(key), dir_names)
dir_names = sorted(dir_names)
if not dir_names:
import errno
raise FileNotFoundError(
errno.ENOENT,
"Could not find model directory under {}".format(self.model_dir))
fps = []
# Pick last directory
for d in dir_names:
dir_name = os.path.join(model.model_dir, d)
# Find the last checkpoint
checkpoints = next(os.walk(dir_name))[2]
checkpoints = filter(lambda f: f.startswith("mask_rcnn"), checkpoints)
checkpoints = sorted(checkpoints)
if not checkpoints:
print('No weight files in {}'.format(dir_name))
else:
checkpoint = os.path.join(dir_name, checkpoints[-1])
fps.append(checkpoint)
model_path = sorted(fps)[-1]
print('Found model {}'.format(model_path))
class InferenceConfig(DetectorConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode='inference',
config=inference_config,
model_dir=MODEL_DIR)
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# set color for class
def get_colors_for_class_ids(class_ids):
colors = []
for class_id in class_ids:
if class_id == 1:
colors.append((.100, .904, .204))
return colors
# Show few example of ground truth vs. predictions on the validation dataset
dataset = dataset_val
fig = plt.figure(figsize=(10, 30))
for i in range(4):
image_id = random.choice(dataset.image_ids)
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
plt.subplot(6, 2, 2*i + 1)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
dataset.class_names,
colors=get_colors_for_class_ids(gt_class_id), ax=fig.axes[-1])
plt.subplot(6, 2, 2*i + 2)
results = model.detect([original_image]) #, verbose=1)
r = results[0]
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
dataset.class_names, r['scores'],
colors=get_colors_for_class_ids(r['class_ids']), ax=fig.axes[-1])
# Make predictions on test images, write out sample submission
def predict(image_fps, filepath='output_94pc.csv', min_conf=0.94):
# assume square image
with open(filepath, 'w') as file:
for image_id in tqdm(image_fps):
ds = pydicom.read_file(image_id)
image = ds.pixel_array
# If grayscale. Convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
patient_id = os.path.splitext(os.path.basename(image_id))[0]
results = model.detect([image])
r = results[0]
#print(r)
out_str = ""
out_str += patient_id
assert( len(r['rois']) == len(r['class_ids']) == len(r['scores']) )
flags = 0
if len(r['rois']) == 0:
out_str += ",Not,0"
else:
num_instances = len(r['rois'])
out_str += ','
for i in range(num_instances):
if r['scores'][i] > min_conf:
flags = 1
out_str += str(round(r['scores'][i], 2))
out_str += ' '
# x1, y1, width, height
x1 = r['rois'][i][1]
y1 = r['rois'][i][0]
width = r['rois'][i][3] - x1
height = r['rois'][i][2] - y1
bboxes_str = "{} {} {} {}".format(x1, y1, \
width, height)
out_str += bboxes_str
if flags == 0:
out_str += "Not,0"
else:
out_str += ",1"
#print(out_str)
file.write(out_str+"\n")
# Predict on the validation set
output_file = 'output_94pc.csv'
predict(image_fps_val, filepath=output_file)
output = pd.read_csv(output_file, names = ['patientId', 'PredString', 'Prediction'])
print(output.shape)
print(output.dtypes)
output.head()
trains = pd.read_csv('train_labels.csv')
dataval = pd.DataFrame(trains.loc[trains.patientId.isin(output.patientId), ['patientId', 'Target']])
print(dataval.shape, dataval.dtypes)
dataval.head()
dataval.drop_duplicates(keep='first',inplace=True)
print(dataval.shape)
dataval.to_csv('dataval.csv')
dataval.head()
df_merge_col = pd.merge(dataval, output, on='patientId')
df_merge_col.shape
df_merge_col.head()
df_merge_col.to_csv('df_merge_col.csv')
print(df_merge_col.Prediction.value_counts())
print(df_merge_col.Target.value_counts())
trueVal = []
trueVal = df_merge_col.Target.astype('int')
print(len(trueVal))
predVal = []
predVal = df_merge_col.Prediction.astype('int')
print(len(predVal))
cm_maskrcnn = confusion_matrix(y_true=trueVal, y_pred=predVal)
print(cm_maskrcnn)
print(classification_report(trueVal, predVal))
print("Accuracy using Mask RCNN on Validation set: ", round(accuracy_score(trueVal, predVal)*100,2))
# Make predictions on test images, write out sample submission
def predict(image_fps, filepath='output.csv', min_conf=0.80):
# assume square image
with open(filepath, 'w') as file:
for image_id in tqdm(image_fps):
ds = pydicom.read_file(image_id)
image = ds.pixel_array
# If grayscale. Convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
patient_id = os.path.splitext(os.path.basename(image_id))[0]
results = model.detect([image])
r = results[0]
#print(r)
out_str = ""
out_str += patient_id
assert( len(r['rois']) == len(r['class_ids']) == len(r['scores']) )
flags = 0
if len(r['rois']) == 0:
out_str += ",Not,0"
else:
num_instances = len(r['rois'])
out_str += ','
for i in range(num_instances):
if r['scores'][i] > min_conf:
flags = 1
out_str += str(round(r['scores'][i], 2))
out_str += ' '
# x1, y1, width, height
x1 = r['rois'][i][1]
y1 = r['rois'][i][0]
width = r['rois'][i][3] - x1
height = r['rois'][i][2] - y1
bboxes_str = "{} {} {} {}".format(x1, y1, \
width, height)
out_str += bboxes_str
if flags == 0:
out_str += "Not,0"
else:
out_str += ",1"
#print(out_str)
file.write(out_str+"\n")
# Predict on the validation set
output_file = 'output_train.csv'
predict(image_fps_train[:5000], filepath=output_file)
output_train = pd.read_csv(output_file, names = ['patientId', 'PredString', 'Prediction'])
print(output_train.shape)
print(output_train.dtypes)
output_train.head(12)
trains = pd.read_csv('train_labels.csv')
trains.head()
dataval_train = pd.DataFrame(trains.loc[trains.patientId.isin(output_train.patientId), ['patientId', 'Target']])
dataval_train.head()
dataval_train.shape
# keras.metrics.accuracy()  # stray call with no arguments; sklearn's accuracy_score is used below
dataval_train = dataval_train.astype('str')
dataval_train.head()
dataval_train.drop_duplicates(keep=False,inplace=True)
print(dataval_train.shape)
dataval_train.head()
df_merge_col_train = pd.merge(output_train, dataval_train, on='patientId')
print(df_merge_col_train.shape)
df_merge_col_train.head()
df_merge_col_train.Prediction.value_counts()
df_merge_col_train.Target.value_counts()
trueVal_train = []
trueVal_train = df_merge_col_train.Target.astype('int')
len(trueVal_train)
predVal_train = []
predVal_train = df_merge_col_train.Prediction.astype('int')
len(predVal_train)
cm_maskrcnn_train = confusion_matrix(y_true=trueVal_train, y_pred=predVal_train)
print(cm_maskrcnn_train)
print(classification_report(trueVal_train, predVal_train))
print("Accuracy using Mask RCNN on Train set: ", round(accuracy_score(trueVal_train, predVal_train)*100,2))
# Get filenames of test dataset DICOM images
test_image_fps = get_dicom_fps(test_dicom_dir)
# Make predictions on test images, write out sample submission
def predict(image_fps, filepath='PneumoniaDetection_Submission.csv', min_conf=0.94):
# assume square image
with open(filepath, 'w') as file:
for image_id in tqdm(image_fps):
ds = pydicom.read_file(image_id)
image = ds.pixel_array
# If grayscale. Convert to RGB for consistency.
if len(image.shape) != 3 or image.shape[2] != 3:
image = np.stack((image,) * 3, -1)
patient_id = os.path.splitext(os.path.basename(image_id))[0]
results = model.detect([image])
r = results[0]
out_str = ""
out_str += patient_id
assert( len(r['rois']) == len(r['class_ids']) == len(r['scores']) )
if len(r['rois']) == 0:
pass
else:
num_instances = len(r['rois'])
out_str += ","
for i in range(num_instances):
if r['scores'][i] > min_conf:
out_str += ' '
out_str += str(round(r['scores'][i], 2))
out_str += ' '
# x1, y1, width, height
x1 = r['rois'][i][1]
y1 = r['rois'][i][0]
width = r['rois'][i][3] - x1
height = r['rois'][i][2] - y1
bboxes_str = "{} {} {} {}".format(x1, y1, \
width, height)
out_str += bboxes_str
file.write(out_str+"\n")
# predict only the first 3000 entries
sample_submission_fp = 'PneumoniaDetection_Submission_.94pc.csv'
predict(test_image_fps, filepath=sample_submission_fp)
| 0.483892 | 0.547404 |
# Handling infeasible models with Docplex
This tutorial includes everything you need to set up Decision Optimization engines, build a mathematical programming model, and then use the conflict refiner and the relaxer to diagnose and repair infeasible models.
When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.
>This notebook is part of **[Prescriptive Analytics for Python](http://ibmdecisionoptimization.github.io/docplex-doc/)**
>
>It requires either an [installation of CPLEX Optimizers](http://ibmdecisionoptimization.github.io/docplex-doc/getting_started.html) or it can be run on [IBM Cloud Pak for Data as a Service](https://www.ibm.com/products/cloud-pak-for-data/as-a-service/) (Sign up for a [free IBM Cloud account](https://dataplatform.cloud.ibm.com/registration/stepone?context=wdp&apps=all)
and you can start using `IBM Cloud Pak for Data as a Service` right away).
>
> CPLEX is available on <i>IBM Cloud Pak for Data</i> and <i>IBM Cloud Pak for Data as a Service</i>:
> - <i>IBM Cloud Pak for Data as a Service</i>: Depends on the runtime used:
> - <i>Python 3.x</i> runtime: Community edition
> - <i>Python 3.x + DO</i> runtime: full edition
> - <i>Cloud Pak for Data</i>: Community edition is installed by default. Please install the `DO` add-on in `Watson Studio Premium` for the full edition
Table of contents:
* [How Decision Optimization can help](#How-Decision-Optimization-can-help)
* [Example 1: handling a cyclic infeasible model](#Example-1:-handling-a-cyclic-infeasible-model.)
* [Example 2: Handling an infeasible production problem](#Example-2:-Handling-an-infeasible-production-problem)
* [Example 3: Handling variable bounds in an infeasible model](#Example-3:-Handling-variable-bounds-in-an-infeasible-model)
* [Summary](#Summary)
****
## How Decision Optimization can help
* Prescriptive analytics (Decision Optimization) technology recommends actions that are based on desired outcomes. It takes into account specific scenarios, resources, and knowledge of past and current events. With this insight, your organization can make better decisions and have greater control of business outcomes.
* Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
* Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.
<br/>
<u>With prescriptive analytics, you can:</u>
* Automate the complex decisions and trade-offs to better manage your limited resources.
* Take advantage of a future opportunity or mitigate a future risk.
* Proactively update recommendations based on changing events.
* Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.
```
from docplex.mp.environment import Environment
Environment().print_information()
```
## Example 1: handling a cyclic infeasible model.
You start with a very simple infeasible model: you have three variables, each of which is greater than the previous one, in a cyclic fashion. Of course, this leads to an infeasible model.
```
from docplex.mp.model import Model
def build_infeasible_cyclic_model3():
m = Model(name='cyclic3')
x,y,z = m.continuous_var_list(keys=['x', 'y', 'z'], name=str)
m.add( y >= x+1, name="y_gt_x")
m.add( z >= y+1, name="z_gt_y")
m.add( x >= z+1, name="x_gt_z")
# add another constraint; it should never appear in conflicts
m.add(x + y + z <= 33)
return m
cycle3 = build_infeasible_cyclic_model3()
cycle3.print_information()
```
As expected, the model is infeasible.
```
s = cycle3.solve(log_output=True)
assert s is None
print("the model is infeasible")
```
### Using the conflict refiner on the infeasible cyclic model
First, you can use the Conflict refiner on this model. The conflict refiner computes a minimal cluster of constraints, which causes the infeasibility.
Using the conflict refiner requires the following steps:
- instantiate a `ConflictRefiner` instance
- call `refine_conflict` on the model.
The output is an object of type `ConflictRefinerResults` which holds all information about the minimal conflict.
Displaying this result object lists all modeling objects which belong to the minimal conflict.
```
from docplex.mp.conflict_refiner import ConflictRefiner
cr = ConflictRefiner()
crr = cr.refine_conflict(cycle3, display=True)
```
### Using the constraint relaxer on the infeasible cyclic model
Another way to handle infeasibilities is to use the _relaxer_ (class `docplex.mp.relaxer.Relaxer`). The relaxer tries to find a _minimal_ feasible relaxation of the model, by relaxing certain constraints.
For example, a constraint `x == 1` can be relaxed with a slack of 1 to accept a value of 2 for x.
The relaxer tries to minimize the total amount of slack needed to find a feasible relaxation.
```
from docplex.mp.relaxer import Relaxer
rx = Relaxer()
rs = rx.relax(cycle3)
rx.print_information()
rs.display()
```
The relaxer has relaxed one constraint (x >= z+1) by 3, and found a solution with x=0, y=1, z=2, breaking the cyclic chain of constraints.
Unlike the conflict refiner, the relaxer provides a _relaxed_ solution to the initial model, with minimal slack. But there's more: the relaxer can also search for the best business objective, once it has found the minimal slack.
To illustrate, add an objective to your model, try to minimize z, and see what happens:
```
# retrieve the z variable using Model.get_var_by_name()
z = cycle3.get_var_by_name('z')
assert z
cycle3.minimize(z)
rs = rx.relax(cycle3)
rx.print_information()
rs.display()
```
The relaxed solution has changed, finding a minimum objective of 0 for z, and the relaxations have also changed: now two constraints are relaxed, but the total absolute slack remains unchanged, equal to 3.
To summarize, the relaxer finds a relaxed solution in two steps:
- first it finds the minimal amount of slack that is necessary to find a feasible solution
- second, with this minimal slack value it searches for the best objective value, if any.
In this case, the minimal conflict contains the three cyclic constraints, but not the fourth (x + y + z <= 33).
## Example 2: Handling an infeasible production problem
The model aims at minimizing the production cost for a number of products
while satisfying customer demand.
Production is constrained by the company's resources.
The model first declares the products and the resources.
The data consists of the description of the products (the demand, the inside
and outside costs, and the resource consumption) and the capacity of the
various resources.
The variables for this problem are the quantities produced for each product.
Of course, this model is naive, unrealistic and not robust, as it fails as soon as demand cannot be satisfied. But this is exactly why it is well suited to showing how to repair infeasible models.
First define the data:
```
# costs are stored in a dict from product names to cost
COSTS = {"kluski": 0.6, "capellini": 0.8, "fettucine": 0.3}
# demands are stored in a dict from product names to demands
DEMANDS = {"kluski": 10, "capellini": 20, "fettucine": 30}
# resources are stored as a dict of resource name to resource capacity
RESOURCES = {"flour": 40, "eggs": 60}
CONSUMPTIONS = {("kluski", "flour"): 0.5,
("kluski", "eggs"): 0.2,
("capellini", "flour"): 0.4,
("capellini", "eggs"): 0.4,
("fettucine", "flour"): 0.3,
("fettucine", "eggs"): 0.6}
from docplex.mp.model import Model
import six
def build_production_problem(costs, resources, consumptions, demands, **kwargs):
products = [p for p, _ in six.iteritems(costs)]
mdl = Model(name='pasta_production', **kwargs)
# --- decision variables ---
mdl.q_vars = mdl.continuous_var_dict(products, name="q")
# --- constraints ---
# demand satisfaction
mdl.add_constraints((mdl.q_vars[p] >= demands[p], 'ct_demand_%s' % p) for p in products)
# --- resource capacity ---
mdl.add_constraints((mdl.sum(mdl.q_vars[p] * consumptions[p, res] for p in products) <= cap,
'ct_res_%s' % res) for res, cap in six.iteritems(resources))
# --- objective ---
mdl.minimize(mdl.dotf(mdl.q_vars, lambda p: costs[p]))
return mdl
pasta1 = build_production_problem(COSTS, RESOURCES, CONSUMPTIONS, DEMANDS)
pasta1.print_information()
```
This default model is feasible. Solve it, then double the demand and solve the new model.
```
s1 = pasta1.solve()
s1.display()
demands2 = {p: 2*d for p, d in six.iteritems(DEMANDS)}
pasta2 = build_production_problem(COSTS, RESOURCES, CONSUMPTIONS, demands2)
s2 = pasta2.solve()
if s2 is None:
print("!! Pasta production with double demand is impossible")
```
With the demand doubled, the model becomes infeasible.
### Using the conflict refiner on the production problem
Start by running the conflict refiner on the second production model.
```
from docplex.mp.conflict_refiner import ConflictRefiner
crr = ConflictRefiner().refine_conflict(pasta2, display=True)
crr.display()
```
Not surprisingly, the conflict involves all three demand constraints as well as the flour resource constraint, but it gives no indication of by how much the demands cannot be satisfied.
### Using the default relaxer on the production problem
The purpose of the relaxer is to relax some constraints by a minimal amount, providing both a _relaxed solution_ and a measure of how infeasible the constraints were.
```
from docplex.mp.relaxer import Relaxer
# create an instance of relaxer
rx = Relaxer()
rs = rx.relax(pasta2)
rx.print_information()
rs.display()
```
The relaxer managed to satisfy all demands by _relaxing_ the flour constraint by an amount of 4. What does this mean?
To explain, first remember what this flour constraint was all about. You can use the `Model.get_constraint_by_name` method to retrieve a constraint from its name.
```
# get back the constraint from its name
ctf = pasta2.get_constraint_by_name("ct_res_flour")
assert ctf is not None
print(str(ctf))
```
Now you can see what the _left-hand side_ of this constraint evaluates to in the relaxed solution:
```
ctf.lhs.solution_value
```
This explains the relaxation of 4 for the flour resource constraint: the left-hand side evaluates to 44, which exceeds the flour capacity of 40 by 4.
### Managing constraint priorities
It might well happen that the relaxation found by the relaxer does not make sense in the real world. For example, in our production problem, resource constraints may be impossible to relax, while demand constraints could be.
This is where priorities come into play. By setting priorities, users can control how the relaxer chooses constraints to relax. In the following code, you set a HIGH priority for resource constraints (you could even make them mandatory) and a LOW priority for demand constraints.
```
from docplex.mp.basic import Priority
for ctr in pasta2.find_matching_linear_constraints("ct_res"):
ctr.priority = Priority.HIGH
for ctd in pasta2.find_matching_linear_constraints("ct_dem"):
ctd.priority = Priority.LOW
rx2 = Relaxer()
rs2 = rx2.relax(pasta2)
rx2.print_information()
rs2.display()
```
In this new relaxed solution, all resource constraints are satisfied, but one demand is not: kluski demand has an unfilled quantity of 8.
Setting constraint priorities explicitly is the most basic way to control relaxation, but there are others. A _function_ can be used: the relaxer will call the function for each constraint to determine its priority. Possible values are:
- relaxable priorities: VERY_LOW, LOW, MEDIUM, HIGH, VERY_HIGH
- non-relaxable priority: MANDATORY
Constraints with higher priority are less likely to be relaxed than constraints with lower priorities. Still, relaxation of a high-priority constraint cannot be ruled out, if it is the only way to provide a relaxed solution.
### Managing priorities with functions
In this section, you can see how to use a function to compute the priority of a constraint. The function must take a constraint and return a priority (an enumerated type; see `docplex.mp.basic.Priority`).
First, you reset all priorities to None (the default).
```
# reset all priorities
for c in pasta2.iter_constraints():
c.priority = None
# define the constraint -> priority function
def map_priority(ct):
ctname = ct.name
if not ctname:
return Priority.MANDATORY
elif "ct_res" in ctname:
return Priority.HIGH
elif "ct_dem" in ctname:
return Priority.LOW
else:
# will not be relaxed
return Priority.MANDATORY
# create a new instance of Relaxer with this function.
rx3 = Relaxer(prioritizer=map_priority)
# use it to relax pasta2 model
rx3.relax(pasta2)
# display relaxation.
rx3.print_information()
```
As expected, you get the same result as with the explicit priorities: an unsatisfied demand of 8 for kluski.
Note that relaxer can also accept a _dictionary_ of constraints to priorities.
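Below is a minimal sketch of the dictionary form. It assumes the dictionary of constraints to priorities is passed through the same `prioritizer` argument used for the function-based example above; check the docplex documentation for the exact signature.
```
# Sketch only: map each constraint of pasta2 to a Priority and hand the
# dictionary to the Relaxer. The argument name `prioritizer` is assumed to be
# the same one used for the function-based prioritizer above.
priority_by_ct = {}
for ct in pasta2.iter_constraints():
    ctname = ct.name or ''
    if 'ct_res' in ctname:
        priority_by_ct[ct] = Priority.HIGH
    elif 'ct_dem' in ctname:
        priority_by_ct[ct] = Priority.LOW
    else:
        priority_by_ct[ct] = Priority.MANDATORY
rx_dict = Relaxer(prioritizer=priority_by_ct)
rx_dict.relax(pasta2)
rx_dict.print_information()
```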
### The default relaxer revisited
Now that you know about setting priorities, you can understand the default behavior of the `Relaxer` class: for each constraint, it uses either the constraint's explicit priority (if set) or the default `MEDIUM` priority.
If no priority has been set, all constraints are considered equally relaxable.
## Example 3: Handling variable bounds in an infeasible model
### Variable bounds in conflict refiner
The conflict refiner takes variable bounds into account. This is illustrated with a very simple model, with four integer variables with lower bound 1, whose sum must be at most 2.
```
m4 = Model(name='m4')
ijs = m4.integer_var_list(keys=["i", "j", "k", "l"], name =str, lb = 1)
m4.add(m4.sum(ijs) <= 2)
s4 = m4.solve()
assert s4 is None
ConflictRefiner().refine_conflict(m4, display=True);
```
The resulting conflict contains the sum constraint _and_ three of the lower bounds.
### Variable bounds in relaxer
The relaxer only relaxes _constraints_, so in this case it relaxes the sum constraint.
```
r4 = Relaxer()
r4.relax(m4)
r4.print_information()
```
Changing variable lower bounds to constraints allows the relaxer to take them into account.
You can set a *LOW* priority on these lower-bound constraints, so we expect the default relaxer to relax them before the sum constraint, which keeps the default MEDIUM priority.
```
for v in m4.iter_variables():
v.lb = 0
clb = m4.add(v >= 1, "{0}_ge_1".format(v.name))
clb.priority = Priority.LOW
r4 = Relaxer()
r4.relax(m4)
r4.print_information()
```
As expected, the relaxer has relaxed two lower bound constraints, but not the sum constraint.
## Summary
You have learned how to use both the conflict refiner and the relaxer, and the differences between them:
- The conflict refiner lists constraints which are participating in the infeasibility. Constraints not mentioned in the conflict are not a problem.
- The conflict refiner considers both constraints and variable bounds.
- The conflict refiner does not provide any relaxed solution, nor any quantitative information.
In contrast, the relaxer provides a relaxed solution, and indicates which constraints were relaxed and by what amounts. It does not consider variable bounds. It can take a mapping from constraints to Priority objects, which can take many forms: a function, a dictionary, ...
#### References
* [Decision Optimization CPLEX Modeling for Python documentation](http://ibmdecisionoptimization.github.io/docplex-doc/)
* Need help with DOcplex or to report a bug? Please go [here](https://stackoverflow.com/questions/tagged/docplex)
* Contact us at [email protected]
Copyright © 2017-2019 IBM. Sample Materials.
|
github_jupyter
|
from docplex.mp.environment import Environment
Environment().print_information()
from docplex.mp.model import Model
def build_infeasible_cyclic_model3():
m = Model(name='cyclic3')
x,y,z = m.continuous_var_list(keys=['x', 'y', 'z'], name=str)
m.add( y >= x+1, name="y_gt_x")
m.add( z >= y+1, name="z_gt_y")
m.add( x >= z+1, name="x_gt_z")
# add another constraint; it should never appear in conflicts
m.add(x + y + z <= 33)
return m
cycle3 = build_infeasible_cyclic_model3()
cycle3.print_information()
s = cycle3.solve(log_output=True)
assert s is None
print("the model is infeasible")
from docplex.mp.conflict_refiner import ConflictRefiner
cr = ConflictRefiner()
crr = cr.refine_conflict(cycle3, display=True)
from docplex.mp.relaxer import Relaxer
rx = Relaxer()
rs = rx.relax(cycle3)
rx.print_information()
rs.display()
# retrieve the z variable using Model.get_var_by_name()
z = cycle3.get_var_by_name('z')
assert z
cycle3.minimize(z)
rs = rx.relax(cycle3)
rx.print_information()
rs.display()
# costs are stored in a dict from product names to cost
COSTS = {"kluski": 0.6, "capellini": 0.8, "fettucine": 0.3}
# demands are stored in a dict from product names to demands
DEMANDS = {"kluski": 10, "capellini": 20, "fettucine": 30}
# resources are stored as a dict of resource name to resource capacity
RESOURCES = {"flour": 40, "eggs": 60}
CONSUMPTIONS = {("kluski", "flour"): 0.5,
("kluski", "eggs"): 0.2,
("capellini", "flour"): 0.4,
("capellini", "eggs"): 0.4,
("fettucine", "flour"): 0.3,
("fettucine", "eggs"): 0.6}
from docplex.mp.model import Model
import six
def build_production_problem(costs, resources, consumptions, demands, **kwargs):
products = [p for p, _ in six.iteritems(costs)]
mdl = Model(name='pasta_production', **kwargs)
# --- decision variables ---
mdl.q_vars = mdl.continuous_var_dict(products, name="q")
# --- constraints ---
# demand satisfaction
mdl.add_constraints((mdl.q_vars[p] >= demands[p], 'ct_demand_%s' % p) for p in products)
# --- resource capacity ---
mdl.add_constraints((mdl.sum(mdl.q_vars[p] * consumptions[p, res] for p in products) <= cap,
'ct_res_%s' % res) for res, cap in six.iteritems(resources))
# --- objective ---
mdl.minimize(mdl.dotf(mdl.q_vars, lambda p: costs[p]))
return mdl
pasta1 = build_production_problem(COSTS, RESOURCES, CONSUMPTIONS, DEMANDS)
pasta1.print_information()
s1 = pasta1.solve()
s1.display()
demands2 = {p: 2*d for p, d in six.iteritems(DEMANDS)}
pasta2 = build_production_problem(COSTS, RESOURCES, CONSUMPTIONS, demands2)
s2 = pasta2.solve()
if s2 is None:
print("!! Pasta production with double demand is impossible")
from docplex.mp.conflict_refiner import ConflictRefiner
crr = ConflictRefiner().refine_conflict(pasta2, display=True)
crr.display()
from docplex.mp.relaxer import Relaxer
# create an instance of relaxer
rx = Relaxer()
rs = rx.relax(pasta2)
rx.print_information()
rs.display()
# get back the constraint from its name
ctf = pasta2.get_constraint_by_name("ct_res_flour")
assert ctf is not None
print(str(ctf))
ctf.lhs.solution_value
from docplex.mp.basic import Priority
for ctr in pasta2.find_matching_linear_constraints("ct_res"):
ctr.priority = Priority.HIGH
for ctd in pasta2.find_matching_linear_constraints("ct_dem"):
ctd.priority = Priority.LOW
rx2 = Relaxer()
rs2 = rx2.relax(pasta2)
rx2.print_information()
rs2.display()
# reset all priorities
for c in pasta2.iter_constraints():
c.priority = None
# define the constraint -> priority function
def map_priority(ct):
ctname = ct.name
if not ctname:
return Priority.MANDATORY
elif "ct_res" in ctname:
return Priority.HIGH
elif "ct_dem" in ctname:
return Priority.LOW
else:
# will not be relaxed
return Priority.MANDATORY
# create a new instance of Relaxer with this function.
rx3 = Relaxer(prioritizer=map_priority)
# use it to relax pasta2 model
rx3.relax(pasta2)
# display relaxation.
rx3.print_information()
m4 = Model(name='m4')
ijs = m4.integer_var_list(keys=["i", "j", "k", "l"], name =str, lb = 1)
m4.add(m4.sum(ijs) <= 2)
s4 = m4.solve()
assert s4 is None
ConflictRefiner().refine_conflict(m4, display=True);
r4 = Relaxer()
r4.relax(m4)
r4.print_information()
for v in m4.iter_variables():
v.lb = 0
clb = m4.add(v >= 1, "{0}_ge_1".format(v.name))
clb.priority = Priority.LOW
r4 = Relaxer()
r4.relax(m4)
r4.print_information()
| 0.642208 | 0.982065 |
<h2>Load data</h2>
The code is a modified version of the code in <a href="http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html">this</a> tutorial.
```
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
# Load data
df = pd.read_csv("../data/wikipedia_300.csv")
np_data = df.values
# Split data into X and y
X_raw = np_data[:,0:-1]
# Convert class label strings to integers
y_raw = np_data[:,-1]
encoder = LabelEncoder()
encoder.fit(y_raw)
y = encoder.transform(y_raw)
# Flatten input matrix to vector
X_raw = X_raw.ravel()
print("Examples: {}".format(X_raw.shape[0]))
print("Possible categories:",np.unique(y_raw),"encoded to",np.unique(y))
```
<h2>Convert to bag of words</h2>
```
from sklearn.feature_extraction.text import CountVectorizer
#count_vect = CountVectorizer(stop_words='english')
count_vect = CountVectorizer()
X = count_vect.fit_transform(X_raw)
print(X.shape)
```
<h2>Convert from occurrences to frequencies</h2>
```
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer().fit(X)
X = tf_transformer.transform(X)
print(X.shape)
```
<h2>Function for evaluating model accuracy</h2>
```
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
def evaluate(model):
print("-- Training data --")
# train model on training dataset
model.fit(X, y)
# evaluate dataset
y_pred = model.predict(X)
# calculate accuracy
accuracy = accuracy_score(y, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# confusion matrix
print("Confusion Matrix:")
conf_mx = confusion_matrix(y, y_pred)
print(conf_mx)
print(classification_report(y, y_pred))
print("")
print("-- 5-fold CV --")
# 5-fold CV
y_pred = cross_val_predict(model, X, y, cv=5)
# calculate accuracy
accuracy = accuracy_score(y, y_pred)
print("Average accuracy: %.2f%%" % (accuracy * 100.0))
# confusion matrix
print("Confusion Matrix:")
conf_mx = confusion_matrix(y, y_pred)
print(conf_mx)
print(classification_report(y, y_pred))
```
<h2>Naive Bayes</h2>
```
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
model = MultinomialNB(alpha=.01)
evaluate(model)
```
<h2>SVM</h2>
```
from sklearn import svm
model = svm.LinearSVC(random_state=42)
evaluate(model)
```
<h2>Pipeline example</h2>
```
from sklearn.pipeline import Pipeline
X = X_raw.ravel()
model = Pipeline([('vect', CountVectorizer(stop_words='english')),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB(alpha=.01)),])
evaluate(model)
```
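As a quick usage sketch (not part of the original tutorial), the fitted pipeline can classify new raw texts directly; the two example snippets below are invented for illustration, and the predicted labels are decoded with the `LabelEncoder` fitted earlier.
```
# Usage sketch: classify new, invented example texts with the fitted pipeline.
new_docs = [
    "The team won the championship after a dramatic penalty shootout.",
    "The new processor architecture doubles floating point throughput."
]
pred = model.predict(new_docs)
print(encoder.inverse_transform(pred))
```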
|
github_jupyter
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
# Load data
df = pd.read_csv("../data/wikipedia_300.csv")
np_data = df.values
# Split data into X and y
X_raw = np_data[:,0:-1]
# Convert class label strings to integers
y_raw = np_data[:,-1]
encoder = LabelEncoder()
encoder.fit(y_raw)
y = encoder.transform(y_raw)
# Flatten input matrix to vector
X_raw = X_raw.ravel()
print("Examples: {}".format(X_raw.shape[0]))
print("Possible categories:",np.unique(y_raw),"encoded to",np.unique(y))
from sklearn.feature_extraction.text import CountVectorizer
#count_vect = CountVectorizer(stop_words='english')
count_vect = CountVectorizer()
X = count_vect.fit_transform(X_raw)
print(X.shape)
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer().fit(X)
X = tf_transformer.transform(X)
print(X.shape)
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
def evaluate(model):
print("-- Training data --")
# train model on training dataset
model.fit(X, y)
# evaluate dataset
y_pred = model.predict(X)
# calculate accuracy
accuracy = accuracy_score(y, y_pred)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
# confusion matrix
print("Confusion Matrix:")
conf_mx = confusion_matrix(y, y_pred)
print(conf_mx)
print(classification_report(y, y_pred))
print("")
print("-- 5-fold CV --")
# 5-fold CV
y_pred = cross_val_predict(model, X, y, cv=5)
# calculate accuracy
accuracy = accuracy_score(y, y_pred)
print("Average accuracy: %.2f%%" % (accuracy * 100.0))
# confusion matrix
print("Confusion Matrix:")
conf_mx = confusion_matrix(y, y_pred)
print(conf_mx)
print(classification_report(y, y_pred))
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
model = MultinomialNB(alpha=.01)
evaluate(model)
from sklearn import svm
model = svm.LinearSVC(random_state=42)
evaluate(model)
from sklearn.pipeline import Pipeline
X = X_raw.ravel()
model = Pipeline([('vect', CountVectorizer(stop_words='english')),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB(alpha=.01)),])
evaluate(model)
| 0.779909 | 0.886666 |
# Summarizing Text
In this notebook, you will get to experiment with performing extraction based summarization of text. This technique of summarization attempts to identify key sentences in a provided text and returns a summary that is the result of returning just those key sentences.
The process we will follow to summarize text is a subset of the text analytics pipeline that includes these steps:
- Normalize the text: in this case, simply clean the text of line break characters. This is followed by some simple sentence tokenization, which breaks the paragraph into sentences and removes any extra surrounding spaces. Finally, the cleaned-up sentences are returned as a list.
- Apply the analytic method: in this case, we use the summarize method provided by the gensim library to generate the summarized result.
**Please confirm that you have run notebook `00 Init` before proceeding**
## Task 1 - Import modules
First, we need to import the modules used by our logic.
```
import nltk
import re
import unicodedata
import numpy as np
from gensim.summarization import summarize
nltk.download('punkt')
```
## Task 2 - Normalize text
In the following, we define a method that will remove line break characters, tokenize the paragraph of text into an array of string sentences and then strip any extra spaces surrounding a sentence. This is an example of a simple, but typical, text normalization step.
```
def clean_and_parse_document(document):
document = re.sub('\n', ' ', document)
document = document.strip()
sentences = nltk.sent_tokenize(document)
sentences = [sentence.strip() for sentence in sentences]
return sentences
```
## Task 3 - Summarize text
In the following, we define a method that uses the summarize method from the gensim module. We take the pre-processed output from our clean_and_parse_document routine and convert the array of string sentences to a single text item by concatenating the sentences. When performing text analytics, some analytic methods might require tokenized input and others may require string input, so this is a common process. In this, the summarize method requires a text string as input.
```
def summarize_text(text, summary_ratio=None, word_count=30):
sentences = clean_and_parse_document(text)
cleaned_text = ' '.join(sentences)
summary = summarize(cleaned_text, split=True, ratio=summary_ratio, word_count=word_count)
return summary
```
## Task 4 - Try it out
Author an example string that represents a rather long claim description that Contoso Ltd. might encounter. An example is provided for you, but feel free to provide your own.
```
example_document = """
I was driving down El Camino and stopped at a red light.
It was about 3pm in the afternoon.
The sun was bright and shining just behind the stoplight.
This made it hard to see the lights.
There was a car on my left in the left turn lane.
A few moments later another car, a black sedan pulled up behind me.
When the left turn light changed green, the black sedan hit me thinking
that the light had changed for us, but I had not moved because the light
was still red.
After hitting my car, the black sedan backed up and then sped past me.
I did manage to catch its license plate.
The license plate of the black sedan was ABC123.
"""
```
Now, invoke your summarize_text function against the example document and observe the result.
```
summarize_text(example_document)
```
Observe that the summary is returned as an array of strings. If multiple sentences were returned, there would be multiple array entries.
## Task 5 - Experiment
- The summarize text function above defaults to providing a summary that is about 30 words long. What happens if you attempt to summarize the text to 60 words?
- What happens when you submit a text to summarize that is shorter than the summary target length?
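A small sketch of these experiments follows. It assumes the same gensim version used above, and that gensim may raise a ValueError when the input text has too few sentences for the requested summary.
```
# Sketch of the suggested experiments (assumptions: same environment as above;
# gensim may raise ValueError when the input has too few sentences).
print(summarize_text(example_document, word_count=60))

short_text = "The black sedan hit my car at the red light on El Camino."
try:
    print(summarize_text(short_text, word_count=30))
except ValueError as err:
    print("Could not summarize the short text:", err)
```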
|
github_jupyter
|
import nltk
import re
import unicodedata
import numpy as np
from gensim.summarization import summarize
nltk.download('punkt')
def clean_and_parse_document(document):
document = re.sub('\n', ' ', document)
document = document.strip()
sentences = nltk.sent_tokenize(document)
sentences = [sentence.strip() for sentence in sentences]
return sentences
def summarize_text(text, summary_ratio=None, word_count=30):
sentences = clean_and_parse_document(text)
cleaned_text = ' '.join(sentences)
summary = summarize(cleaned_text, split=True, ratio=summary_ratio, word_count=word_count)
return summary
example_document = """
I was driving down El Camino and stopped at a red light.
It was about 3pm in the afternoon.
The sun was bright and shining just behind the stoplight.
This made it hard to see the lights.
There was a car on my left in the left turn lane.
A few moments later another car, a black sedan pulled up behind me.
When the left turn light changed green, the black sedan hit me thinking
that the light had changed for us, but I had not moved because the light
was still red.
After hitting my car, the black sedan backed up and then sped past me.
I did manage to catch its license plate.
The license plate of the black sedan was ABC123.
"""
summarize_text(example_document)
| 0.40486 | 0.96738 |
# Dimensionality Reduction with the Shogun Machine Learning Toolbox
#### *By Sergey Lisitsyn ([lisitsyn](https://github.com/lisitsyn)) and Fernando J. Iglesias Garcia ([iglesias](https://github.com/iglesias)).*
This notebook illustrates <a href="http://en.wikipedia.org/wiki/Unsupervised_learning">unsupervised learning</a> using the suite of dimensionality reduction algorithms available in Shogun. Shogun provides access to all these algorithms using [Tapkee](http://tapkee.lisitsyn.me/), a C++ library specialized in <a href="http://en.wikipedia.org/wiki/Dimensionality_reduction">dimensionality reduction</a>.
## Hands-on introduction to dimension reduction
First of all, let us start right away by showing what the purpose of dimensionality reduction actually is. To this end, we will begin by creating a function that provides us with some data:
```
import numpy
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
def generate_data(curve_type, num_points=1000):
if curve_type=='swissroll':
tt = numpy.array((3*numpy.pi/2)*(1+2*numpy.random.rand(num_points)))
height = numpy.array((numpy.random.rand(num_points)-0.5))
X = numpy.array([tt*numpy.cos(tt), 10*height, tt*numpy.sin(tt)])
return X,tt
if curve_type=='scurve':
tt = numpy.array((3*numpy.pi*(numpy.random.rand(num_points)-0.5)))
height = numpy.array((numpy.random.rand(num_points)-0.5))
X = numpy.array([numpy.sin(tt), 10*height, numpy.sign(tt)*(numpy.cos(tt)-1)])
return X,tt
if curve_type=='helix':
tt = numpy.linspace(1, num_points, num_points).T / num_points
tt = tt*2*numpy.pi
X = numpy.r_[[(2+numpy.cos(8*tt))*numpy.cos(tt)],
[(2+numpy.cos(8*tt))*numpy.sin(tt)],
[numpy.sin(8*tt)]]
return X,tt
```
The function above can be used to generate three-dimensional datasets with the shape of a [Swiss roll](http://en.wikipedia.org/wiki/Swiss_roll), the letter S, or an helix. These are three examples of datasets which have been extensively used to compare different dimension reduction algorithms. As an illustrative exercise of what dimensionality reduction can do, we will use a few of the algorithms available in Shogun to embed this data into a two-dimensional space. This is essentially the dimension reduction process as we reduce the number of features from 3 to 2. The question that arises is: what principle should we use to keep some important relations between datapoints? In fact, different algorithms imply different criteria to answer this question.
Just to start, let's pick an algorithm and one of the data sets; for example, let's see what embedding of the Swiss roll is produced by the Isomap algorithm. The Isomap algorithm is basically a slightly modified Multidimensional Scaling (MDS) algorithm, which finds the embedding as the solution of the following optimization problem:
$$
\min_{x'_1, x'_2, \dots} \sum_i \sum_j \| d'(x'_i, x'_j) - d(x_i, x_j)\|^2,
$$
with given $x_1, x_2, \dots \in X~~$ and unknown variables $x'_1, x'_2, \dots \in X'~~$ while $\text{dim}(X') < \text{dim}(X)~~~$,
$d: X \times X \to \mathbb{R}~~$ and $d': X' \times X' \to \mathbb{R}~~$ are defined as arbitrary distance functions (for example Euclidean).
In less mathematical terms, the MDS algorithm finds an embedding that preserves pairwise distances between points as much as possible. The Isomap algorithm changes one small but important detail: the distance. Instead of using local pairwise relationships directly, it takes a global factor into account through shortest paths on the neighborhood graph (the so-called geodesic distance). The neighborhood graph is defined as the graph with datapoints as nodes and weighted edges (with weight equal to the distance between points). The edge between points $x_i~$ and $x_j~$ exists if and only if $x_j~$ is among the $k~$ nearest neighbors of $x_i$. Later we will see that this 'global factor' changes the game for the Swiss roll dataset.
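To make the geodesic-distance idea concrete, here is a minimal sketch (not Shogun's internal implementation) that approximates geodesic distances on the Swiss roll with a k-nearest-neighbour graph and shortest paths. It assumes scikit-learn and SciPy are available; the choice of 300 points and 20 neighbours is arbitrary.
```
# Illustration only: approximate geodesic distances on the Swiss roll
# via a k-NN graph and shortest paths (Dijkstra).
from sklearn.neighbors import kneighbors_graph
from scipy.sparse.csgraph import shortest_path

X, _ = generate_data('swissroll', num_points=300)   # X has shape (3, 300)
points = X.T                                        # scikit-learn expects N x 3
knn_graph = kneighbors_graph(points, n_neighbors=20, mode='distance')
geodesic = shortest_path(knn_graph, method='D', directed=False)
print(geodesic.shape)   # (300, 300) matrix of approximate geodesic distances
```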
However, first we prepare a small function to plot any of the original data sets together with its embedding.
```
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
def plot(data, embedded_data, colors='m'):
fig = plt.figure()
fig.set_facecolor('white')
ax = fig.add_subplot(121,projection='3d')
ax.scatter(data[0],data[1],data[2],c=colors,cmap=plt.cm.Spectral)
plt.axis('tight'); plt.axis('off')
ax = fig.add_subplot(122)
ax.scatter(embedded_data[0],embedded_data[1],c=colors,cmap=plt.cm.Spectral)
plt.axis('tight'); plt.axis('off')
plt.show()
import shogun as sg
# wrap data into Shogun features
data, colors = generate_data('swissroll')
feats = sg.features(data)
# create instance of Isomap converter and configure it
isomap = sg.transformer('Isomap')
isomap.put('target_dim', 2)
# set the number of neighbours used in kNN search
isomap.put('k', 20)
# create instance of Multidimensional Scaling converter and configure it
mds = sg.transformer('MultidimensionalScaling')
mds.put('target_dim', 2)
# embed Swiss roll data
embedded_data_mds = mds.transform(feats).get('feature_matrix')
embedded_data_isomap = isomap.transform(feats).get('feature_matrix')
plot(data, embedded_data_mds, colors)
plot(data, embedded_data_isomap, colors)
```
As can be seen from the figure above, Isomap has been able to "unroll" the data, reducing its dimension from three to two. At the same time, points with similar colours in the input space are close to points with similar colours in the output space. That is, a new representation of the data has been obtained; this new representation maintains the properties of the original data, while reducing the amount of information required to represent it. Note that the fact that the embedding of the Swiss roll looks good in two dimensions stems from the *intrinsic* dimension of the input data. Although the original data lives in a three-dimensional space, its intrinsic dimension is lower, since the only degrees of freedom are the position along the roll (the polar angle and distance from the centre) and the height.
Finally, we use yet another method, Stochastic Proximity Embedding (SPE), to embed the helix:
```
# wrap data into Shogun features
data, colors = generate_data('helix')
features = sg.features(data)
# create SPE instance
converter = sg.transformer('StochasticProximityEmbedding')
converter.put('target_dim', 2)
# embed helix data
embedded_features = converter.transform(features)
embedded_data = embedded_features.get('feature_matrix')
plot(data, embedded_data, colors)
```
## References
- Lisitsyn, S., Widmer, C., Iglesias Garcia, F. J. Tapkee: An Efficient Dimension Reduction Library. ([Link to paper in JMLR](http://jmlr.org/papers/v14/lisitsyn13a.html#!).)
- Tenenbaum, J. B., de Silva, V. and Langford, J. B. A Global Geometric Framework for Nonlinear Dimensionality Reduction. ([Link to Isomap's website](http://isomap.stanford.edu/).)
|
github_jupyter
|
import numpy
import os
SHOGUN_DATA_DIR=os.getenv('SHOGUN_DATA_DIR', '../../../data')
def generate_data(curve_type, num_points=1000):
if curve_type=='swissroll':
tt = numpy.array((3*numpy.pi/2)*(1+2*numpy.random.rand(num_points)))
height = numpy.array((numpy.random.rand(num_points)-0.5))
X = numpy.array([tt*numpy.cos(tt), 10*height, tt*numpy.sin(tt)])
return X,tt
if curve_type=='scurve':
tt = numpy.array((3*numpy.pi*(numpy.random.rand(num_points)-0.5)))
height = numpy.array((numpy.random.rand(num_points)-0.5))
X = numpy.array([numpy.sin(tt), 10*height, numpy.sign(tt)*(numpy.cos(tt)-1)])
return X,tt
if curve_type=='helix':
tt = numpy.linspace(1, num_points, num_points).T / num_points
tt = tt*2*numpy.pi
X = numpy.r_[[(2+numpy.cos(8*tt))*numpy.cos(tt)],
[(2+numpy.cos(8*tt))*numpy.sin(tt)],
[numpy.sin(8*tt)]]
return X,tt
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
%matplotlib inline
def plot(data, embedded_data, colors='m'):
fig = plt.figure()
fig.set_facecolor('white')
ax = fig.add_subplot(121,projection='3d')
ax.scatter(data[0],data[1],data[2],c=colors,cmap=plt.cm.Spectral)
plt.axis('tight'); plt.axis('off')
ax = fig.add_subplot(122)
ax.scatter(embedded_data[0],embedded_data[1],c=colors,cmap=plt.cm.Spectral)
plt.axis('tight'); plt.axis('off')
plt.show()
import shogun as sg
# wrap data into Shogun features
data, colors = generate_data('swissroll')
feats = sg.features(data)
# create instance of Isomap converter and configure it
isomap = sg.transformer('Isomap')
isomap.put('target_dim', 2)
# set the number of neighbours used in kNN search
isomap.put('k', 20)
# create instance of Multidimensional Scaling converter and configure it
mds = sg.transformer('MultidimensionalScaling')
mds.put('target_dim', 2)
# embed Swiss roll data
embedded_data_mds = mds.transform(feats).get('feature_matrix')
embedded_data_isomap = isomap.transform(feats).get('feature_matrix')
plot(data, embedded_data_mds, colors)
plot(data, embedded_data_isomap, colors)
# wrap data into Shogun features
data, colors = generate_data('helix')
features = sg.features(data)
# create SPE instance
converter = sg.transformer('StochasticProximityEmbedding')
converter.put('target_dim', 2)
# embed helix data
embedded_features = converter.transform(features)
embedded_data = embedded_features.get('feature_matrix')
plot(data, embedded_data, colors)
| 0.48438 | 0.988853 |
```
from polar_codes.polar_code import PolarCode
from polar_codes.channels.bpsk_awgn_channel import BpskAwgnChannel
import numpy as np
from joblib import Parallel, delayed
from multiprocessing import cpu_count
from tqdm import tqdm
import pickle
channel = BpskAwgnChannel(3)
ebnodb = np.arange(0, 2.5, 0.5)
ebno = 10**(ebnodb/10)
snr = ebno
sinr = ebno/2
sinrdb = 10*np.log10(sinr)
ebnodb
ebnodb = np.arange(0, 2.5, 0.5)
snrdb = ebnodb + 10 * np.log10(1/2)
sinr = 10 ** (snrdb/10) / 2
sinrdb = 10 * np.log10(sinr)
ebnodb
sinrdb
channel.get_ber()
n = 9
K = 256
code = PolarCode(n=n, K=K,
construction_method='PW',
channel=channel, CRC_len=24)
R = K / 2**n
code._K
code._K_minus_CRC
R = code._K / code._N
R
bps = 1
EsNo = ebnodb + 10*np.log10(bps);
snrdB = EsNo + 10*np.log10(R);
snrdB
10*np.log10(R)
```
115 information bits to transmit
```
u_message = np.asarray([0 if np.random.random_sample() > 0.5 else 1 for _ in range(0, K)], dtype='uint8')
u_message
x_message = code.encode(u_message)
to_message = channel.modulate(x_message)
from_message = channel.transmit(to_message)
y_message = channel.demodulate(from_message)
%%time
scl_u_est_message = code.decode(y_message, decoding_method='SCL', list_size=32)
#scl_u_est_message
code.copy()
class rec_dec:
def __init__(self, code, y_message, i, u_est, is_calc_llr, llr_array, pm=0, PM=0):
self.u_est = u_est
self.is_calc_llr = is_calc_llr
self.llr_array = llr_array
self.y_message = y_message
self.pm = pm #Iddooose metric
self.layer = i
self.code = code
self.PM = PM #paper_metric
def split(self):
#Создаем 2 копии этого же instance c разными параметрами
u_right = self.u_est.copy()
u_opp = self.u_est.copy()
self.llr = self.code._fast_llr(self.layer, self.y_message, self.u_est[:self.layer], self.llr_array, self.is_calc_llr)
u_right[self.layer] = 0 if self.llr > 0 else 1
u_opp[self.layer] = 1 - u_right[self.layer]
pm_copy_r = self.pm
pm_copy_opp = self.pm
llr_copy = self.llr
layer_copy = self.layer
#calc 2 PM and provide them
PM_right = np.log(1+np.exp(-(1-2*u_right[self.layer])*self.llr))
new_PM_right = self.PM+PM_right
PM_opp = np.log(1+np.exp(-(1-2*u_opp[self.layer])*self.llr))
new_PM_opp = self.PM+PM_opp
right = rec_dec(self.code, self.y_message, layer_copy+1, u_right, self.is_calc_llr.copy(), self.llr_array.copy(), pm_copy_r, new_PM_right)
opp = rec_dec(self.code, self.y_message, layer_copy+1, u_opp, self.is_calc_llr.copy(), self.llr_array.copy(), pm_copy_opp+np.abs(llr_copy), new_PM_opp)
return [right, opp]
def meet_frozen(self):
self.llr = self.code._fast_llr(self.layer, self.y_message, self.u_est[:self.layer], self.llr_array, self.is_calc_llr)
self.u_est[self.layer] = 0
self.pm += np.abs(self.llr) if (self.llr < 0) else 0
self.PM += np.log(1+np.exp(-(1-2*self.u_est[self.layer])*self.llr))
self.layer +=1
def get_pm(self):
return self.pm
def get_PM(self):
return self.PM
def get_is_calc(self):
return self.is_calc_llr
def get_l(self):
return self.layer
def get_u_est(self):
return self.u_est
def get_llr_array(self):
return self.llr_array
def scl_dec(code, y_message, list_size=32, train=None): # decide here whether to use the Indooose metric or the paper's PM
u_est = np.full(code._N, -1)
is_calc_llr = [False] * code._N * (code._n + 1)
llr_array = np.full(code._N * (code._n + 1), 0.0, dtype=np.longfloat)
dec_array = [] # will store L decoders always
a = rec_dec(code, y_message, 0, u_est, is_calc_llr, llr_array)
dec_array.append(a)
M = np.zeros(code._N)
bit_for_flip_train = None
for i in range(code._N): #code._N
if i in code._frozen_bits_positions:
for elem in dec_array:
elem.meet_frozen()
else:
new_arr = []
for elem in dec_array:
temp = elem.split()
new_arr.append(temp[0])
new_arr.append(temp[1])
dec_array = new_arr
if len(dec_array) > list_size:
dec_array.sort(key=lambda x: x.get_PM())
M[i] = np.sum([x.get_PM() for x in dec_array[list_size:]]) - np.sum([x.get_PM() for x in dec_array[:list_size]])
else:
M[i] = - np.sum([x.get_PM() for x in dec_array[:list_size]])
if len(dec_array) > list_size: #pruning from 2L to L decoders
dec_array.sort(key=lambda x: x.get_PM()) # by paper (mb create new decoder parametr)
if (train is not None) and (bit_for_flip_train is None): #should save i, if right solution among discarded
for inst in dec_array[list_size:]:
if (train[:i+1] == inst.get_u_est()[:i+1]).all():
bit_for_flip_train = i
dec_array = dec_array[:list_size] # delete discarded paths
return M, bit_for_flip_train, dec_array
%%time
M, bit, c = scl_dec(code, y_message, 32, code.extend_info_bits(u_message))
bit
code._frozen_bits_positions
[x.get_PM() for x in c]
for d in c:
print(np.abs(u_message - code.get_message_info_bits(d.get_u_est())).sum())
0 in [np.abs(u_message - code.get_message_info_bits(d.get_u_est())).sum() for d in c]
code.get_message_info_bits(c[0].get_u_est())
code._calculate_CRC(code.get_message_info_bits(c[0].get_u_est()))
code._K_minus_CRC
code._K
c[0].get_u_est()
sinrdb
data_M_sinr = []
data_bit_sinr = []
for sinrdb_i in sinrdb:
channel = BpskAwgnChannel(sinrdb_i)
n = 9
K = 256
code = PolarCode(n=n, K=K,
construction_method='PW',
channel=channel, CRC_len=24)
for _ in range(5000):
u_message = np.asarray([0 if np.random.random_sample() > 0.5 else 1 for _ in range(0, K)], dtype='uint8') # x100
x_message = code.encode(u_message)
to_message = channel.modulate(x_message)
from_message = channel.transmit(to_message)
y_message = channel.demodulate(from_message)
        M, bit, _ = scl_dec(code, y_message, 32, code.extend_info_bits(u_message))
data_M_sinr.append(M)
data_bit_sinr.append(bit)
def calc_M(sinrdb_i):
channel = BpskAwgnChannel(sinrdb_i)
n = 9
K = 256
code = PolarCode(n=n, K=K,
construction_method='PW',
channel=channel, CRC_len=24)
u_message = np.asarray([0 if np.random.random_sample() > 0.5 else 1 for _ in range(0, K)], dtype='uint8') # x100
x_message = code.encode(u_message)
to_message = channel.modulate(x_message)
from_message = channel.transmit(to_message)
y_message = channel.demodulate(from_message)
    M, bit, _ = scl_dec(code, y_message, 8, code.extend_info_bits(u_message))
return M, bit
%%time
calc_M(3)
code._scl_decode(L=16)
%%time
data = []
for sinrdb_i in sinrdb:
data.append(Parallel(n_jobs=cpu_count())(delayed(calc_M)(sinrdb_i) for _ in tqdm(range(5000))))
with open('lstm_train.pickle', 'wb') as handle:
pickle.dump(data, handle)
noisevar =
```
In order to run this notebook with the correct PYTHONPATH, you can use
```
$ ./e-mission-jupyter.bash notebook
```
This notebook makes heavy use of the following python libraries.
- `pandas`: from the python computing stack (http://pandas.pydata.org/pandas-docs/stable/)
- `geojson`: standard JSON representation of geographic data (http://geojson.org/)
- `folium`: (https://github.com/python-visualization/folium) python bridge to leaflet (http://leafletjs.com/)
### Pick a user to work with ###
```
import emission.core.get_database as edb
import pandas as pd
all_users = pd.DataFrame(list(edb.get_uuid_db().find({}, {"user_email":1, "uuid": 1, "_id": 0})))
all_users
from uuid import UUID
test_user_id = all_users.iloc[1].uuid # replace with UUID from above
```
If you want to work across multiple users, just do the same thing again
```
test_user_id_2 = all_users.iloc[2].uuid
```
### Preferred access technique
The preferred technique to access wrapper objects from the timeseries is to use the abstract timeseries interface. This makes it easier for us to switch to alternative timeseries implementations later. The timeseries is conceptually a set of streams, one for each of the types, primarily indexed by time. So you can query for all entries of a particular time within a specified time range.
```
import emission.storage.timeseries.abstract_timeseries as esta
import emission.storage.decorations.analysis_timeseries_queries as esda
import emission.core.wrapper.entry as ecwe
import emission.storage.decorations.trip_queries as esdt
import emission.storage.timeseries.timequery as estt
print(test_user_id, test_user_id_2)
ts = esta.TimeSeries.get_time_series(test_user_id)
ts_2 = esta.TimeSeries.get_time_series(test_user_id_2)
```
#### Accessing entries directly
```
# Get all cleaned trips for the first user
entry_it = ts.find_entries(["analysis/cleaned_trip"], time_query=None)
```
All keys and their mapping to data model objects can be found in
https://github.com/e-mission/e-mission-server/blob/master/emission/core/wrapper/entry.py
```
for ct in entry_it:
cte = ecwe.Entry(ct)
print("=== Trip:", cte.data.start_loc, "->", cte.data.end_loc)
section_it = esdt.get_sections_for_trip("analysis/cleaned_section", test_user_id, cte.get_id())
for sec in section_it:
print(" --- Section:", sec.data.start_loc, "->", sec.data.end_loc, " on ", sec.data.sensed_mode)
# Get all cleaned trips for the second user
entry_it = ts_2.find_entries(["analysis/cleaned_trip"], time_query=None)
for ct in entry_it:
cte = ecwe.Entry(ct)
print("=== Trip:", cte.data.start_loc, "->", cte.data.end_loc)
    section_it = esdt.get_sections_for_trip("analysis/cleaned_section", test_user_id_2, cte.get_id())
for sec in section_it:
print(" --- Section:", sec.data.start_loc, "->", sec.data.end_loc, " on ", sec.data.sensed_mode)
# Get cleaned trips for the two users that started on 1st Aug UTC
import arrow
aug_1_tq = estt.TimeQuery("data.start_ts",
arrow.get("2017-08-01").timestamp, # start of range
arrow.get("2017-08-02").timestamp) # end of range
entry_it = ts.find_entries(["analysis/cleaned_trip"], time_query=aug_1_tq)
entry_it_2 = ts_2.find_entries(["analysis/cleaned_trip"], time_query=aug_1_tq)
print("From %s -> %s, user %s had %d trips and user %s had %d trips" %
(aug_1_tq.startTs, aug_1_tq.endTs, test_user_id, len(list(entry_it)), test_user_id_2, len(list(entry_it_2))))
```
#### Accessing a dataframe
```
# Get all cleaned trips for the first user
ct_df = ts.get_data_df("analysis/cleaned_trip", time_query=None)
len(ct_df)
ct_df.columns
ct_df[["start_loc", "end_loc", "start_fmt_time", "end_fmt_time"]]
# Get all cleaned trips for the second user
ct_df_2 = ts_2.get_data_df("analysis/cleaned_trip", time_query=None)
ct_df_2[["start_loc", "end_loc", "start_ts", "end_ts"]]
# Get cleaned trips for the two users that started on 1st Aug UTC
import arrow
aug_1_tq = estt.TimeQuery("data.start_ts",
arrow.get("2017-08-01").timestamp, # start of range
arrow.get("2017-08-02").timestamp) # end of range
ct_df = ts.get_data_df("analysis/cleaned_trip", time_query=aug_1_tq)
ct_df_2 = ts_2.get_data_df("analysis/cleaned_trip", time_query=aug_1_tq)
print("From %s -> %s, user %s had %d trips and user %s had %d trips" %
(aug_1_tq.startTs, aug_1_tq.endTs, test_user_id, len(ct_df), test_user_id_2, len(ct_df_2)))
cs_df = ts.get_data_df("analysis/cleaned_section", time_query=None)
len(cs_df)
cs_df.columns
cs_df[["start_loc", "end_loc", "start_ts", "end_ts", "sensed_mode"]]
```
### Direct mongodb queries
You can also use direct mongodb queries during exploratory work. I do ask, though, that for ongoing use you create a storage decorator (`emission/storage/decorations`) when you submit a pull request
```
import emission.core.get_database as edb
edb.get_timeseries_db().find_one()
edb.get_timeseries_db().distinct("metadata.key")
```
Note that in this case, you need to know whether to use the `timeseries` or the `analysis_timeseries` collection
```
edb.get_analysis_timeseries_db().distinct("metadata.key")
edb.get_analysis_timeseries_db().find({"user_id": test_user_id, "metadata.key": "analysis/cleaned_trip"}).count()
```
In particular, you can use this to access entries that are not in the timeseries
```
edb.get_uuid_db().distinct("uuid")
```
### Timeline
The trips and places maintain links to each other - e.g. `start_place`, `end_place`
```
ct_df[["start_place", "end_place"]]
```
These are _primary key links_ to other entries in the database. It would be useful to have a doubly linked list representing this properly. The Timeline helps with that.
```
import emission.storage.decorations.timeline as esdl
trip_start_end_fuzz = 10 # seconds
ct_df = ts.get_data_df("analysis/cleaned_trip", time_query=None)
tl = esdl.get_cleaned_timeline(test_user_id, ct_df.iloc[0].start_ts - trip_start_end_fuzz, ct_df.iloc[-1].end_ts + trip_start_end_fuzz)
for e in tl:
if 'enter_ts' in e.data:
# Must be place-like
print(e.metadata.key, e.data.enter_fmt_time, "->", e.data.exit_fmt_time)
else:
print(e.metadata.key, e.data.start_fmt_time, "->", e.data.end_fmt_time)
# The timeline is an iterator, so after it is consumed, it is empty
for e in tl:
if 'enter_ts' in e.data:
# Must be place-like
print(e.metadata.key, e.data.enter_fmt_time, "->", e.data.exit_fmt_time)
else:
print(e.metadata.key, e.data.start_fmt_time, "->", e.data.end_fmt_time)
stl = esdt.get_cleaned_timeline_for_trip(test_user_id, tl.first_trip().get_id())
for e in stl:
print(e.metadata.key)
stl = esdt.get_cleaned_timeline_for_trip(test_user_id, tl.last_trip().get_id())
for e in stl:
print(e.metadata.key)
```
## Getting trip and section details ##
Once we have trip and section objects, we can retrieve the sensed data associated with them by querying for data in various streams that falls within the time ranges associated with the trip/section. Here again, our architecture of storing the analysis results as a separate datastream makes it easy to retrieve data at various levels of processing.
### Plot a processed trip or set of trips ###
```
import emission.analysis.plotting.geojson.geojson_feature_converter as gfc
import emission.analysis.plotting.leaflet_osm.our_plotter as lo
first_trip_for_user = ct_df.iloc[0]
first_trip_start_ts = first_trip_for_user.start_ts
first_trip_end_ts = first_trip_for_user.end_ts
trip_start_end_fuzz = 10 # seconds
trips_geojson_list = gfc.get_geojson_for_ts(test_user_id, first_trip_start_ts-trip_start_end_fuzz, ct_df.iloc[-1].end_ts+trip_start_end_fuzz)
len(trips_geojson_list)
map_list = lo.get_maps_for_geojson_trip_list(trips_geojson_list)
len(map_list)
map_list[0]
map_list[-1]
import branca.element as bre
nrows = 2
ncols = 3
fig = bre.Figure()
for i, m in enumerate(map_list[:6]):
fig.add_subplot(nrows,ncols,i+1).add_child(m)
fig
nrows = 2
ncols = 3
fig = bre.Figure()
for i, map in enumerate(map_list[-6:]):
fig.add_subplot(nrows,ncols,i+1).add_child(map)
fig
```
## Can you do better? ##
### Get locations with no processing, basic filtering and resampling for the first trip ###
```
all_locs = ts.get_data_df("background/location",
time_query = esda.get_time_query_for_trip_like(
"analysis/cleaned_section", ct_df.iloc[0]._id))
filtered_locs = ts.get_data_df("background/filtered_location",
time_query = esda.get_time_query_for_trip_like(
"analysis/cleaned_section", ct_df.iloc[0]._id))
resampled_locs = ts.get_data_df("analysis/recreated_location",
time_query = esda.get_time_query_for_trip_like(
"analysis/cleaned_section", ct_df.iloc[0]._id))
print("Locations go from all = %d -> filtered = %d -> resampled = %d" % (len(all_locs),
len(filtered_locs),
len(resampled_locs)))
all_locs[["_id", "latitude", "longitude", "fmt_time"]]
```
### Get the raw motion activity, in case you want to do different segmentation ###
```
all_activity = ts.get_data_df("background/motion_activity",
esda.get_time_query_for_trip_like(
"analysis/cleaned_section", ct_df.iloc[0]._id))
all_activity.columns
import emission.core.wrapper.motionactivity as ecwm
print("Found %d walking entries, %d on_foot entries" % (len(all_activity[all_activity.type == ecwm.MotionTypes.WALKING.value]),
len(all_activity[all_activity.type == ecwm.MotionTypes.ON_FOOT.value])))
print("Found %d motorized entries" % (len(all_activity[all_activity.type == ecwm.MotionTypes.IN_VEHICLE.value])))
```
### Plot the location points ###
```
map_list = lo.get_maps_for_geojson_unsectioned([gfc.get_feature_list_from_df(all_locs),
gfc.get_feature_list_from_df(filtered_locs),
gfc.get_feature_list_from_df(resampled_locs)])
fig = bre.Figure()
for i, map in enumerate(map_list):
fig.add_subplot(1,3,i+1).add_child(map)
fig
```
# Dimensionality Reduction
## Reducing the number of degrees of freedom, overarching view
Many Machine Learning problems involve thousands or even millions of
features for each training instance. Not only does this make training
extremely slow, it can also make it much harder to find a good
solution, as we will see. This problem is often referred to as the
curse of dimensionality. Fortunately, in real-world problems, it is
often possible to reduce the number of features considerably, turning
an intractable problem into a tractable one.
Here we will discuss some of the most popular dimensionality reduction
techniques: the principal component analysis (PCA), Kernel PCA, and
Locally Linear Embedding (LLE). Furthermore, we will start by looking
at some simple preprocessing of the data which allow us to rescale the
data.
Principal component analysis and its various variants deal with the
problem of fitting a low-dimensional [affine
subspace](https://en.wikipedia.org/wiki/Affine_space) to a set of of
data points in a high-dimensional space. With its family of methods it
is one of the most used tools in data modeling, compression and
visualization.
## Preprocessing our data
Before we proceed, however, we will discuss how to preprocess our
data. So far, in connection with our previous examples, we have not
met many cases that were particularly sensitive to the scaling of the
data. In general, however, the data may need rescaling and/or may contain
extreme values. Scaling the data renders our inputs much more
suitable for the algorithms we want to employ.
**Scikit-Learn** has several functions which allow us to rescale the
data, normally resulting in much better results in terms of various
accuracy scores. The **StandardScaler** function in **Scikit-Learn**
ensures that for each feature/predictor we study the mean value is
zero and the variance is one (every column in the design/feature
matrix). This scaling has the drawback that it does not ensure that
we have a particular maximum or minimum in our data set. Another
function included in **Scikit-Learn** is the **MinMaxScaler** which
ensures that all features are exactly between $0$ and $1$.
## More preprocessing
The **Normalizer** scales each data
point such that the feature vector has a Euclidean length of one. In other words, it
projects a data point on the circle (or sphere in the case of higher dimensions) with a
radius of 1. This means every data point is scaled by a different number (by the
inverse of its length).
This normalization is often used when only the direction (or angle) of the data matters,
not the length of the feature vector.
The **RobustScaler** works similarly to the StandardScaler in that it
ensures statistical properties for each feature that guarantee that
they are on the same scale. However, the RobustScaler uses the median
and quartiles, instead of mean and variance. This makes the
RobustScaler ignore data points that are very different from the rest
(like measurement errors). These odd data points are also called
outliers, and might often lead to trouble for other scaling
techniques.
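To make these two scalers concrete, here is a minimal sketch (with a small, purely hypothetical toy matrix) showing how the **Normalizer** acts on each row (sample) and the **RobustScaler** on each column (feature).
```
import numpy as np
from sklearn.preprocessing import Normalizer, RobustScaler

# Hypothetical toy data: three samples, two features, with one clear outlier
X = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [100.0, 5.0]])

# Normalizer rescales each row (sample) to unit Euclidean length
X_norm = Normalizer(norm='l2').fit_transform(X)
print(X_norm)
print(np.linalg.norm(X_norm, axis=1))  # all equal to one

# RobustScaler rescales each column (feature) using the median and the
# interquartile range, so the outlier in the first feature has limited influence
X_robust = RobustScaler().fit_transform(X)
print(X_robust)
```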
## Simple preprocessing examples, Franke function and regression
```
%matplotlib inline
# Common imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn.linear_model as skl
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler, Normalizer
from sklearn.svm import SVR
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "DataFiles/"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
def create_X(x, y, n ):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
l = int((n+1)*(n+2)/2) # Number of elements in beta
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
# Making meshgrid of datapoints and compute Franke's function
n = 5
N = 1000
x = np.sort(np.random.uniform(0, 1, N))
y = np.sort(np.random.uniform(0, 1, N))
z = FrankeFunction(x, y)
X = create_X(x, y, n=n)
# split in training and test data
X_train, X_test, y_train, y_test = train_test_split(X,z,test_size=0.2)
svm = SVR(gamma='auto',C=10.0)
svm.fit(X_train, y_train)
# The mean squared error and R2 score
print("MSE before scaling: {:.2f}".format(mean_squared_error(svm.predict(X_test), y_test)))
print("R2 score before scaling {:.2f}".format(svm.score(X_test,y_test)))
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print("Feature min values before scaling:\n {}".format(X_train.min(axis=0)))
print("Feature max values before scaling:\n {}".format(X_train.max(axis=0)))
print("Feature min values after scaling:\n {}".format(X_train_scaled.min(axis=0)))
print("Feature max values after scaling:\n {}".format(X_train_scaled.max(axis=0)))
svm = SVR(gamma='auto',C=10.0)
svm.fit(X_train_scaled, y_train)
print("MSE after scaling: {:.2f}".format(mean_squared_error(svm.predict(X_test_scaled), y_test)))
print("R2 score for scaled data: {:.2f}".format(svm.score(X_test_scaled,y_test)))
```
## Simple preprocessing examples, breast cancer data and classification, Support Vector Machines
We show here how we can treat a simple classification case, the breast
cancer data, using support vector machines (SVM) as the classification
algorithm.
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.svm import SVC
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
print(X_train.shape)
print(X_test.shape)
svm = SVC(C=100)
svm.fit(X_train, y_train)
print("Test set accuracy: {:.2f}".format(svm.score(X_test,y_test)))
from sklearn.preprocessing import MinMaxScaler, StandardScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print("Feature min values before scaling:\n {}".format(X_train.min(axis=0)))
print("Feature max values before scaling:\n {}".format(X_train.max(axis=0)))
print("Feature min values after scaling:\n {}".format(X_train_scaled.min(axis=0)))
print("Feature max values after scaling:\n {}".format(X_train_scaled.max(axis=0)))
svm.fit(X_train_scaled, y_train)
print("Test set accuracy scaled data with Min-Max scaling: {:.2f}".format(svm.score(X_test_scaled,y_test)))
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
svm.fit(X_train_scaled, y_train)
print("Test set accuracy scaled data with Standard Scaler: {:.2f}".format(svm.score(X_test_scaled,y_test)))
```
## More on Cancer Data, now with Logistic Regression
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
# Set up training data
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Test set accuracy: {:.2f}".format(logreg.score(X_test,y_test)))
# Scale data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
logreg.fit(X_train_scaled, y_train)
print("Test set accuracy scaled data: {:.2f}".format(logreg.score(X_test_scaled,y_test)))
```
## Why should we think of reducing the dimensionality
In addition to plotting the features, we now also study the covariance (and the correlation) matrix.
We also use **Pandas** to compute the correlation matrix.
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
import pandas as pd
# Making a data frame
cancerpd = pd.DataFrame(cancer.data, columns=cancer.feature_names)
fig, axes = plt.subplots(15,2,figsize=(10,20))
malignant = cancer.data[cancer.target == 0]
benign = cancer.data[cancer.target == 1]
ax = axes.ravel()
for i in range(30):
_, bins = np.histogram(cancer.data[:,i], bins =50)
ax[i].hist(malignant[:,i], bins = bins, alpha = 0.5)
ax[i].hist(benign[:,i], bins = bins, alpha = 0.5)
ax[i].set_title(cancer.feature_names[i])
ax[i].set_yticks(())
ax[0].set_xlabel("Feature magnitude")
ax[0].set_ylabel("Frequency")
ax[0].legend(["Malignant", "Benign"], loc ="best")
fig.tight_layout()
plt.show()
import seaborn as sns
correlation_matrix = cancerpd.corr().round(1)
# use the heatmap function from seaborn to plot the correlation matrix
# annot = True to print the values inside the square
sns.heatmap(data=correlation_matrix, annot=True)
plt.show()
#print eigvalues of correlation matrix
EigValues, EigVectors = np.linalg.eig(correlation_matrix)
print(EigValues)
```
In the above example we note two things. In the first plot we display
the overlap of benign and malignant tumors as functions of the various
features in the Wisconsin breast cancer data set. We see that for
some of the features we can distinguish clearly the benign and
malignant cases while for other features we cannot. This can indicate
which features may be of greater interest when we wish to classify
a tumour as benign or malignant.
In the second figure we have computed the so-called correlation
matrix, which in our case with thirty features becomes a $30\times 30$
matrix.
We constructed this matrix using **pandas** via the statements
```
cancerpd = pd.DataFrame(cancer.data, columns=cancer.feature_names)
```
and then
```
correlation_matrix = cancerpd.corr().round(1)
```
Diagonalizing this matrix we can in turn say something about which
features are of relevance and which are not. But before we proceed we
need to define covariance and correlation matrices. This leads us to
the classical Principal Component Analysis (PCA) theorem with
applications.
## Basic ideas of the Principal Component Analysis (PCA)
The principal component analysis deals with the problem of fitting a
low-dimensional affine subspace $S$ of dimension $d$ much smaller than
the total dimension $D$ of the problem at hand (our data
set). Mathematically it can be formulated as a statistical problem or
a geometric problem. In our discussion of the theorem for the
classical PCA, we will stay with a statistical approach. This is also
what set the scene historically for the PCA.
We have a data set defined by a design/feature matrix $\boldsymbol{X}$ (see below for its definition)
* Each data point is determined by $p$ extrinsic (measurement) variables
* We may want to ask the following question: Are there fewer intrinsic variables (say $d << p$) that still approximately describe the data?
* If so, these intrinsic variables may tell us something important and finding these intrinsic variables is what dimension reduction methods do.
## Introducing the Covariance and Correlation functions
Before we discuss the PCA theorem, we need to remind ourselves about
the definitions of the covariance and the correlation function.
Suppose we have defined two vectors
$\boldsymbol{x}$ and $\boldsymbol{y}$ with $n$ elements each. The covariance matrix $\boldsymbol{C}$ is defined as
$$
\boldsymbol{C}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} \mathrm{cov}[\boldsymbol{x},\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] \\
\mathrm{cov}[\boldsymbol{y},\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{y},\boldsymbol{y}] \\
\end{bmatrix},
$$
where for example
$$
\mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] =\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})(y_i- \overline{y}).
$$
With this definition and recalling that the variance is defined as
$$
\mathrm{var}[\boldsymbol{x}]=\frac{1}{n} \sum_{i=0}^{n-1}(x_i- \overline{x})^2,
$$
we can rewrite the covariance matrix as
$$
\boldsymbol{C}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} \mathrm{var}[\boldsymbol{x}] & \mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] \\
\mathrm{cov}[\boldsymbol{x},\boldsymbol{y}] & \mathrm{var}[\boldsymbol{y}] \\
\end{bmatrix}.
$$
The covariance is unbounded in magnitude and may thus
lead to problems with loss of numerical precision for particularly
large values. It is common to scale the covariance matrix by
introducing instead the correlation matrix defined via the so-called
correlation function
$$
\mathrm{corr}[\boldsymbol{x},\boldsymbol{y}]=\frac{\mathrm{cov}[\boldsymbol{x},\boldsymbol{y}]}{\sqrt{\mathrm{var}[\boldsymbol{x}] \mathrm{var}[\boldsymbol{y}]}}.
$$
The correlation function is then given by values $\mathrm{corr}[\boldsymbol{x},\boldsymbol{y}]
\in [-1,1]$. This avoids potential problems with too large values. We
can then define the correlation matrix for the two vectors $\boldsymbol{x}$
and $\boldsymbol{y}$ as
$$
\boldsymbol{K}[\boldsymbol{x},\boldsymbol{y}] = \begin{bmatrix} 1 & \mathrm{corr}[\boldsymbol{x},\boldsymbol{y}] \\
\mathrm{corr}[\boldsymbol{y},\boldsymbol{x}] & 1 \\
\end{bmatrix},
$$
In the above example this is the matrix we constructed using **pandas**.
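As a quick numerical check of these definitions, the following sketch (with two short, hypothetical vectors) compares the correlation computed directly from the formulas above with the $2\times 2$ matrix returned by Numpy's **np.corrcoef**.
```
import numpy as np

# Two hypothetical vectors with n elements each
n = 1000
x = np.random.normal(size=n)
y = 2.0*x + np.random.normal(size=n)

# Covariance and correlation computed directly from the definitions (factor 1/n)
cov_xy = np.mean((x - np.mean(x))*(y - np.mean(y)))
corr_xy = cov_xy/np.sqrt(np.var(x)*np.var(y))

print(corr_xy)
print(np.corrcoef(x, y))  # the off-diagonal elements should agree with corr_xy
```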
## Correlation Function and Design/Feature Matrix
In our derivation of the various regression algorithms like **Ordinary Least Squares** or **Ridge regression**
we defined the design/feature matrix $\boldsymbol{X}$ as
$$
\boldsymbol{X}=\begin{bmatrix}
x_{0,0} & x_{0,1} & x_{0,2}& \dots & \dots x_{0,p-1}\\
x_{1,0} & x_{1,1} & x_{1,2}& \dots & \dots x_{1,p-1}\\
x_{2,0} & x_{2,1} & x_{2,2}& \dots & \dots x_{2,p-1}\\
\dots & \dots & \dots & \dots \dots & \dots \\
x_{n-2,0} & x_{n-2,1} & x_{n-2,2}& \dots & \dots x_{n-2,p-1}\\
x_{n-1,0} & x_{n-1,1} & x_{n-1,2}& \dots & \dots x_{n-1,p-1}\\
\end{bmatrix},
$$
with $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$, with the predictors/features $p$ referring to the column numbers and the
entries $n$ being the row elements.
We can rewrite the design/feature matrix in terms of its column vectors as
$$
\boldsymbol{X}=\begin{bmatrix} \boldsymbol{x}_0 & \boldsymbol{x}_1 & \boldsymbol{x}_2 & \dots & \dots & \boldsymbol{x}_{p-1}\end{bmatrix},
$$
with a given vector
$$
\boldsymbol{x}_i^T = \begin{bmatrix}x_{0,i} & x_{1,i} & x_{2,i}& \dots & \dots x_{n-1,i}\end{bmatrix}.
$$
With these definitions, we can now rewrite our $2\times 2$
correlation/covariance matrix in terms of a more general design/feature
matrix $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$. This leads to a $p\times p$
covariance matrix for the vectors $\boldsymbol{x}_i$ with $i=0,1,\dots,p-1$
$$
\boldsymbol{C}[\boldsymbol{x}] = \begin{bmatrix}
\mathrm{var}[\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_{p-1}]\\
\mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_0] & \mathrm{var}[\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_{p-1}]\\
\mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_1] & \mathrm{var}[\boldsymbol{x}_2] & \dots & \dots & \mathrm{cov}[\boldsymbol{x}_2,\boldsymbol{x}_{p-1}]\\
\dots & \dots & \dots & \dots & \dots & \dots \\
\dots & \dots & \dots & \dots & \dots & \dots \\
\mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_1] & \mathrm{cov}[\boldsymbol{x}_{p-1},\boldsymbol{x}_{2}] & \dots & \dots & \mathrm{var}[\boldsymbol{x}_{p-1}]\\
\end{bmatrix},
$$
and the correlation matrix
$$
\boldsymbol{K}[\boldsymbol{x}] = \begin{bmatrix}
1 & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_1] & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_2] & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_0,\boldsymbol{x}_{p-1}]\\
\mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_0] & 1 & \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_2] & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_1,\boldsymbol{x}_{p-1}]\\
\mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_0] & \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_1] & 1 & \dots & \dots & \mathrm{corr}[\boldsymbol{x}_2,\boldsymbol{x}_{p-1}]\\
\dots & \dots & \dots & \dots & \dots & \dots \\
\dots & \dots & \dots & \dots & \dots & \dots \\
\mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_0] & \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_1] & \mathrm{corr}[\boldsymbol{x}_{p-1},\boldsymbol{x}_{2}] & \dots & \dots & 1\\
\end{bmatrix},
$$
## Covariance Matrix Examples
The Numpy function **np.cov** calculates the covariance elements using
the factor $1/(n-1)$ instead of $1/n$ since it assumes we do not have
the exact mean values. The following simple code uses the
**np.vstack** function, which takes each vector of dimension $1\times n$
and stacks them into a $2\times n$ matrix $\boldsymbol{W}$
$$
\boldsymbol{W} = \begin{bmatrix} x_0 & x_1 & x_2 & \dots & x_{n-2} & x_{n-1} \\
y_0 & y_1 & y_2 & \dots & y_{n-2} & y_{n-1} \\
\end{bmatrix},
$$
which in turn is converted into the $2\times 2$ covariance matrix
$\boldsymbol{C}$ via the Numpy function **np.cov()**. We note that we can also calculate
the mean value of each set of samples $\boldsymbol{x}$ etc using the Numpy
function **np.mean(x)**. We can also extract the eigenvalues of the
covariance matrix through the **np.linalg.eig()** function.
```
# Importing various packages
import numpy as np
n = 100
x = np.random.normal(size=n)
print(np.mean(x))
y = 4+3*x+np.random.normal(size=n)
print(np.mean(y))
W = np.vstack((x, y))
C = np.cov(W)
print(C)
```
## Correlation Matrix
The previous example can be converted into the correlation matrix by
simply scaling the matrix elements with the variances. We should also
subtract the mean values for each column. This leads to the following
code which sets up the correlation matrix for the previous example in
a more brute-force way. Here we subtract the mean values from each column of the design matrix, calculate the relevant variances and then finally set up the $2\times 2$ correlation matrix (since we have only two vectors).
```
import numpy as np
n = 100
# define two vectors
x = np.random.random(size=n)
y = 4+3*x+np.random.normal(size=n)
#scaling the x and y vectors
x = x - np.mean(x)
y = y - np.mean(y)
variance_x = np.sum(x@x)/n
variance_y = np.sum(y@y)/n
print(variance_x)
print(variance_y)
cov_xy = np.sum(x@y)/n
cov_xx = np.sum(x@x)/n
cov_yy = np.sum(y@y)/n
C = np.zeros((2,2))
C[0,0]= cov_xx/variance_x
C[1,1]= cov_yy/variance_y
C[0,1]= cov_xy/np.sqrt(variance_y*variance_x)
C[1,0]= C[0,1]
print(C)
```
We see that the matrix elements along the diagonal are one as they
should be and that the matrix is symmetric. Furthermore, diagonalizing
this matrix we easily see that it is a positive definite matrix.
The above procedure with **numpy** can be made more compact if we use **pandas**.
## Correlation Matrix with Pandas
We show here how we can set up the correlation matrix using **pandas**, as done in this simple code
```
import numpy as np
import pandas as pd
n = 10
x = np.random.normal(size=n)
x = x - np.mean(x)
y = 4+3*x+np.random.normal(size=n)
y = y - np.mean(y)
X = (np.vstack((x, y))).T
print(X)
Xpd = pd.DataFrame(X)
print(Xpd)
correlation_matrix = Xpd.corr()
print(correlation_matrix)
```
We expand this model to the Franke function discussed above.
## Correlation Matrix with Pandas and the Franke function
```
# Common imports
import numpy as np
import pandas as pd
def FrankeFunction(x,y):
term1 = 0.75*np.exp(-(0.25*(9*x-2)**2) - 0.25*((9*y-2)**2))
term2 = 0.75*np.exp(-((9*x+1)**2)/49.0 - 0.1*(9*y+1))
term3 = 0.5*np.exp(-(9*x-7)**2/4.0 - 0.25*((9*y-3)**2))
term4 = -0.2*np.exp(-(9*x-4)**2 - (9*y-7)**2)
return term1 + term2 + term3 + term4
def create_X(x, y, n ):
if len(x.shape) > 1:
x = np.ravel(x)
y = np.ravel(y)
N = len(x)
l = int((n+1)*(n+2)/2) # Number of elements in beta
X = np.ones((N,l))
for i in range(1,n+1):
q = int((i)*(i+1)/2)
for k in range(i+1):
X[:,q+k] = (x**(i-k))*(y**k)
return X
# Making meshgrid of datapoints and compute Franke's function
n = 4
N = 100
x = np.sort(np.random.uniform(0, 1, N))
y = np.sort(np.random.uniform(0, 1, N))
z = FrankeFunction(x, y)
X = create_X(x, y, n=n)
Xpd = pd.DataFrame(X)
# subtract the mean values and set up the covariance matrix
Xpd = Xpd - Xpd.mean()
covariance_matrix = Xpd.cov()
print(covariance_matrix)
```
We note here that the covariance is zero for the first row and
column since all elements in the first column of the design matrix were set to one
(we are fitting the function in terms of a polynomial of degree $n$).
This means that the variance for this column is zero and will
cause problems when we set up the correlation matrix. We can simply
drop this column and construct the correlation
matrix without it, as sketched below.
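A minimal way of doing this, assuming the **pandas** DataFrame `Xpd` from the cell above, is to keep only the columns with non-zero variance before calling **corr()**.
```
# Drop the constant (zero-variance) column(s) and set up the correlation matrix
Xpd_reduced = Xpd.loc[:, Xpd.std() > 0]
correlation_matrix = Xpd_reduced.corr()
print(correlation_matrix)
```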
## Rewriting the Covariance and/or Correlation Matrix
We can rewrite the covariance matrix in a more compact form in terms of the design/feature matrix $\boldsymbol{X}$ as
$$
\boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}\boldsymbol{X}^T= \mathbb{E}[\boldsymbol{X}\boldsymbol{X}^T].
$$
To see this let us simply look at a design matrix $\boldsymbol{X}\in {\mathbb{R}}^{2\times 2}$
$$
\boldsymbol{X}=\begin{bmatrix}
x_{00} & x_{01}\\
x_{10} & x_{11}\\
\end{bmatrix}=\begin{bmatrix}
\boldsymbol{x}_{0} & \boldsymbol{x}_{1}\\
\end{bmatrix}.
$$
If we then compute the expectation value
$$
\mathbb{E}[\boldsymbol{X}\boldsymbol{X}^T] = \frac{1}{n}\boldsymbol{X}\boldsymbol{X}^T=\begin{bmatrix}
x_{00}^2+x_{01}^2 & x_{00}x_{10}+x_{01}x_{11}\\
x_{10}x_{00}+x_{11}x_{01} & x_{10}^2+x_{11}^2\\
\end{bmatrix},
$$
which is just
$$
\boldsymbol{C}[\boldsymbol{x}_0,\boldsymbol{x}_1] = \boldsymbol{C}[\boldsymbol{x}]=\begin{bmatrix} \mathrm{var}[\boldsymbol{x}_0] & \mathrm{cov}[\boldsymbol{x}_0,\boldsymbol{x}_1] \\
\mathrm{cov}[\boldsymbol{x}_1,\boldsymbol{x}_0] & \mathrm{var}[\boldsymbol{x}_1] \\
\end{bmatrix},
$$
where we wrote $\boldsymbol{C}[\boldsymbol{x}_0,\boldsymbol{x}_1] = \boldsymbol{C}[\boldsymbol{x}]$ to indicate that this is the covariance of the vectors $\boldsymbol{x}$ of the design/feature matrix $\boldsymbol{X}$.
It is easy to generalize this to a matrix $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$.
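As a small numerical sanity check of this compact form, the sketch below uses a hypothetical centered design matrix with the samples stored as rows, so that the matrix product becomes $\boldsymbol{X}^T\boldsymbol{X}/n$ (the product $\boldsymbol{X}\boldsymbol{X}^T$ above corresponds to storing the samples as columns), and compares it with **np.cov**.
```
import numpy as np

n, p = 1000, 3
# Hypothetical design matrix with samples as rows, then mean-centered
X = np.random.normal(size=(n, p))
X = X - X.mean(axis=0)

# Covariance as a matrix product, compared with np.cov (bias=True uses the factor 1/n)
C_product = X.T @ X / n
C_numpy = np.cov(X.T, bias=True)
print(np.allclose(C_product, C_numpy))
```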
## Towards the PCA theorem
We have that the covariance matrix (the correlation matrix involves a simple rescaling) is given as
$$
\boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}\boldsymbol{X}^T= \mathbb{E}[\boldsymbol{X}\boldsymbol{X}^T].
$$
Let us now assume that we can perform a series of orthogonal transformations where we employ some orthogonal matrices $\boldsymbol{S}$.
These matrices are defined as $\boldsymbol{S}\in {\mathbb{R}}^{p\times p}$ and obey the orthogonality requirements $\boldsymbol{S}\boldsymbol{S}^T=\boldsymbol{S}^T\boldsymbol{S}=\boldsymbol{I}$. The matrix can be written out in terms of the column vectors $\boldsymbol{s}_i$ as $\boldsymbol{S}=[\boldsymbol{s}_0,\boldsymbol{s}_1,\dots,\boldsymbol{s}_{p-1}]$ and $\boldsymbol{s}_i \in {\mathbb{R}}^{p}$.
Assume also that there is a transformation $\boldsymbol{S}\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}^T=\boldsymbol{C}[\boldsymbol{y}]$ such that the new matrix $\boldsymbol{C}[\boldsymbol{y}]$ is diagonal with elements $[\lambda_0,\lambda_1,\lambda_2,\dots,\lambda_{p-1}]$.
That is we have
$$
\boldsymbol{C}[\boldsymbol{y}] = \mathbb{E}[\boldsymbol{S}\boldsymbol{X}\boldsymbol{X}^T\boldsymbol{S}^T]=\boldsymbol{S}\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}^T,
$$
since the matrix $\boldsymbol{S}$ is not a data dependent matrix. Multiplying with $\boldsymbol{S}^T$ from the left we have
$$
\boldsymbol{S}^T\boldsymbol{C}[\boldsymbol{y}] = \boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}^T,
$$
and since $\boldsymbol{C}[\boldsymbol{y}]$ is diagonal we have for a given eigenvalue $i$ of the covariance matrix that
$$
\boldsymbol{S}^T_i\lambda_i = \boldsymbol{C}[\boldsymbol{x}]\boldsymbol{S}^T_i.
$$
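Numerically, such an orthogonal transformation is obtained directly from the eigenvectors of the covariance matrix. A minimal sketch, using a hypothetical $2\times 2$ covariance matrix:
```
import numpy as np

# Hypothetical symmetric covariance matrix
C = np.array([[4.0, 2.0],
              [2.0, 2.0]])

EigValues, EigVectors = np.linalg.eig(C)
S = EigVectors.T  # the rows of S are the eigenvectors of C

# S C S^T is (numerically) diagonal, with the eigenvalues on the diagonal
print(S @ C @ S.T)
print(EigValues)
```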
In the derivation of the PCA theorem we will assume that the eigenvalues are ordered in descending order, that is
$\lambda_0 > \lambda_1 > \dots > \lambda_{p-1}$.
The eigenvalues tell us then how much we need to stretch the
corresponding eigenvectors. Dimensions with large eigenvalues have
thus large variations (large variance) and define therefore useful
dimensions. The data points are more spread out in the direction of
these eigenvectors. Smaller eigenvalues mean on the other hand that
the corresponding eigenvectors are shrunk accordingly and the data
points are tightly bunched together and there is not much variation in
these specific directions. Hopefully we could then leave out
dimensions where the eigenvalues are very small. If $p$ is very large,
we could then aim at reducing $p$ to $l << p$ and handle only $l$
features/predictors.
## The Algorithm before theorem
Here is how we would proceed in setting up the algorithm for the PCA; see also the discussion and the code sketch after the list below.
* Set up the datapoints for the design/feature matrix $\boldsymbol{X}$ with $\boldsymbol{X}\in {\mathbb{R}}^{n\times p}$, with the predictors/features $p$ referring to the column numbers and the entries $n$ being the row elements.
$$
\boldsymbol{X}=\begin{bmatrix}
x_{0,0} & x_{0,1} & x_{0,2}& \dots & \dots x_{0,p-1}\\
x_{1,0} & x_{1,1} & x_{1,2}& \dots & \dots x_{1,p-1}\\
x_{2,0} & x_{2,1} & x_{2,2}& \dots & \dots x_{2,p-1}\\
\dots & \dots & \dots & \dots \dots & \dots \\
x_{n-2,0} & x_{n-2,1} & x_{n-2,2}& \dots & \dots x_{n-2,p-1}\\
x_{n-1,0} & x_{n-1,1} & x_{n-1,2}& \dots & \dots x_{n-1,p-1}\\
\end{bmatrix},
$$
* Center the data by subtracting the mean value for each column. This leads to a new matrix $\boldsymbol{X}\rightarrow \overline{\boldsymbol{X}}$.
* Compute then the covariance/correlation matrix $\mathbb{E}[\overline{\boldsymbol{X}}\overline{\boldsymbol{X}}^T]$.
* Find the eigenpairs of $\boldsymbol{C}$ with eigenvalues $[\lambda_0,\lambda_1,\dots,\lambda_{p-1}]$ and eigenvectors $[\boldsymbol{s}_0,\boldsymbol{s}_1,\dots,\boldsymbol{s}_{p-1}]$.
* Order the eigenvalues (and the eigenvectors accordingly) in decreasing order.
* Keep only those $l$ eigenvalues larger than a selected threshold value, discarding thus $p-l$ features since we expect small variations in the data here.
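Collected in one place, a compact sketch of these steps could look as follows, using **numpy** only, with a hypothetical data matrix and a hypothetical threshold on the eigenvalues.
```
import numpy as np

# Hypothetical data: n samples and p features, samples stored as rows
n, p = 500, 5
X = np.random.normal(size=(n, p))

# 1) Center the data by subtracting the mean value of each column
X_centered = X - X.mean(axis=0)

# 2) Empirical covariance matrix (p x p)
C = X_centered.T @ X_centered / n

# 3) Eigenpairs of the covariance matrix
EigValues, EigVectors = np.linalg.eig(C)

# 4) Order the eigenvalues (and eigenvectors) in decreasing order
order = np.argsort(EigValues)[::-1]
EigValues, EigVectors = EigValues[order], EigVectors[:, order]

# 5) Keep only the l components with eigenvalues above a chosen (hypothetical) threshold
threshold = 0.1
l = np.sum(EigValues > threshold)

# Project the centered data onto the l retained directions
Z = X_centered @ EigVectors[:, :l]
print(EigValues, l, Z.shape)
```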
## Writing our own PCA code
We will use a simple example first with two-dimensional data
drawn from a multivariate normal distribution with the following mean and covariance matrix:
$$
\mu = (-1,2) \qquad \Sigma = \begin{bmatrix} 4 & 2 \\
2 & 2
\end{bmatrix}
$$
Note that the mean refers to each column of data.
We will generate $n = 10000$ points $X = \{ x_1, \ldots, x_n \}$ from
this distribution, and store them in the $n \times 2$ matrix $\boldsymbol{X}$.
The following Python code aids in setting up the data and writing out the design matrix.
Note that the Numpy function **multivariate_normal** takes the above mean and covariance matrix as input, and that the sample covariance we compute below (with **np.cov**) is defined by dividing by $n-1$ instead of $n$.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display
n = 10000
mean = (-1, 2)
cov = [[4, 2], [2, 2]]
X = np.random.multivariate_normal(mean, cov, n)
```
Now we are going to implement the PCA algorithm. We will break it down into various substeps.
### Compute the sample mean and center the data
The first step of PCA is to compute the sample mean of the data and use it to center the data. Recall that the sample mean is
$$
\mu_n = \frac{1}{n} \sum_{i=1}^n x_i
$$
and the mean-centered data $\bar{X} = \{ \bar{x}_1, \ldots, \bar{x}_n \}$ takes the form
$$
\bar{x}_i = x_i - \mu_n.
$$
When you are done with these steps, print out $\mu_n$ to verify it is
close to $\mu$ and plot your mean centered data to verify it is
centered at the origin! Compare your code with the functionality from **Scikit-Learn** discussed above.
The following code elements perform these operations using **pandas** or using our own functionality for doing so. The latter, using **numpy** is rather simple through the **mean()** function.
```
df = pd.DataFrame(X)
# Pandas does the centering for us
df = df -df.mean()
# we center it ourselves
X_centered = X - X.mean(axis=0)
```
Alternatively, we could use the functions we discussed
earlier for scaling the data set. That is, we could have used the
**StandardScaler** function in **Scikit-Learn**, a function which ensures
that for each feature/predictor we study the mean value is zero and
the variance is one (every column in the design/feature matrix). You
would then not get the same results, since we then also divide each
feature by its standard deviation. The diagonal covariance matrix elements will then be one,
while the non-diagonal ones need to be divided by $2\sqrt{2}$ for our
specific case.
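A small check of this statement, assuming the matrix `X` generated in the cell above: with **StandardScaler** the diagonal elements become (close to) one and the off-diagonal element becomes roughly $2/(2\sqrt{2})\approx 0.71$.
```
from sklearn.preprocessing import StandardScaler
import numpy as np

X_std = StandardScaler().fit_transform(X)
# Diagonal elements close to one, off-diagonal close to 2/(2*sqrt(2)) ~ 0.71
print(np.cov(X_std.T))
```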
### Compute the sample covariance
Now we are going to use the mean centered data to compute the sample covariance of the data by using the following equation
$$
\Sigma_n = \frac{1}{n-1} \sum_{i=1}^n \bar{x}_i^T \bar{x}_i = \frac{1}{n-1} \sum_{i=1}^n (x_i - \mu_n)^T (x_i - \mu_n)
$$
where the data points $x_i \in \mathbb{R}^p$ (here in this example $p = 2$) are column vectors and $x^T$ is the transpose of $x$.
We can write our own code or simply use either the functionality of **numpy** or that of **pandas**, as follows
```
print(df.cov())
print(np.cov(X_centered.T))
```
Note that the way we define the covariance matrix here has a factor $1/(n-1)$ instead of $1/n$. This is the convention used by the **cov()** functions of both **numpy** and **pandas**.
Our own code here is not very elegant and asks for obvious improvements. It is tailored to this specific $2\times 2$ covariance matrix.
```
# extract the relevant columns from the centered design matrix of dim n x 2
x = X_centered[:,0]
y = X_centered[:,1]
Cov = np.zeros((2,2))
Cov[0,1] = np.sum(x.T@y)/(n-1.0)
Cov[0,0] = np.sum(x.T@x)/(n-1.0)
Cov[1,1] = np.sum(y.T@y)/(n-1.0)
Cov[1,0]= Cov[0,1]
print("Centered covariance using own code")
print(Cov)
plt.plot(x, y, 'x')
plt.axis('equal')
plt.show()
```
Depending on the number of points $n$, we will get results that are close to the covariance values defined above.
The plot shows how the data are clustered around a line with slope close to one. Is this expected?
### Diagonalize the sample covariance matrix to obtain the principal components
Now we are ready to solve for the principal components! To do so we
diagonalize the sample covariance matrix $\Sigma$. We can use the
function **np.linalg.eig** to do so. It will return the eigenvalues and
eigenvectors of $\Sigma$. Once we have these we can perform the
following tasks:
* We compute the percentage of the total variance captured by the first principal component
* We plot the mean centered data and lines along the first and second principal components
* Then we project the mean centered data onto the first and second principal components, and plot the projected data.
* Finally, we approximate the data as
$$
x_i \approx \tilde{x}_i = \mu_n + \langle x_i, v_0 \rangle v_0
$$
where $v_0$ is the first principal component.
Collecting all these steps we can write our own PCA function and
compare this with the functionality included in **Scikit-Learn**.
The code here outlines some of the elements we could include in the
analysis. Feel free to extend upon this in order to address the above
questions.
```
# diagonalize and obtain eigenvalues, not necessarily sorted
EigValues, EigVectors = np.linalg.eig(Cov)
# sort eigenvectors and eigenvalues
#permute = EigValues.argsort()
#EigValues = EigValues[permute]
#EigVectors = EigVectors[:,permute]
print("Eigenvalues of Covariance matrix")
for i in range(2):
print(EigValues[i])
FirstEigvector = EigVectors[:,0]
SecondEigvector = EigVectors[:,1]
print("First eigenvector")
print(FirstEigvector)
print("Second eigenvector")
print(SecondEigvector)
#thereafter we do a PCA with Scikit-learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2Dsl = pca.fit_transform(X)
print("Eigenvector of largest eigenvalue")
print(pca.components_.T[:, 0])
```
This code does not contain all the above elements, but it shows how we can use **Scikit-Learn** to extract the eigenvector which corresponds to the largest eigenvalue. Try to address the questions we pose before the above code. Try also to change the values of the covariance matrix by making one of the diagonal elements much larger than the other. What do you observe then?
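As a rough sketch of the remaining tasks (assuming `X`, `X_centered`, `Cov`, `EigValues` and `EigVectors` from the cells above are still in memory), one could proceed along the following lines; the factor of three used to scale the plotted principal directions is an arbitrary visualization choice.
```
# sketch: complete the tasks listed before the code above
import numpy as np
import matplotlib.pyplot as plt
# sort eigenpairs in descending order of eigenvalue
order = np.argsort(EigValues)[::-1]
EigValues = EigValues[order]
EigVectors = EigVectors[:, order]
# percentage of the total variance captured by the first principal component
print("Variance captured by first PC: {:.1f} %".format(100*EigValues[0]/np.sum(EigValues)))
# project the centered data onto the two principal components
Z = X_centered @ EigVectors
# plot the centered data and lines along the principal directions
plt.plot(X_centered[:, 0], X_centered[:, 1], 'x', alpha=0.2)
for k in range(2):
    plt.plot([0, 3*np.sqrt(EigValues[k])*EigVectors[0, k]],
             [0, 3*np.sqrt(EigValues[k])*EigVectors[1, k]])
plt.axis('equal')
plt.show()
# approximate the data using only the first principal component
mu = X.mean(axis=0)
X_approx = mu + np.outer(Z[:, 0], EigVectors[:, 0])
print("Mean reconstruction error:", np.mean(np.sum((X - X_approx)**2, axis=1)))
```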
## Classical PCA Theorem
We assume now that we have a design matrix $\boldsymbol{X}$ which has been
centered as discussed above. For the sake of simplicity we skip the
overline symbol. The matrix is defined in terms of the various column
vectors $[\boldsymbol{x}_0,\boldsymbol{x}_1,\dots, \boldsymbol{x}_{p-1}]$ each with dimension
$\boldsymbol{x}\in {\mathbb{R}}^{n}$.
We assume also that we have an orthogonal transformation $\boldsymbol{W}\in {\mathbb{R}}^{p\times p}$. Writing $\boldsymbol{x}_i\in {\mathbb{R}}^{p}$ for the $i$-th data point (the $i$-th row of $\boldsymbol{X}$), we define the reconstruction error (which is similar to the mean squared error we have seen before) as
$$
J(\boldsymbol{W},\boldsymbol{Z}) = \frac{1}{n}\sum_i (\boldsymbol{x}_i - \overline{\boldsymbol{x}}_i)^2,
$$
with $\overline{\boldsymbol{x}}_i = \boldsymbol{W}\boldsymbol{z}_i$, where $\boldsymbol{z}_i\in {\mathbb{R}}^{p}$ is the $i$-th column of the matrix
$\boldsymbol{Z}\in{\mathbb{R}}^{p\times n}$. When doing PCA we want to reduce this dimensionality.
The PCA theorem states that minimizing the above reconstruction error
corresponds to setting $\boldsymbol{W}=\boldsymbol{S}$, the orthogonal matrix which
diagonalizes the empirical covariance (or correlation) matrix. The optimal
low-dimensional encoding of the data is then given by vectors
$\boldsymbol{z}_i$ of dimension at most $l$, with $l \ll p$, defined by the
orthogonal projection of the data onto the subspace spanned by the
eigenvectors of the covariance (or correlation) matrix.
## Proof of the PCA Theorem
To show the PCA theorem let us start with the assumption that there is one vector $\boldsymbol{w}_0$, of unit norm, which corresponds to a solution that minimizes the reconstruction error $J$. We then approximate the reconstruction error in terms of $\boldsymbol{w}_0$ and $\boldsymbol{z}_0$ as
$$
J(\boldsymbol{w}_0,\boldsymbol{z}_0)= \frac{1}{n}\sum_i (\boldsymbol{x}_i - z_{i0}\boldsymbol{w}_0)^2=\frac{1}{n}\sum_i (\boldsymbol{x}_i^T\boldsymbol{x}_i - 2z_{i0}\boldsymbol{w}_0^T\boldsymbol{x}_i+z_{i0}^2\boldsymbol{w}_0^T\boldsymbol{w}_0),
$$
which we can rewrite, using the normalization $\boldsymbol{w}_0^T\boldsymbol{w}_0=1$, as
$$
J(\boldsymbol{w}_0,\boldsymbol{z}_0)=\frac{1}{n}\sum_i (\boldsymbol{x}_i^T\boldsymbol{x}_i - 2z_{i0}\boldsymbol{w}_0^T\boldsymbol{x}_i+z_{i0}^2).
$$
Minimizing $J$ with respect to the unknown parameters $z_{i0}$ we obtain
$$
z_{i0}=\boldsymbol{w}_0^T\boldsymbol{x}_i,
$$
where the vectors on the rhs are known.
## PCA Proof continued
We have now found the unknown parameters $z_{i0}$. These correspond to the projected coordinates and we can write
$$
J(\boldsymbol{w}_0)= \frac{1}{n}\sum_i (\boldsymbol{x}_i^T\boldsymbol{x}_i - z_{i0}^2)=\mathrm{const}-\frac{1}{n}\sum_i z_{i0}^2.
$$
We can show that the variance of the projected coordinates defined by $\boldsymbol{w}_0^T\boldsymbol{x}_i$ are given by
$$
\mathrm{var}[\boldsymbol{w}_0^T\boldsymbol{x}_i] = \frac{1}{n}\sum_i z_{i0}^2,
$$
since the expectation value of
$$
\mathbb{E}[\boldsymbol{w}_0^T\boldsymbol{x}_i] = \mathbb{E}[z_{i0}]= \boldsymbol{w}_0^T\mathbb{E}[\boldsymbol{x}_i]=0,
$$
where we have used the fact that our data are centered.
Recalling our definition of the covariance as
$$
\boldsymbol{C}[\boldsymbol{x}] = \frac{1}{n}\boldsymbol{X}\boldsymbol{X}^T=\mathbb{E}[\boldsymbol{X}\boldsymbol{X}^T],
$$
we have thus that
$$
\mathrm{var}[\boldsymbol{w}_0^T\boldsymbol{x}_i] = \frac{1}{n}\sum_i z_{i0}^2=\boldsymbol{w}_0^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0.
$$
We are almost there: we have obtained a relation between minimizing
the reconstruction error and the variance of the projected data,
expressed through the covariance matrix. Minimizing the error is thus
equivalent to maximizing the variance of the projected data.
## The final step
We could trivially maximize the variance of the projection (and
thereby minimize the reconstruction error) by letting
the norm-2 of $\boldsymbol{w}_0$ go to infinity. However, since we
want the matrix $\boldsymbol{W}$ to be orthogonal, this norm is constrained by
$\vert\vert \boldsymbol{w}_0 \vert\vert_2^2=1$. Imposing this condition via a
Lagrange multiplier we can then in turn maximize
$$
J(\boldsymbol{w}_0)= \boldsymbol{w}_0^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0+\lambda_0(1-\boldsymbol{w}_0^T\boldsymbol{w}_0).
$$
Taking the derivative with respect to $\boldsymbol{w}_0$ we obtain
$$
\frac{\partial J(\boldsymbol{w}_0)}{\partial \boldsymbol{w}_0}= 2\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0-2\lambda_0\boldsymbol{w}_0=0,
$$
meaning that
$$
\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0=\lambda_0\boldsymbol{w}_0.
$$
**The direction that maximizes the variance (or minimizes the reconstruction error) is an eigenvector of the covariance matrix**! If we left multiply with $\boldsymbol{w}_0^T$ we find that the variance of the projected data is
$$
\boldsymbol{w}_0^T\boldsymbol{C}[\boldsymbol{x}]\boldsymbol{w}_0=\lambda_0.
$$
If we want to maximize the variance (minimize the reconstruction error)
we simply pick the eigenvector of the covariance matrix with the
largest eigenvalue. This establishes the link between the minimization
of the reconstruction function $J$ in terms of an orthogonal matrix
and the maximization of the variance and thereby the covariance of our
observations encoded in the design/feature matrix $\boldsymbol{X}$.
The proof
for the other eigenvectors $\boldsymbol{w}_1,\boldsymbol{w}_2,\dots$ can be
established by applying the above arguments and using the fact that
our basis of eigenvectors is orthogonal, see [Murphy chapter
12.2](https://mitpress.mit.edu/books/machine-learning-1). The
discussion in chapter 12.2 of Murphy's text also has a nice link with
the Singular Value Decomposition theorem. For categorical data, see
chapter 12.4 and the discussion therein.
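As a quick numerical sanity check of the theorem (a sketch assuming the centered two-dimensional data `X_centered` and the sample covariance `Cov` from the example above), the variance of the data projected onto the leading eigenvector should exceed the variance along randomly chosen unit vectors
```
# assumes X_centered and Cov from the two-dimensional example above
import numpy as np
EigValues, EigVectors = np.linalg.eig(Cov)
w0 = EigVectors[:, np.argmax(EigValues)]
print("Variance along leading eigenvector:", np.var(X_centered @ w0, ddof=1))
rng = np.random.default_rng(2021)
for _ in range(3):
    w = rng.normal(size=2)
    w /= np.linalg.norm(w)
    print("Variance along a random unit vector:", np.var(X_centered @ w, ddof=1))
```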
## Geometric Interpretation and link with Singular Value Decomposition
This material will be added by mid January 2020.
## Principal Component Analysis
Principal Component Analysis (PCA) is by far the most popular dimensionality reduction algorithm.
First it identifies the hyperplane that lies closest to the data, and then it projects the data onto it.
The following Python code uses NumPy’s **svd()** function to obtain all the principal components of the
training set, then extracts the first two principal components. First we center the data using either **pandas** or our own code
```
import numpy as np
import pandas as pd
from IPython.display import display
np.random.seed(100)
# setting up a 10 x 5 vanilla matrix
rows = 10
cols = 5
X = np.random.randn(rows,cols)
df = pd.DataFrame(X)
# Pandas does the centering for us
df = df -df.mean()
display(df)
# we center it ourselves
X_centered = X - X.mean(axis=0)
# Then check the difference between pandas and our own set up
print(X_centered-df)
# Now we do an SVD; note that np.linalg.svd returns V^T as its third output,
# so the principal directions are the columns of V.T below
U, s, V = np.linalg.svd(X_centered)
c1 = V.T[:, 0]
c2 = V.T[:, 1]
W2 = V.T[:, :2]
X2D = X_centered.dot(W2)
print(X2D)
```
PCA assumes that the dataset is centered around the origin. Scikit-Learn’s PCA classes take care of centering
the data for you. However, if you implement PCA yourself (as in the preceding example), or if you use other libraries, don’t
forget to center the data first.
Once you have identified all the principal components, you can reduce the dimensionality of the dataset
down to $d$ dimensions by projecting it onto the hyperplane defined by the first $d$ principal components.
Selecting this hyperplane ensures that the projection will preserve as much variance as possible.
```
W2 = V.T[:, :2]
X2D = X_centered.dot(W2)
```
## PCA and scikit-learn
Scikit-Learn’s PCA class implements PCA using SVD decomposition just like we did before. The
following code applies PCA to reduce the dimensionality of the dataset down to two dimensions (note
that it automatically takes care of centering the data):
```
#thereafter we do a PCA with Scikit-learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2D = pca.fit_transform(X)
print(X2D)
```
After fitting the PCA transformer to the dataset, you can access the principal components using the
$components\_$ attribute (note that it contains the PCs as horizontal vectors, so, for example, the first
principal component is equal to
```
pca.components_.T[:, 0]
```
Another very useful piece of information is the explained variance ratio of each principal component,
available via the $explained\_variance\_ratio\_$ attribute. It indicates the proportion of the dataset’s
variance that lies along the axis of each principal component.
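Continuing with the `pca` object fitted above (an assumption; any fitted PCA instance will do), the ratios can be inspected directly
```
# assumes the pca object fitted in the previous cell
print(pca.explained_variance_ratio_)
print("Variance captured by the first two components: {:.2f}".format(pca.explained_variance_ratio_.sum()))
```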
## Back to the Cancer Data
We can now repeat the above but applied to real data, in this case our breast cancer data.
Here we compute performance scores on the training data using logistic regression.
```
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data,cancer.target,random_state=0)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
print("Train set accuracy from Logistic Regression: {:.2f}".format(logreg.score(X_train,y_train)))
# We scale the data
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Then perform again a log reg fit
logreg.fit(X_train_scaled, y_train)
print("Train set accuracy scaled data: {:.2f}".format(logreg.score(X_train_scaled,y_train)))
#thereafter we do a PCA with Scikit-learn
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X2D_train = pca.fit_transform(X_train_scaled)
# and finally compute the log reg fit and the score on the training data
logreg.fit(X2D_train,y_train)
print("Train set accuracy scaled and PCA data: {:.2f}".format(logreg.score(X2D_train,y_train)))
```
We see that after the PCA decomposition, the performance on the training data is similar to that obtained with the non-scaled data.
## More on the PCA
Instead of arbitrarily choosing the number of dimensions to reduce down to, it is generally preferable to
choose the number of dimensions that add up to a sufficiently large portion of the variance (e.g., 95%).
Unless, of course, you are reducing dimensionality for data visualization — in that case you will
generally want to reduce the dimensionality down to 2 or 3.
The following code computes PCA without reducing dimensionality, then computes the minimum number
of dimensions required to preserve 95% of the training set’s variance:
```
pca = PCA()
pca.fit(X)
cumsum = np.cumsum(pca.explained_variance_ratio_)
d = np.argmax(cumsum >= 0.95) + 1
```
You could then set $n\_components=d$ and run PCA again. However, there is a much better option: instead
of specifying the number of principal components you want to preserve, you can set $n\_components$ to be
a float between 0.0 and 1.0, indicating the ratio of variance you wish to preserve:
```
pca = PCA(n_components=0.95)
X_reduced = pca.fit_transform(X)
```
## Incremental PCA
One problem with the preceding implementation of PCA is that it requires the whole training set to fit in
memory in order for the SVD algorithm to run. Fortunately, Incremental PCA (IPCA) algorithms have
been developed: you can split the training set into mini-batches and feed an IPCA algorithm one minibatch
at a time. This is useful for large training sets, and also to apply PCA online (i.e., on the fly, as new
instances arrive).
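A minimal sketch using Scikit-Learn's **IncrementalPCA**, here assuming the scaled cancer training data `X_train_scaled` from the cells above; the number of mini-batches is an arbitrary illustrative choice.
```
from sklearn.decomposition import IncrementalPCA
import numpy as np

n_batches = 5  # illustrative choice
inc_pca = IncrementalPCA(n_components=2)
for X_batch in np.array_split(X_train_scaled, n_batches):
    inc_pca.partial_fit(X_batch)
X_reduced = inc_pca.transform(X_train_scaled)
print(X_reduced.shape)
```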
## Randomized PCA
Scikit-Learn offers yet another option to perform PCA, called Randomized PCA. This is a stochastic
algorithm that quickly finds an approximation of the first $d$ principal components. Its computational
complexity is $O(m \times d^2)+O(d^3)$, instead of $O(m \times n^2) + O(n^3)$, so it is dramatically faster than the
previous algorithms when $d$ is much smaller than $n$.
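In Scikit-Learn this solver is selected through the $svd\_solver$ argument of the **PCA** class; a short sketch, again assuming `X_train_scaled` from the cancer example.
```
from sklearn.decomposition import PCA

rnd_pca = PCA(n_components=2, svd_solver="randomized", random_state=42)
X_reduced = rnd_pca.fit_transform(X_train_scaled)
print(rnd_pca.explained_variance_ratio_)
```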
## Kernel PCA
The kernel trick is a mathematical technique that implicitly maps instances into a
very high-dimensional space (called the feature space), enabling nonlinear classification and regression
with Support Vector Machines. Recall that a linear decision boundary in the high-dimensional feature
space corresponds to a complex nonlinear decision boundary in the original space.
It turns out that the same trick can be applied to PCA, making it possible to perform complex nonlinear
projections for dimensionality reduction. This is called Kernel PCA (kPCA). It is often good at
preserving clusters of instances after projection, or sometimes even unrolling datasets that lie close to a
twisted manifold.
For example, the following code uses Scikit-Learn’s KernelPCA class to perform kPCA with an RBF kernel:
```
from sklearn.decomposition import KernelPCA
rbf_pca = KernelPCA(n_components = 2, kernel="rbf", gamma=0.04)
X_reduced = rbf_pca.fit_transform(X)
```
## LLE
Locally Linear Embedding (LLE) is another very powerful nonlinear dimensionality reduction
(NLDR) technique. It is a Manifold Learning technique that does not rely on projections like the previous
algorithms. In a nutshell, LLE works by first measuring how each training instance linearly relates to its
closest neighbors (c.n.), and then looking for a low-dimensional representation of the training set where
these local relationships are best preserved.
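A minimal sketch with Scikit-Learn's **LocallyLinearEmbedding**, again assuming `X_train_scaled` from the cancer example; the number of neighbors is an arbitrary illustrative choice.
```
from sklearn.manifold import LocallyLinearEmbedding

lle = LocallyLinearEmbedding(n_components=2, n_neighbors=10)
X_reduced = lle.fit_transform(X_train_scaled)
print(X_reduced.shape)
```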
## Other techniques
There are many other dimensionality reduction techniques, several of which are available in Scikit-Learn.
Here are some of the most popular; a short usage sketch follows the list:
* **Multidimensional Scaling (MDS)** reduces dimensionality while trying to preserve the distances between the instances.
* **Isomap** creates a graph by connecting each instance to its nearest neighbors, then reduces dimensionality while trying to preserve the geodesic distances between the instances.
* **t-Distributed Stochastic Neighbor Embedding** (t-SNE) reduces dimensionality while trying to keep similar instances close and dissimilar instances apart. It is mostly used for visualization, in particular to visualize clusters of instances in high-dimensional space (e.g., to visualize the MNIST images in 2D).
* **Linear Discriminant Analysis (LDA)** is actually a classification algorithm, but during training it learns the most discriminative axes between the classes, and these axes can then be used to define a hyperplane onto which to project the data. The benefit is that the projection will keep classes as far apart as possible, so LDA is a good technique to reduce dimensionality before running another classification algorithm such as a Support Vector Machine (SVM) classifier discussed in the SVM lectures.
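The sketch below shows minimal usage of these estimators, assuming the scaled cancer data `X_train_scaled` and labels `y_train` from the earlier cells; note that for the two-class cancer data LDA can produce at most one discriminant axis.
```
from sklearn.manifold import MDS, Isomap, TSNE
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

X_mds = MDS(n_components=2, random_state=42).fit_transform(X_train_scaled)
X_iso = Isomap(n_components=2).fit_transform(X_train_scaled)
X_tsne = TSNE(n_components=2, random_state=42).fit_transform(X_train_scaled)
X_lda = LinearDiscriminantAnalysis(n_components=1).fit_transform(X_train_scaled, y_train)
print(X_mds.shape, X_iso.shape, X_tsne.shape, X_lda.shape)
```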
# WeakAlign demo notebook
This notebook shows how to run a trained model on a given image pair
## Imports
```
from __future__ import print_function, division
import os
from os.path import exists
import argparse
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from model.cnn_geometric_model import CNNGeometric, TwoStageCNNGeometric
from data.pf_dataset import PFDataset
from data.download_datasets import download_PF_pascal
from image.normalization import NormalizeImageDict, normalize_image
from util.torch_util import BatchTensorToVars, str_to_bool
from geotnf.transformation import GeometricTnf
from geotnf.point_tnf import *
import matplotlib.pyplot as plt
from skimage import io
import warnings
from torchvision.transforms import Normalize
from collections import OrderedDict
import torch.nn.functional as F
import glob
import numpy as np
from torch.autograd import Variable
from PIL import Image
warnings.filterwarnings('ignore')
from model.loss import TransformedGridLoss, WeakInlierCount, TwoStageWeakInlierCount
```
## Parameters
```
# Select one of the following models:
# cnngeo_vgg16, cnngeo_resnet101, proposed_resnet101
model_selection = 'proposed_resnet101'
model_aff_path = ''
model_tps_path = ''
model_aff_tps_path = ''
if model_selection=='cnngeo_vgg16':
model_aff_path = 'trained_models/trained_models/cnngeo_vgg16_affine.pth.tar'
model_tps_path = 'trained_models/trained_models/cnngeo_vgg16_tps.pth.tar'
feature_extraction_cnn = 'vgg'
elif model_selection=='cnngeo_resnet101':
model_aff_path = 'trained_models/trained_models/cnngeo_resnet101_affine.pth.tar'
model_tps_path = 'trained_models/trained_models/cnngeo_resnet101_tps.pth.tar'
feature_extraction_cnn = 'resnet101'
elif model_selection=='proposed_resnet101':
model_aff_tps_path = 'trained_models/weakalign_resnet101_affine_tps.pth.tar'
feature_extraction_cnn = 'resnet101'
source_image_path='datasets/proposal-flow-pascal/PF-dataset-PASCAL/JPEGImages/2008_006325.jpg'
target_image_path='datasets/proposal-flow-pascal/PF-dataset-PASCAL/JPEGImages/2010_004954.jpg'
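# NOTE: the PF-PASCAL example pair above is overridden by the custom image pair below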
source_image_path='datasets/3.JPEG'
target_image_path='datasets/4.JPEG'
if not exists(source_image_path):
download_PF_pascal('datasets/proposal-flow-pascal/')
```
## Load models
```
use_cuda = torch.cuda.is_available()
model = TwoStageCNNGeometric(use_cuda=use_cuda,
return_correlation=True,
feature_extraction_cnn=feature_extraction_cnn)
# load pre-trained model
if model_aff_tps_path!='':
checkpoint = torch.load(model_aff_tps_path, map_location=lambda storage, loc: storage)
checkpoint['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint['state_dict'].items()])
for name, param in model.FeatureExtraction.state_dict().items():
model.FeatureExtraction.state_dict()[name].copy_(checkpoint['state_dict']['FeatureExtraction.' + name])
for name, param in model.FeatureRegression.state_dict().items():
model.FeatureRegression.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression.' + name])
for name, param in model.FeatureRegression2.state_dict().items():
model.FeatureRegression2.state_dict()[name].copy_(checkpoint['state_dict']['FeatureRegression2.' + name])
else:
checkpoint_aff = torch.load(model_aff_path, map_location=lambda storage, loc: storage)
checkpoint_aff['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_aff['state_dict'].items()])
for name, param in model.FeatureExtraction.state_dict().items():
model.FeatureExtraction.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureExtraction.' + name])
for name, param in model.FeatureRegression.state_dict().items():
model.FeatureRegression.state_dict()[name].copy_(checkpoint_aff['state_dict']['FeatureRegression.' + name])
checkpoint_tps = torch.load(model_tps_path, map_location=lambda storage, loc: storage)
checkpoint_tps['state_dict'] = OrderedDict([(k.replace('vgg', 'model'), v) for k, v in checkpoint_tps['state_dict'].items()])
for name, param in model.FeatureRegression2.state_dict().items():
model.FeatureRegression2.state_dict()[name].copy_(checkpoint_tps['state_dict']['FeatureRegression.' + name])
arg_groups = {'tps_grid_size': 3, 'tps_reg_factor': 0.2, 'normalize_inlier_count': True, 'dilation_filter': 0, 'use_conv_filter': False}
```
## Instantiate the inlier-count modules
```
inliersAffine = WeakInlierCount(geometric_model='affine',**arg_groups)
#inliersTps = WeakInlierCount(geometric_model='tps',**arg_groups['weak_loss'])
inliersComposed = TwoStageWeakInlierCount(use_cuda=use_cuda,**arg_groups)
```
# Load the PF-PASCAL dataset
```
from data.pf_dataset import PFDataset, PFPascalDataset
from data.download_datasets import download_PF_willow
pf_path='datasets/proposal-flow-pascal/'
# Dataset and dataloader
dataset = PFPascalDataset(csv_file=os.path.join(pf_path, 'test_pairs_pf_pascal.csv'),
dataset_path=pf_path,
transform=NormalizeImageDict(['source_image','target_image']))
dataloader = DataLoader(dataset, batch_size=4,
shuffle=True, num_workers=4)
batchTensorToVars = BatchTensorToVars(use_cuda=use_cuda)
```
## Get one batch (4 images)
```
batch = iter(dataloader)
x = next(batch)
print(type(x), x.keys())
batch_source_img = x['source_image']
batch_target_img = x['target_image']
```
## Un-normalizing and reshaping
```
source_img_numpy = normalize_image(batch_source_img, forward=False)[1].transpose(1, 0).transpose(1, 2).cpu().numpy()
plt.imshow(source_img_numpy)
target_img_numpy = normalize_image(batch_target_img, forward=False)[1].transpose(1, 0).transpose(1, 2).cpu().numpy()
plt.imshow(target_img_numpy)
tpsTnf = GeometricTnf(geometric_model='tps', use_cuda=use_cuda)
affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda)
type(x['source_image'])
```
## Batch FloatTensor To Variables
```
batch_tnf = BatchTensorToVars(use_cuda=use_cuda)
model.eval()
# Evaluate model
theta_aff,theta_aff_tps,corr_aff,corr_aff_tps=model(batch_tnf(x))
arg_groups = {'tps_grid_size': 3, 'tps_reg_factor': 0.2, 'normalize_inlier_count': True, 'dilation_filter': 0, 'use_conv_filter': False}
inliersAffine = WeakInlierCount(geometric_model='affine',**arg_groups)
#inliersTps = WeakInlierCount(geometric_model='tps',**arg_groups['weak_loss'])
inliersComposed = TwoStageWeakInlierCount(use_cuda=use_cuda,**arg_groups)
inliers_comp = inliersComposed(matches=corr_aff,
theta_aff=theta_aff,
theta_aff_tps=theta_aff_tps)
inliers_aff = inliersAffine(matches=corr_aff,
theta=theta_aff)
print(inliers_comp.cpu().data.numpy())
print(inliers_aff.cpu().data.numpy())
```
# Load the Omniglot dataset
```
n_epochs = 1
n_episodes = 10
n_way = 20
n_shot = 5
n_query = 5
n_examples = 20
im_width, im_height, channels = 128, 128, 1
h_dim = 64
z_dim = 64
# Load Train Dataset
data_generator_path = os.environ['DATA_GENERATOR']
train_split_path = os.path.join(data_generator_path, 'labels/omniglot', 'train.txt')
with open(train_split_path, 'r') as train_split:
train_classes = [line.rstrip() for line in train_split.readlines()]
n_classes = len(train_classes)
train_dataset = np.zeros([n_classes, n_examples, im_height, im_width], dtype=np.float32)
for i, tc in enumerate(train_classes):
alphabet, character, rotation = tc.split('/')
rotation = float(rotation[3:])
im_dir = os.path.join(data_generator_path, 'datasets/omniglot', alphabet, character)
im_files = sorted(glob.glob(os.path.join(im_dir, '*.png')))
for j, im_file in enumerate(im_files):
im = 1. - np.array(Image.open(im_file).rotate(rotation).resize((im_width, im_height)), np.float32, copy=False)
train_dataset[i, j] = im
print(train_dataset.shape)
x = train_dataset[0]
```
## Episodic input pipeline
```
for ep in range(n_epochs):
for epi in range(n_episodes):
epi_classes = np.random.permutation(n_classes)[:n_way]
support = np.zeros([n_way, n_shot, im_height, im_width], dtype=np.float32)
query = np.zeros([n_way, n_query, im_height, im_width], dtype=np.float32)
for i, epi_cls in enumerate(epi_classes):
selected = np.random.permutation(n_examples)[:n_shot + n_query]
support[i] = train_dataset[epi_cls, selected[:n_shot]]
query[i] = train_dataset[epi_cls, selected[n_shot:]]
support = np.expand_dims(support, axis=-1)
query = np.expand_dims(query, axis=-1)
labels = np.tile(np.arange(n_way)[:, np.newaxis], (1, n_query)).astype(np.uint8)
print('support', support.shape)
print('query', query.shape)
# _, ls, ac = sess.run([train_op, ce_loss, acc], feed_dict={x: support, q: query, y:labels})
# if (epi+1) % 50 == 0:
# print('[epoch {}/{}, episode {}/{}] => loss: {:.5f}, acc: {:.5f}'.format(ep+1, n_epochs, epi+1, n_episodes, ls, ac))
```
## Create image transformers
```
tpsTnf = GeometricTnf(geometric_model='tps', use_cuda=use_cuda)
affTnf = GeometricTnf(geometric_model='affine', use_cuda=use_cuda)
```
## Load and preprocess images
```
resizeCNN = GeometricTnf(out_h=240, out_w=240, use_cuda = False)
normalizeTnf = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def preprocess_image(image):
# convert to torch Variable
image = np.expand_dims(image.transpose((2,0,1)),0)
image = torch.Tensor(image.astype(np.float32)/255.0)
image_var = Variable(image,requires_grad=False)
# Resize image using bilinear sampling with identity affine tnf
image_var = resizeCNN(image_var)
# Normalize image
image_var = normalize_image(image_var)
return image_var
source_image = io.imread(source_image_path)
target_image = io.imread(target_image_path)
source_image_var = preprocess_image(source_image)
target_image_var = preprocess_image(target_image)
if use_cuda:
source_image_var = source_image_var.cuda()
target_image_var = target_image_var.cuda()
batch = {'source_image': source_image_var, 'target_image':target_image_var}
resizeTgt = GeometricTnf(out_h=target_image.shape[0], out_w=target_image.shape[1], use_cuda = use_cuda)
```
## Evaluate model
```
model.eval()
# Evaluate model
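# NOTE: the model was constructed with return_correlation=True and returned four
# outputs in the earlier batch cell; if this two-value unpacking fails, capture the
# correlation tensors here as well.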
theta_aff,theta_aff_tps=model(batch)
```
## Compute warped images
```
def affTpsTnf(source_image, theta_aff, theta_aff_tps, use_cuda=use_cuda):
tpstnf = GeometricTnf(geometric_model = 'tps',use_cuda=use_cuda)
sampling_grid = tpstnf(image_batch=source_image,
theta_batch=theta_aff_tps,
return_sampling_grid=True)[1]
X = sampling_grid[:,:,:,0].unsqueeze(3)
Y = sampling_grid[:,:,:,1].unsqueeze(3)
Xp = X*theta_aff[:,0].unsqueeze(1).unsqueeze(2)+Y*theta_aff[:,1].unsqueeze(1).unsqueeze(2)+theta_aff[:,2].unsqueeze(1).unsqueeze(2)
Yp = X*theta_aff[:,3].unsqueeze(1).unsqueeze(2)+Y*theta_aff[:,4].unsqueeze(1).unsqueeze(2)+theta_aff[:,5].unsqueeze(1).unsqueeze(2)
sg = torch.cat((Xp,Yp),3)
warped_image_batch = F.grid_sample(source_image, sg)
return warped_image_batch
warped_image_aff = affTnf(batch['source_image'],theta_aff.view(-1,2,3))
warped_image_aff_tps = affTpsTnf(batch['source_image'],theta_aff,theta_aff_tps)
```
## Display
```
# Un-normalize images and convert to numpy
warped_image_aff_np = normalize_image(resizeTgt(warped_image_aff),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
warped_image_aff_tps_np = normalize_image(resizeTgt(warped_image_aff_tps),forward=False).data.squeeze(0).transpose(0,1).transpose(1,2).cpu().numpy()
N_subplots = 4
fig, axs = plt.subplots(1,N_subplots)
axs[0].imshow(source_image)
axs[0].set_title('src')
axs[1].imshow(target_image)
axs[1].set_title('tgt')
axs[2].imshow(warped_image_aff_np)
axs[2].set_title('aff')
axs[3].imshow(warped_image_aff_tps_np)
axs[3].set_title('aff+tps')
for i in range(N_subplots):
axs[i].axis('off')
fig.set_dpi(150)
plt.show()
```
```
from EM import EM
from NN import NN
import numpy as np
# basic parameters
D = 617
K = 26
# parameters for neural networks
neuron_1 = {0: K}
neuron_2_10 = {0: 10, 1: K}
neuron_2_100 = {0: 100, 1: K}
act_func_1 = {0: NN.softmax}
act_func_2 = {0: NN.relu, 1: NN.softmax}
def set_sample(D, K):
# read in whole train set
train = np.loadtxt("dataset/isolet1+2+3+4.data", delimiter=",")
np.random.shuffle(train)
point = np.array(train[:, :D], dtype=float)
label = np.zeros([len(train), K])
for i in range(len(train)): label[i][int(train[i][D])-1] = 1
# split whole train set into train and validation set
n_1 = int(0.7 * len(train))
train_point = np.array([point[i] for i in range(n_1)])
train_label = np.array([label[i] for i in range(n_1)])
valid_point = np.array([point[i] for i in range(n_1, len(train))])
valid_label = np.array([label[i] for i in range(n_1, len(train))])
# read in test sample
test = np.loadtxt("dataset/isolet5.data", delimiter=",")
test_point = np.array(test[:, :D], dtype=float)
test_label = np.zeros([len(test), K])
for i in range(len(test)): test_label[i][int(test[i][D])-1] = 1
return {
"train_point": train_point, "train_label": train_label,
"valid_point": valid_point, "valid_label": valid_label,
"test_point": test_point, "test_label": test_label
}
```
Conventional Neural Network (617-26)
```
accuracy_set = []
time_set = []
for i in range(10):
sample = set_sample(D, K)
method = NN(D, neuron_1, act_func_1, NN_type="CNN")
method.train(sample["train_point"], sample["train_label"],
sample["valid_point"], sample["valid_label"], step_size=500)
accuracy = method.test(sample["test_point"], sample["test_label"])[0] * 100
time = method.train_time
accuracy_set.append(accuracy)
time_set.append(time)
mean, std_dev = np.mean(accuracy_set), np.std(accuracy_set)
print("accuracy: %2.2f \pm %2.2f" % (mean, std_dev))
mean, std_dev = np.mean(time_set), np.std(time_set)
print("time : %2.1f \pm %2.1f" % (mean, std_dev))
```
Conventional Neural Network (617-10-26)
```
accuracy_set = []
time_set = []
for i in range(10):
sample = set_sample(D, K)
method = NN(D, neuron_2_10, act_func_2, NN_type="CNN")
method.train(sample["train_point"], sample["train_label"],
sample["valid_point"], sample["valid_label"], step_size=500)
accuracy = method.test(sample["test_point"], sample["test_label"])[0] * 100
time = method.train_time
accuracy_set.append(accuracy)
time_set.append(time)
mean, std_dev = np.mean(accuracy_set), np.std(accuracy_set)
print("accuracy: %2.2f \pm %2.2f" % (mean, std_dev))
mean, std_dev = np.mean(time_set), np.std(time_set)
print("time : %2.1f \pm %2.1f" % (mean, std_dev))
```
Conventional Neural Network (617-100-26)
```
accuracy_set = []
time_set = []
for i in range(10):
sample = set_sample(D, K)
method = NN(D, neuron_2_100, act_func_2, NN_type="CNN")
method.train(sample["train_point"], sample["train_label"],
sample["valid_point"], sample["valid_label"], step_size=500)
accuracy = method.test(sample["test_point"], sample["test_label"])[0] * 100
time = method.train_time
accuracy_set.append(accuracy)
time_set.append(time)
mean, std_dev = np.mean(accuracy_set), np.std(accuracy_set)
print("accuracy: %2.2f \pm %2.2f" % (mean, std_dev))
mean, std_dev = np.mean(time_set), np.std(time_set)
print("time : %2.1f \pm %2.1f" % (mean, std_dev))
```
Quadratic Neural Network (617-26)
```
accuracy_set = []
time_set = []
for i in range(10):
sample = set_sample(D, K)
method = NN(D, neuron_1, act_func_1, NN_type="QNN")
method.train(sample["train_point"], sample["train_label"],
sample["valid_point"], sample["valid_label"], step_size=500)
accuracy = method.test(sample["test_point"], sample["test_label"])[0] * 100
time = method.train_time
accuracy_set.append(accuracy)
time_set.append(time)
mean, std_dev = np.mean(accuracy_set), np.std(accuracy_set)
print("accuracy: %2.2f \pm %2.2f" % (mean, std_dev))
mean, std_dev = np.mean(time_set), np.std(time_set)
print("time : %2.1f \pm %2.1f" % (mean, std_dev))
```
|
github_jupyter
|
from EM import EM
from NN import NN
import numpy as np
# basic parameters
D = 617
K = 26
# parameters for neural networks
neuron_1 = {0: K}
neuron_2_10 = {0: 10, 1: K}
neuron_2_100 = {0: 100, 1: K}
act_func_1 = {0: NN.softmax}
act_func_2 = {0: NN.relu, 1: NN.softmax}
def set_sample(D, K):
# read in whole train set
train = np.loadtxt("dataset/isolet1+2+3+4.data", delimiter=",")
np.random.shuffle(train)
point = np.array(train[:, :D], dtype=float)
label = np.zeros([len(train), K])
for i in range(len(train)): label[i][int(train[i][D])-1] = 1
# split whole train set into train and validation set
n_1 = int(0.7 * len(train))
train_point = np.array([point[i] for i in range(n_1)])
train_label = np.array([label[i] for i in range(n_1)])
valid_point = np.array([point[i] for i in range(n_1, len(train))])
valid_label = np.array([label[i] for i in range(n_1, len(train))])
# read in test sample
test = np.loadtxt("dataset/isolet5.data", delimiter=",")
test_point = np.array(test[:, :D], dtype=float)
test_label = np.zeros([len(test), K])
for i in range(len(test)): test_label[i][int(test[i][D])-1] = 1
return {
"train_point": train_point, "train_label": train_label,
"valid_point": valid_point, "valid_label": valid_label,
"test_point": test_point, "test_label": test_label
}
# run each network configuration 10 times on fresh train/validation splits
# and report mean and spread of test accuracy and training time
configurations = [
    (neuron_1,     act_func_1, "CNN"),   # no hidden layer
    (neuron_2_10,  act_func_2, "CNN"),   # one hidden layer, 10 neurons
    (neuron_2_100, act_func_2, "CNN"),   # one hidden layer, 100 neurons
    (neuron_1,     act_func_1, "QNN"),   # no hidden layer, QNN variant
]
for neuron, act_func, nn_type in configurations:
    accuracy_set = []
    time_set = []
    for i in range(10):
        sample = set_sample(D, K)
        method = NN(D, neuron, act_func, NN_type=nn_type)
        method.train(sample["train_point"], sample["train_label"],
                     sample["valid_point"], sample["valid_label"], step_size=500)
        accuracy = method.test(sample["test_point"], sample["test_label"])[0] * 100
        time = method.train_time
        accuracy_set.append(accuracy)
        time_set.append(time)
    # np.std returns the standard deviation (not the variance)
    mean, std = np.mean(accuracy_set), np.std(accuracy_set)
    print("accuracy: %2.2f \\pm %2.2f" % (mean, std))
    mean, std = np.mean(time_set), np.std(time_set)
    print("time    : %2.1f \\pm %2.1f" % (mean, std))
```
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import scale, robust_scale, minmax_scale, maxabs_scale

path = ('C://Users//Jongjae//Project/home_credit/')
train = pd.read_csv(path + 'application_train.csv')
def missing_values_table(df):
# Total missing values
mis_val = df.isnull().sum()
# Percentage of missing values
mis_val_percent = 100 * df.isnull().sum() / len(df)
# Make a table with the results
mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)
# Rename the columns
mis_val_table_ren_columns = mis_val_table.rename(
columns = {0 : 'Missing Values', 1 : '% of Total Values'})
# Sort the table by percentage of missing descending
mis_val_table_ren_columns = mis_val_table_ren_columns[
mis_val_table_ren_columns.iloc[:,1] != 0].round(1)
# Print some summary information
print ("Your selected dataframe has " + str(df.shape[1]) + " columns.\n"
"There are " + str(mis_val_table_ren_columns.shape[0]) +
" columns that have missing values.")
# Return the dataframe with missing information
return mis_val_table_ren_columns
train['NAME_CONTRACT_TYPE'] = LabelEncoder().fit_transform(train['NAME_CONTRACT_TYPE'])
train['CODE_GENDER'] = LabelEncoder().fit_transform(train['CODE_GENDER'])
train['FLAG_OWN_CAR'] = LabelEncoder().fit_transform(train['FLAG_OWN_CAR'])
train['FLAG_OWN_REALTY'] = LabelEncoder().fit_transform(train['FLAG_OWN_REALTY'])
train['CNT_CHILDREN'].unique()
from sklearn.preprocessing import LabelBinarizer
train_CNT_CHILDREN = pd.DataFrame(LabelBinarizer().fit_transform(train['CNT_CHILDREN']),
columns = ['CNT_CHILDREN_0', 'CNT_CHILDREN_1', 'CNT_CHILDREN_2',
'CNT_CHILDREN_3', 'CNT_CHILDREN_4', 'CNT_CHILDREN_5',
'CNT_CHILDREN_6', 'CNT_CHILDREN_7', 'CNT_CHILDREN_8', 'CNT_CHILDREN_9',
'CNT_CHILDREN_10', 'CNT_CHILDREN_11', 'CNT_CHILDREN_12',
'CNT_CHILDREN_14', 'CNT_CHILDREN_19'], index=train.index)
train = pd.concat([train, train_CNT_CHILDREN], axis = 1)
del(train['CNT_CHILDREN'])
train['AMT_INCOME_TOTAL'] = scale(train['AMT_INCOME_TOTAL'])
train['AMT_CREDIT'] = scale(train['AMT_CREDIT'])
train['AMT_ANNUITY'].fillna(train['AMT_ANNUITY'].mean(), inplace = True)
train['AMT_ANNUITY'] = scale(train['AMT_ANNUITY'])
train['AMT_GOODS_PRICE'].fillna(train['AMT_GOODS_PRICE'].mean(), inplace = True)
train['AMT_GOODS_PRICE'] = scale(train['AMT_GOODS_PRICE'])
train['NAME_TYPE_SUITE'].fillna(train['NAME_TYPE_SUITE'].mode()[0], inplace = True)
train_NAME_TYPE_SUITE = pd.get_dummies(train['NAME_TYPE_SUITE'])
train = pd.concat([train, train_NAME_TYPE_SUITE], axis = 1)
del(train['NAME_TYPE_SUITE'])
train_NAME_EDUCATION_TYPE = pd.get_dummies(train['NAME_EDUCATION_TYPE'])
train = pd.concat([train, train_NAME_EDUCATION_TYPE], axis = 1)
del(train['NAME_EDUCATION_TYPE'])
train_NAME_FAMILY_STATUS = pd.get_dummies(train['NAME_FAMILY_STATUS'])
train = pd.concat([train, train_NAME_FAMILY_STATUS], axis = 1)
del(train['NAME_FAMILY_STATUS'])
train_NAME_HOUSING_TYPE = pd.get_dummies(train['NAME_HOUSING_TYPE'])
train = pd.concat([train, train_NAME_HOUSING_TYPE], axis = 1)
del(train['NAME_HOUSING_TYPE'])
train['DAYS_BIRTH'] = scale(train['DAYS_BIRTH'])
train['DAYS_EMPLOYED'] = scale(train['DAYS_EMPLOYED'])
train['DAYS_REGISTRATION'] = scale(train['DAYS_REGISTRATION'])
train['DAYS_ID_PUBLISH'] = scale(train['DAYS_ID_PUBLISH'])
train['OWN_CAR_AGE'].fillna(train['OWN_CAR_AGE'].mode()[0], inplace=True)
print(sorted(train['OWN_CAR_AGE'].unique()))
from sklearn.preprocessing import LabelBinarizer
train_OWN_CAR_AGE = pd.DataFrame(LabelBinarizer().fit_transform(train['OWN_CAR_AGE']),
columns = ['OWN_CAR_AGE_0', 'OWN_CAR_AGE_1', 'OWN_CAR_AGE_2',
'OWN_CAR_AGE_3', 'OWN_CAR_AGE_4', 'OWN_CAR_AGE_5',
'OWN_CAR_AGE_6', 'OWN_CAR_AGE_7', 'OWN_CAR_AGE_8',
'OWN_CAR_AGE_9', 'OWN_CAR_AGE_10', 'OWN_CAR_AGE_11',
'OWN_CAR_AGE_12', 'OWN_CAR_AGE_13', 'OWN_CAR_AGE_14',
'OWN_CAR_AGE_15', 'OWN_CAR_AGE_16', 'OWN_CAR_AGE_17',
'OWN_CAR_AGE_18', 'OWN_CAR_AGE_19', 'OWN_CAR_AGE_20',
'OWN_CAR_AGE_21', 'OWN_CAR_AGE_22', 'OWN_CAR_AGE_23',
'OWN_CAR_AGE_24', 'OWN_CAR_AGE_25', 'OWN_CAR_AGE_26',
'OWN_CAR_AGE_27', 'OWN_CAR_AGE_28', 'OWN_CAR_AGE_29',
'OWN_CAR_AGE_30', 'OWN_CAR_AGE_31', 'OWN_CAR_AGE_32',
'OWN_CAR_AGE_33', 'OWN_CAR_AGE_34', 'OWN_CAR_AGE_35',
'OWN_CAR_AGE_36', 'OWN_CAR_AGE_37', 'OWN_CAR_AGE_38',
'OWN_CAR_AGE_39', 'OWN_CAR_AGE_40', 'OWN_CAR_AGE_41',
'OWN_CAR_AGE_42', 'OWN_CAR_AGE_43', 'OWN_CAR_AGE_44',
'OWN_CAR_AGE_45', 'OWN_CAR_AGE_46', 'OWN_CAR_AGE_47',
'OWN_CAR_AGE_48', 'OWN_CAR_AGE_49',
'OWN_CAR_AGE_50', 'OWN_CAR_AGE_51', 'OWN_CAR_AGE_52',
'OWN_CAR_AGE_54', 'OWN_CAR_AGE_55', 'OWN_CAR_AGE_56',
'OWN_CAR_AGE_57', 'OWN_CAR_AGE_63', 'OWN_CAR_AGE_64',
'OWN_CAR_AGE_65', 'OWN_CAR_AGE_69', 'OWN_CAR_AGE_91'], index=train.index)
train = pd.concat([train, train_OWN_CAR_AGE], axis = 1)
del(train['OWN_CAR_AGE'])
train['FLAG_MOBIL'] = LabelEncoder().fit_transform(train['FLAG_MOBIL'])
train['FLAG_EMP_PHONE'] = LabelEncoder().fit_transform(train['FLAG_EMP_PHONE'])
train['FLAG_WORK_PHONE'] = LabelEncoder().fit_transform(train['FLAG_WORK_PHONE'])
train['FLAG_CONT_MOBILE'] = LabelEncoder().fit_transform(train['FLAG_CONT_MOBILE'])
train['FLAG_PHONE'].fillna(train['FLAG_PHONE'].mode()[0], inplace=True)
train['FLAG_PHONE'] = LabelEncoder().fit_transform(train['FLAG_PHONE'])
train['OCCUPATION_TYPE'].fillna(train['OCCUPATION_TYPE'].mode()[0], inplace=True)
train_OCCUPATION_TYPE = pd.get_dummies(train['OCCUPATION_TYPE'])
train = pd.concat([train, train_OCCUPATION_TYPE], axis = 1)
del(train['OCCUPATION_TYPE'])
print(sorted(train['CNT_FAM_MEMBERS'].unique()))
train['CNT_FAM_MEMBERS'].fillna(train['CNT_FAM_MEMBERS'].mode()[0], inplace=True)
from sklearn.preprocessing import LabelBinarizer
train_CNT_FAM_MEMBERS = pd.DataFrame(LabelBinarizer().fit_transform(train['CNT_FAM_MEMBERS']),
columns = ['CNT_FAM_MEMBERS_1', 'CNT_FAM_MEMBERS_2', 'CNT_FAM_MEMBERS_3',
'CNT_FAM_MEMBERS_4', 'CNT_FAM_MEMBERS_5', 'CNT_FAM_MEMBERS_6',
'CNT_FAM_MEMBERS_7', 'CNT_FAM_MEMBERS_8', 'CNT_FAM_MEMBERS_9',
'CNT_FAM_MEMBERS_10', 'CNT_FAM_MEMBERS_11', 'CNT_FAM_MEMBERS_12',
'CNT_FAM_MEMBERS_13', 'CNT_FAM_MEMBERS_14', 'CNT_FAM_MEMBERS_15',
'CNT_FAM_MEMBERS_16', 'CNT_FAM_MEMBERS_20'], index=train.index)
train = pd.concat([train, train_CNT_FAM_MEMBERS], axis = 1)
del(train['CNT_FAM_MEMBERS'])
train_REGION_RATING_CLIENT_W_CITY = pd.DataFrame(LabelBinarizer().fit_transform(train['REGION_RATING_CLIENT_W_CITY']),
columns = ['REGION_RATING_CLIENT_W_CITY_1', 'REGION_RATING_CLIENT_W_CITY_2', 'REGION_RATING_CLIENT_W_CITY_3'], index=train.index)
train = pd.concat([train, train_REGION_RATING_CLIENT_W_CITY], axis = 1)
del(train['REGION_RATING_CLIENT_W_CITY'])
train_WEEKDAY_APPR_PROCESS_START = pd.get_dummies(train['WEEKDAY_APPR_PROCESS_START'])
train = pd.concat([train, train_WEEKDAY_APPR_PROCESS_START], axis = 1)
del(train['WEEKDAY_APPR_PROCESS_START'])
print(sorted(train['HOUR_APPR_PROCESS_START'].unique()))
train_HOUR_APPR_PROCESS_START = pd.DataFrame(LabelBinarizer().
fit_transform(train['HOUR_APPR_PROCESS_START']),
columns = ['HOUR_APPR_PROCESS_START_0', 'HOUR_APPR_PROCESS_START_1', 'HOUR_APPR_PROCESS_START_2', 'HOUR_APPR_PROCESS_START_3',
'HOUR_APPR_PROCESS_START_4', 'HOUR_APPR_PROCESS_START_5', 'HOUR_APPR_PROCESS_START_6',
'HOUR_APPR_PROCESS_START_7', 'HOUR_APPR_PROCESS_START_8', 'HOUR_APPR_PROCESS_START_9',
'HOUR_APPR_PROCESS_START_10', 'HOUR_APPR_PROCESS_START_11',
'HOUR_APPR_PROCESS_START_12', 'HOUR_APPR_PROCESS_START_13', 'HOUR_APPR_PROCESS_START_14',
'HOUR_APPR_PROCESS_START_15', 'HOUR_APPR_PROCESS_START_16', 'HOUR_APPR_PROCESS_START_17',
'HOUR_APPR_PROCESS_START_18', 'HOUR_APPR_PROCESS_START_19', 'HOUR_APPR_PROCESS_START_20',
'HOUR_APPR_PROCESS_START_21', 'HOUR_APPR_PROCESS_START_22', 'HOUR_APPR_PROCESS_START_23'], index=train.index)
train = pd.concat([train, train_HOUR_APPR_PROCESS_START], axis = 1)
del(train['HOUR_APPR_PROCESS_START'])
train_ORGANIZATION_TYPE = pd.get_dummies(train['ORGANIZATION_TYPE'])
train = pd.concat([train, train_ORGANIZATION_TYPE], axis = 1)
del(train['ORGANIZATION_TYPE'])
print(train.columns[29], train.columns[78])
normalized_train = train.iloc[:, [29,30,31,32,33,34,35,36,37,38,39,40,
41,42,43,44,45,46,47,48,49,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,
69,70,71,72,73,74,75,76,77,78]]
%matplotlib inline
missing_values_table(normalized_train)
normalized_train.dtypes
# 1. train['FONDKAPREMONT_MODE']
train['FONDKAPREMONT_MODE'].mode()[0]
train['FONDKAPREMONT_MODE'] = train['FONDKAPREMONT_MODE'].fillna('reg oper account')
train_FONDKAPREMONT_MODE = pd.get_dummies(train['FONDKAPREMONT_MODE'])
train = pd.concat([train, train_FONDKAPREMONT_MODE], axis = 1)
del(train['FONDKAPREMONT_MODE'])
# 2. train['HOUSETYPE_MODE']
train['HOUSETYPE_MODE'].mode()[0]
train['HOUSETYPE_MODE'] = train['HOUSETYPE_MODE'].fillna('block of flats')
train_HOUSETYPE_MODE = pd.get_dummies(train['HOUSETYPE_MODE'])
train = pd.concat([train, train_HOUSETYPE_MODE], axis = 1)
del(train['HOUSETYPE_MODE'])
# 3. train['WALLSMATERIAL_MODE']
train['WALLSMATERIAL_MODE'].mode()[0]
train['WALLSMATERIAL_MODE'] = train['WALLSMATERIAL_MODE'].fillna('Panel')
train_WALLSMATERIAL_MODE = pd.get_dummies(train['WALLSMATERIAL_MODE'])
train = pd.concat([train, train_WALLSMATERIAL_MODE], axis = 1)
del(train['WALLSMATERIAL_MODE'])
# 4. train['EMERGENCYSTATE_MODE']
train['EMERGENCYSTATE_MODE'].mode()[0]
train['EMERGENCYSTATE_MODE'] = train['EMERGENCYSTATE_MODE'].fillna('No')
train['EMERGENCYSTATE_MODE'] = LabelEncoder().fit_transform(train['EMERGENCYSTATE_MODE'])
train.columns[75]
troubles = train.iloc[:, [29,30,31,32,33,34,35,36,37,38,39,40,
41,42,43,44,45,46,47,48,49,50,51,52,53,54,
55,56,57,58,59,60,61,62,63,64,65,66,67,68,
69,70,71,72,73,74,75]]
# fill remaining missing values in these columns with the column mean
for i in troubles.columns:
    train[i] = train[i].fillna(train[i].mean())
for i in troubles.columns:
    print(i, train[i].isnull().sum())
train['OBS_30_CNT_SOCIAL_CIRCLE'] = train['OBS_30_CNT_SOCIAL_CIRCLE'].fillna(train['OBS_30_CNT_SOCIAL_CIRCLE'].mode()[0])
print(sorted(train['OBS_30_CNT_SOCIAL_CIRCLE'].unique()))
from sklearn.preprocessing import LabelBinarizer
train_OBS_30_CNT_SOCIAL_CIRCLE = pd.DataFrame(LabelBinarizer().
fit_transform(train['OBS_30_CNT_SOCIAL_CIRCLE']),
columns = ['OBS_30_CNT_SOCIAL_CIRCLE_0', 'OBS_30_CNT_SOCIAL_CIRCLE_1', 'OBS_30_CNT_SOCIAL_CIRCLE_2',
'OBS_30_CNT_SOCIAL_CIRCLE_3', 'OBS_30_CNT_SOCIAL_CIRCLE_4', 'OBS_30_CNT_SOCIAL_CIRCLE_5',
'OBS_30_CNT_SOCIAL_CIRCLE_6', 'OBS_30_CNT_SOCIAL_CIRCLE_7', 'OBS_30_CNT_SOCIAL_CIRCLE_8',
'OBS_30_CNT_SOCIAL_CIRCLE_9', 'OBS_30_CNT_SOCIAL_CIRCLE_10', 'OBS_30_CNT_SOCIAL_CIRCLE_11',
'OBS_30_CNT_SOCIAL_CIRCLE_12', 'OBS_30_CNT_SOCIAL_CIRCLE_13', 'OBS_30_CNT_SOCIAL_CIRCLE_14',
'OBS_30_CNT_SOCIAL_CIRCLE_15', 'OBS_30_CNT_SOCIAL_CIRCLE_16', 'OBS_30_CNT_SOCIAL_CIRCLE_17',
'OBS_30_CNT_SOCIAL_CIRCLE_18', 'OBS_30_CNT_SOCIAL_CIRCLE_19', 'OBS_30_CNT_SOCIAL_CIRCLE_20',
'OBS_30_CNT_SOCIAL_CIRCLE_21', 'OBS_30_CNT_SOCIAL_CIRCLE_22', 'OBS_30_CNT_SOCIAL_CIRCLE_23',
'OBS_30_CNT_SOCIAL_CIRCLE_24', 'OBS_30_CNT_SOCIAL_CIRCLE_25', 'OBS_30_CNT_SOCIAL_CIRCLE_26',
'OBS_30_CNT_SOCIAL_CIRCLE_27', 'OBS_30_CNT_SOCIAL_CIRCLE_28', 'OBS_30_CNT_SOCIAL_CIRCLE_29',
'OBS_30_CNT_SOCIAL_CIRCLE_30', 'OBS_30_CNT_SOCIAL_CIRCLE_47', 'OBS_30_CNT_SOCIAL_CIRCLE_348'], index=train.index)
train = pd.concat([train, train_OBS_30_CNT_SOCIAL_CIRCLE], axis = 1)
del(train['OBS_30_CNT_SOCIAL_CIRCLE'])
train['DEF_30_CNT_SOCIAL_CIRCLE'] = train['DEF_30_CNT_SOCIAL_CIRCLE'].fillna(0.0)
from sklearn.preprocessing import LabelBinarizer
train_DEF_30_CNT_SOCIAL_CIRCLE = pd.DataFrame(LabelBinarizer().
fit_transform(train['DEF_30_CNT_SOCIAL_CIRCLE']),
columns = ['DEF_30_CNT_SOCIAL_CIRCLE_0', 'DEF_30_CNT_SOCIAL_CIRCLE_1', 'DEF_30_CNT_SOCIAL_CIRCLE_2',
'DEF_30_CNT_SOCIAL_CIRCLE_3', 'DEF_30_CNT_SOCIAL_CIRCLE_4', 'DEF_30_CNT_SOCIAL_CIRCLE_5',
'DEF_30_CNT_SOCIAL_CIRCLE_6', 'DEF_30_CNT_SOCIAL_CIRCLE_7', 'DEF_30_CNT_SOCIAL_CIRCLE_8',
'DEF_30_CNT_SOCIAL_CIRCLE_34'], index=train.index)
train = pd.concat([train, train_DEF_30_CNT_SOCIAL_CIRCLE], axis = 1)
del(train['DEF_30_CNT_SOCIAL_CIRCLE'])
print(sorted(train['OBS_60_CNT_SOCIAL_CIRCLE'].unique()))
train['OBS_60_CNT_SOCIAL_CIRCLE'].fillna(0.0, inplace=True)
from sklearn.preprocessing import LabelBinarizer
train_OBS_60_CNT_SOCIAL_CIRCLE = pd.DataFrame(LabelBinarizer().
fit_transform(train['OBS_60_CNT_SOCIAL_CIRCLE']),
columns = ['OBS_60_CNT_SOCIAL_CIRCLE_0', 'OBS_60_CNT_SOCIAL_CIRCLE_1', 'OBS_60_CNT_SOCIAL_CIRCLE_2',
'OBS_60_CNT_SOCIAL_CIRCLE_3', 'OBS_60_CNT_SOCIAL_CIRCLE_4', 'OBS_60_CNT_SOCIAL_CIRCLE_5',
'OBS_60_CNT_SOCIAL_CIRCLE_6', 'OBS_60_CNT_SOCIAL_CIRCLE_7', 'OBS_60_CNT_SOCIAL_CIRCLE_8',
'OBS_60_CNT_SOCIAL_CIRCLE_9', 'OBS_60_CNT_SOCIAL_CIRCLE_10', 'OBS_60_CNT_SOCIAL_CIRCLE_11',
'OBS_60_CNT_SOCIAL_CIRCLE_12', 'OBS_60_CNT_SOCIAL_CIRCLE_13', 'OBS_60_CNT_SOCIAL_CIRCLE_14',
'OBS_60_CNT_SOCIAL_CIRCLE_15', 'OBS_60_CNT_SOCIAL_CIRCLE_16', 'OBS_60_CNT_SOCIAL_CIRCLE_17',
'OBS_60_CNT_SOCIAL_CIRCLE_18', 'OBS_60_CNT_SOCIAL_CIRCLE_19', 'OBS_60_CNT_SOCIAL_CIRCLE_20',
'OBS_60_CNT_SOCIAL_CIRCLE_21', 'OBS_60_CNT_SOCIAL_CIRCLE_22', 'OBS_60_CNT_SOCIAL_CIRCLE_23',
'OBS_60_CNT_SOCIAL_CIRCLE_24', 'OBS_60_CNT_SOCIAL_CIRCLE_25', 'OBS_60_CNT_SOCIAL_CIRCLE_26',
'OBS_60_CNT_SOCIAL_CIRCLE_27', 'OBS_60_CNT_SOCIAL_CIRCLE_28', 'OBS_60_CNT_SOCIAL_CIRCLE_29',
'OBS_60_CNT_SOCIAL_CIRCLE_30', 'OBS_60_CNT_SOCIAL_CIRCLE_47', 'OBS_60_CNT_SOCIAL_CIRCLE_344'], index=train.index)
train = pd.concat([train, train_OBS_60_CNT_SOCIAL_CIRCLE], axis = 1)
del(train['OBS_60_CNT_SOCIAL_CIRCLE'])
sorted(train['DEF_60_CNT_SOCIAL_CIRCLE'].unique())
train['DEF_60_CNT_SOCIAL_CIRCLE'] = train['DEF_60_CNT_SOCIAL_CIRCLE'].fillna(0.0)
from sklearn.preprocessing import LabelBinarizer
train_DEF_60_CNT_SOCIAL_CIRCLE = pd.DataFrame(LabelBinarizer().
fit_transform(train['DEF_60_CNT_SOCIAL_CIRCLE']),
columns = ['DEF_60_CNT_SOCIAL_CIRCLE_0', 'DEF_60_CNT_SOCIAL_CIRCLE_1', 'DEF_60_CNT_SOCIAL_CIRCLE_2',
'DEF_60_CNT_SOCIAL_CIRCLE_3', 'DEF_60_CNT_SOCIAL_CIRCLE_4', 'DEF_60_CNT_SOCIAL_CIRCLE_5',
'DEF_60_CNT_SOCIAL_CIRCLE_6', 'DEF_60_CNT_SOCIAL_CIRCLE_7', 'DEF_60_CNT_SOCIAL_CIRCLE_24'], index=train.index)
train = pd.concat([train, train_DEF_60_CNT_SOCIAL_CIRCLE], axis = 1)
del(train['DEF_60_CNT_SOCIAL_CIRCLE'])
train['DAYS_LAST_PHONE_CHANGE'] = train['DAYS_LAST_PHONE_CHANGE'].fillna(0.0)
train['DAYS_LAST_PHONE_CHANGE'] = scale(train['DAYS_LAST_PHONE_CHANGE'])
print(train['AMT_REQ_CREDIT_BUREAU_HOUR'].mode()[0])
print(train['AMT_REQ_CREDIT_BUREAU_DAY'].mode()[0])
print(train['AMT_REQ_CREDIT_BUREAU_WEEK'].mode()[0])
print(train['AMT_REQ_CREDIT_BUREAU_MON'].mode()[0])
print(train['AMT_REQ_CREDIT_BUREAU_QRT'].mode()[0])
print(train['AMT_REQ_CREDIT_BUREAU_YEAR'].mode()[0])
print(train['AMT_REQ_CREDIT_BUREAU_HOUR'].unique())
print(train['AMT_REQ_CREDIT_BUREAU_DAY'].unique())
print(train['AMT_REQ_CREDIT_BUREAU_WEEK'].unique())
print(train['AMT_REQ_CREDIT_BUREAU_MON'].unique())
print(train['AMT_REQ_CREDIT_BUREAU_QRT'].unique())
print(train['AMT_REQ_CREDIT_BUREAU_YEAR'].unique())
print(train['AMT_REQ_CREDIT_BUREAU_HOUR'].isnull().sum())
print(train['AMT_REQ_CREDIT_BUREAU_DAY'].isnull().sum())
print(train['AMT_REQ_CREDIT_BUREAU_WEEK'].isnull().sum())
print(train['AMT_REQ_CREDIT_BUREAU_MON'].isnull().sum())
print(train['AMT_REQ_CREDIT_BUREAU_QRT'].isnull().sum())
print(train['AMT_REQ_CREDIT_BUREAU_YEAR'].isnull().sum())
train['AMT_REQ_CREDIT_BUREAU_HOUR'] = train['AMT_REQ_CREDIT_BUREAU_HOUR'].fillna(0.0)
train['AMT_REQ_CREDIT_BUREAU_DAY'] = train['AMT_REQ_CREDIT_BUREAU_DAY'].fillna(0.0)
train['AMT_REQ_CREDIT_BUREAU_WEEK'] = train['AMT_REQ_CREDIT_BUREAU_WEEK'].fillna(0.0)
train['AMT_REQ_CREDIT_BUREAU_MON'] = train['AMT_REQ_CREDIT_BUREAU_MON'].fillna(0.0)
train['AMT_REQ_CREDIT_BUREAU_QRT'] = train['AMT_REQ_CREDIT_BUREAU_QRT'].fillna(0.0)
train['AMT_REQ_CREDIT_BUREAU_YEAR'] = train['AMT_REQ_CREDIT_BUREAU_YEAR'].fillna(0.0)
train['AMT_REQ_CREDIT_BUREAU_HOUR'].unique()
from sklearn.preprocessing import LabelBinarizer
train_AMT_REQ_CREDIT_BUREAU_HOUR = pd.DataFrame(LabelBinarizer().fit_transform(train['AMT_REQ_CREDIT_BUREAU_HOUR']),
columns = ['AMT_REQ_CREDIT_BUREAU_HOUR_0', 'AMT_REQ_CREDIT_BUREAU_HOUR_1',
'AMT_REQ_CREDIT_BUREAU_HOUR_2', 'AMT_REQ_CREDIT_BUREAU_HOUR_3',
'AMT_REQ_CREDIT_BUREAU_HOUR_4'], index=train.index)
train = pd.concat([train, train_AMT_REQ_CREDIT_BUREAU_HOUR], axis = 1)
del(train['AMT_REQ_CREDIT_BUREAU_HOUR'])
sorted(train['AMT_REQ_CREDIT_BUREAU_WEEK'].unique())
from sklearn.preprocessing import LabelBinarizer
train_AMT_REQ_CREDIT_BUREAU_WEEK = pd.DataFrame(LabelBinarizer().fit_transform(train['AMT_REQ_CREDIT_BUREAU_WEEK']),
columns = ['AMT_REQ_CREDIT_BUREAU_WEEK_0', 'AMT_REQ_CREDIT_BUREAU_WEEK_1', 'AMT_REQ_CREDIT_BUREAU_WEEK_2',
'AMT_REQ_CREDIT_BUREAU_WEEK_3', 'AMT_REQ_CREDIT_BUREAU_WEEK_4', 'AMT_REQ_CREDIT_BUREAU_WEEK_5',
'AMT_REQ_CREDIT_BUREAU_WEEK_6', 'AMT_REQ_CREDIT_BUREAU_WEEK_7', 'AMT_REQ_CREDIT_BUREAU_WEEK_8'], index=train.index)
train = pd.concat([train, train_AMT_REQ_CREDIT_BUREAU_WEEK], axis = 1)
del(train['AMT_REQ_CREDIT_BUREAU_WEEK'])
print(sorted(train['AMT_REQ_CREDIT_BUREAU_MON'].unique()))
from sklearn.preprocessing import LabelBinarizer
train_AMT_REQ_CREDIT_BUREAU_MON = pd.DataFrame(LabelBinarizer().
fit_transform(train['AMT_REQ_CREDIT_BUREAU_MON']),
columns = ['AMT_REQ_CREDIT_BUREAU_MON_0', 'AMT_REQ_CREDIT_BUREAU_MON_1', 'AMT_REQ_CREDIT_BUREAU_MON_2',
'AMT_REQ_CREDIT_BUREAU_MON_3', 'AMT_REQ_CREDIT_BUREAU_MON_4', 'AMT_REQ_CREDIT_BUREAU_MON_5',
'AMT_REQ_CREDIT_BUREAU_MON_6', 'AMT_REQ_CREDIT_BUREAU_MON_7', 'AMT_REQ_CREDIT_BUREAU_MON_8',
'AMT_REQ_CREDIT_BUREAU_MON_9', 'AMT_REQ_CREDIT_BUREAU_MON_10', 'AMT_REQ_CREDIT_BUREAU_MON_11',
'AMT_REQ_CREDIT_BUREAU_MON_12', 'AMT_REQ_CREDIT_BUREAU_MON_13', 'AMT_REQ_CREDIT_BUREAU_MON_14',
'AMT_REQ_CREDIT_BUREAU_MON_15', 'AMT_REQ_CREDIT_BUREAU_MON_16', 'AMT_REQ_CREDIT_BUREAU_MON_17',
'AMT_REQ_CREDIT_BUREAU_MON_18', 'AMT_REQ_CREDIT_BUREAU_MON_19', 'AMT_REQ_CREDIT_BUREAU_MON_22',
'AMT_REQ_CREDIT_BUREAU_MON_23', 'AMT_REQ_CREDIT_BUREAU_MON_24', 'AMT_REQ_CREDIT_BUREAU_MON_27'], index=train.index)
train = pd.concat([train, train_AMT_REQ_CREDIT_BUREAU_MON], axis = 1)
del(train['AMT_REQ_CREDIT_BUREAU_MON'])
sorted(train['AMT_REQ_CREDIT_BUREAU_QRT'].unique())
from sklearn.preprocessing import LabelBinarizer
train_AMT_REQ_CREDIT_BUREAU_QRT = pd.DataFrame(LabelBinarizer().
fit_transform(train['AMT_REQ_CREDIT_BUREAU_QRT']),
columns = ['AMT_REQ_CREDIT_BUREAU_QRT_0', 'AMT_REQ_CREDIT_BUREAU_QRT_1', 'AMT_REQ_CREDIT_BUREAU_QRT_2',
'AMT_REQ_CREDIT_BUREAU_QRT_3', 'AMT_REQ_CREDIT_BUREAU_QRT_4', 'AMT_REQ_CREDIT_BUREAU_QRT_5',
'AMT_REQ_CREDIT_BUREAU_QRT_6', 'AMT_REQ_CREDIT_BUREAU_QRT_7', 'AMT_REQ_CREDIT_BUREAU_QRT_8',
'AMT_REQ_CREDIT_BUREAU_QRT_19', 'AMT_REQ_CREDIT_BUREAU_QRT_261'
], index=train.index)
train = pd.concat([train, train_AMT_REQ_CREDIT_BUREAU_QRT], axis = 1)
del(train['AMT_REQ_CREDIT_BUREAU_QRT'])
print(sorted(train['AMT_REQ_CREDIT_BUREAU_YEAR'].unique()))
from sklearn.preprocessing import LabelBinarizer
train_AMT_REQ_CREDIT_BUREAU_YEAR = pd.DataFrame(LabelBinarizer().
fit_transform(train['AMT_REQ_CREDIT_BUREAU_YEAR']),
columns = ['AMT_REQ_CREDIT_BUREAU_YEAR_0', 'AMT_REQ_CREDIT_BUREAU_YEAR_1', 'AMT_REQ_CREDIT_BUREAU_YEAR_2',
'AMT_REQ_CREDIT_BUREAU_YEAR_3', 'AMT_REQ_CREDIT_BUREAU_YEAR_4', 'AMT_REQ_CREDIT_BUREAU_YEAR_5',
'AMT_REQ_CREDIT_BUREAU_YEAR_6', 'AMT_REQ_CREDIT_BUREAU_YEAR_7', 'AMT_REQ_CREDIT_BUREAU_YEAR_8',
'AMT_REQ_CREDIT_BUREAU_YEAR_9', 'AMT_REQ_CREDIT_BUREAU_YEAR_10', 'AMT_REQ_CREDIT_BUREAU_YEAR_11',
'AMT_REQ_CREDIT_BUREAU_YEAR_12', 'AMT_REQ_CREDIT_BUREAU_YEAR_13', 'AMT_REQ_CREDIT_BUREAU_YEAR_14',
'AMT_REQ_CREDIT_BUREAU_YEAR_15', 'AMT_REQ_CREDIT_BUREAU_YEAR_16', 'AMT_REQ_CREDIT_BUREAU_YEAR_17',
'AMT_REQ_CREDIT_BUREAU_YEAR_18', 'AMT_REQ_CREDIT_BUREAU_YEAR_19', 'AMT_REQ_CREDIT_BUREAU_YEAR_20',
'AMT_REQ_CREDIT_BUREAU_YEAR_21', 'AMT_REQ_CREDIT_BUREAU_YEAR_22', 'AMT_REQ_CREDIT_BUREAU_YEAR_23',
'AMT_REQ_CREDIT_BUREAU_YEAR_25'], index=train.index)
train = pd.concat([train, train_AMT_REQ_CREDIT_BUREAU_YEAR], axis = 1)
del(train['AMT_REQ_CREDIT_BUREAU_YEAR'])
```
# FINAL FRAME
```
train.shape
for i in train.columns:
print(i)
missing_values_table(train)
train.to_csv('fully_cleaned_application_train.csv')
```
```
import json
import os
from pathlib import Path
from arcgis.gis import GIS
from dotenv import find_dotenv, load_dotenv
import pandas as pd
import requests
load_dotenv(find_dotenv())
dir_prj = Path.cwd().parent
dir_data = dir_prj/'data'
dir_reports = dir_prj/'reports'
assert dir_reports.exists()
url, user, pswd = [os.getenv(itm) for itm in ['AGOL_URL', 'AGOL_USERNAME', 'AGOL_PASSWORD']]
assert all([url, user, pswd])
gis = GIS(url, username=user, password=pswd)
gis
reports_url = gis.properties.helperServices.geoenrichment.url + "/Geoenrichment/Reports"
reports_url
infographic_url = gis.properties.helperServices.geoenrichment.url + '/Geoenrichment/Infographics/Standard/US/census'
infographic_url
payload = {
'f': 'json'
}
res = requests.get(infographic_url, params=payload)
res
res = gis._con.post(infographic_url, params={'f': 'json'}, ignore_error_key=False)
#
std_ig_df = pd.json_normalize(res['reports'])
std_ig_df.columns = [col.replace('metadata.', '') for col in std_ig_df.columns]
std_ig_df = std_ig_df[['reportID', 'title', 'itemID', 'formats', 'dataVintage', 'dataVintageDescription', 'countries', 'hierarchy']].copy()
std_ig_df['category'] = 'standard'
std_ig_df
gis.content.search('Public Safety Test')
gis.content.search('owner:jmccune_baqa')
itm_lst = gis.content.search('type:Report Template')
itm_lst
ig_itm_lst = [itm for itm in itm_lst if any([kw for kw in itm.typeKeywords if 'infographic' in kw.lower()])]
ig_dict_lst = []
for itm in ig_itm_lst:
itm_dict = {
'title': itm.__dict__['title'],
'itemID': itm.__dict__['itemid'],
'itemDescription': itm.__dict__['description'],
'countries': itm.__dict__['properties']['countries'],
'formats': itm.properties['formats'],
'dataVintage': itm.properties['dataVintage'],
'dataVintageDescription': itm.properties['dataVintageDescription']
}
ig_dict_lst.append(itm_dict)
cst_ig_df = pd.DataFrame(ig_dict_lst)
cst_ig_df['category'] = 'custom'
cst_ig_df
uniq_cntry = std_ig_df['countries'].explode().unique()
uniq_ig_df = cst_ig_df[cst_ig_df.countries.isin(uniq_cntry)]
cst_cntry_lst = cst_ig_df[cst_ig_df.countries.isin(uniq_cntry)]
pd.concat([std_ig_df, uniq_ig_df], ignore_index=True)
from pathlib import Path
from typing import Union, List
from arcgis.geoenrichment import create_report
from arcgis.geometry import Geometry
from arcgis.gis import GIS
def custom_create_report(
study_areas: Union[Geometry, List[Geometry]],
gis:GIS, report:str,
out_path: Union[str, Path],
export_format: str = 'pdf'
):
# ensure list
in_geom = [study_areas] if not isinstance(study_areas, list) else study_areas
    # format geometries as a list of dicts so create_report passes them through unchanged
    in_geom = [{'geometry': json.loads(geom.JSON)} for geom in in_geom]
    # validate the export format
    export_format = export_format.lower()
    assert export_format in ['xlsx', 'pdf', 'html']
    # get the directory and file name (coerce to Path in case a plain string was passed)
    out_path = Path(out_path)
    out_folder = str(out_path.parent)
    out_name = str(out_path.name)
# get the report
out_report = create_report(
study_areas=in_geom,
report=report,
export_format=export_format,
out_folder=out_folder,
out_name=out_name,
gis=gis
)
return Path(out_report)
study_areas = Geometry({"rings":[[[-117.26,32.81],[-117.40,32.92],[-117.12,32.80],[-117.26,32.81]]], "spatialReference":{"wkid":4326}})
load_dotenv(find_dotenv())
dir_prj = Path.cwd().parent
dir_data = dir_prj/'data'
dir_reports = dir_prj/'reports'
assert dir_reports.exists()
url, user, pswd = [os.getenv(itm) for itm in ['AGOL_URL', 'AGOL_USERNAME', 'AGOL_PASSWORD']]
assert all([url, user, pswd])
gis = GIS(url, username=user, password=pswd)
gis
out_report = custom_create_report(study_areas, gis, 'tapestry-profile', dir_reports/'tapestry-test.pdf')
out_report
report_id = 'b3102bdb4cec4795987f64e385066684'
report = {"itemid": report_id}
out_report = custom_create_report(study_areas, gis, report_id, dir_reports/'custom-infographic-test.pdf', 'pdf')
out_report
```
## Evaluating Performance of a Binary Classifier
A binary classification model is used for predicting a binary outcome (Pass/Fail, 1/0, True/False).
Some algorithms output a raw score that indicates the probability of a sample belonging to the positive class.
<q><i>The actual output of many binary classification algorithms is a prediction score. The score indicates the system’s certainty that the given observation belongs to the positive class</i></q><br>
Reference:
https://docs.aws.amazon.com/machine-learning/latest/dg/binary-classification.html
To convert this raw score to a positive or negative class, we need to specify a cut-off. A sample with a score greater than the cut-off is classified as the positive class, and a sample with a score less than the cut-off is classified as the negative class.
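For example, here is a minimal sketch of applying a cut-off to raw scores (the `scores` values and the 0.5 threshold below are made up purely for illustration):

```
import numpy as np

# hypothetical raw prediction scores for six samples
scores = np.array([0.12, 0.48, 0.51, 0.73, 0.35, 0.92])

cut_off = 0.5  # assumed threshold; in practice it is chosen to suit the problem
predicted_class = (scores > cut_off).astype(int)  # 1 = positive class, 0 = negative class
print(predicted_class)  # -> [0 0 1 1 0 1]
```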
To find out how good the model predictions are, we need to check them against previously unseen samples that were not used for training. Usually, 30% of the available samples are reserved for testing while the remaining 70% are used for training.
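As a quick sketch of such a split (using a tiny synthetic data set, not the exam data used below), scikit-learn's `train_test_split` reserves the requested fraction for testing:

```
import numpy as np
from sklearn.model_selection import train_test_split

# tiny synthetic example: 10 samples, one feature, binary label
X = np.arange(10).reshape(-1, 1)
y = np.array([0, 0, 0, 0, 1, 0, 1, 1, 1, 1])

# 30% of the samples are held out for testing, 70% are used for training
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=42)
print(len(X_train), len(X_test))  # -> 7 3
```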
By comparing predicted values against known results in the test data, we can assess overall model performance<br>
In this first part, let's look at common metrics used for evaluating binary classifier performance.
In the second part, we will look at algorithms that produce raw scores and how a model performs at different cut-off thresholds.
Common Techniques for evaluating performance:<br>
<li>Visually observe using Plots</li>
<li>Confusion Matrix</li>
<li>Evaluate with Metrics like Recall, Precision, Accuracy, False Alarm, F1-score, AUC Score</li>
<p>While Plots are good for humans to visually observe the results, we often need a single metric that can indicate the quality of a model. This can be useful for programmatically identifying which model is performing better (for example: using automatic model tuning to select the best performing model)</p>
Reference:<br>https://docs.aws.amazon.com/machine-learning/latest/dg/binary-classification.html<br>
Confusion Matrix:<br>https://en.wikipedia.org/wiki/Confusion_matrix<br>
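As a small illustration of using a single metric to compare models programmatically (the labels and predictions below are made up), scikit-learn exposes each metric as a one-line function call:

```
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# hypothetical ground truth and predictions from two candidate models
y_true    = [1, 0, 1, 1, 0, 0, 1, 0]
y_model_a = [1, 0, 1, 0, 0, 0, 1, 0]
y_model_b = [1, 1, 1, 1, 0, 1, 1, 0]

for name, y_pred in [('Model A', y_model_a), ('Model B', y_model_b)]:
    print(name,
          'accuracy=%.2f' % accuracy_score(y_true, y_pred),
          'precision=%.2f' % precision_score(y_true, y_pred),
          'recall=%.2f' % recall_score(y_true, y_pred),
          'F1=%.2f' % f1_score(y_true, y_pred))
```
(`roc_auc_score` works the same way but expects the raw prediction scores rather than the thresholded class labels.)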
<h2>Binary Classifier Metrics</h2>
positive = number of actual positives (count)<br>
negative = number of actual negatives (count)<br>
True Positive = tp = how many samples were correctly classified as positive (count)<br>
True Negative = tn = how many samples were correctly classified as negative (count)<br>
False Positive = fp = how many negative samples were mis-classified as positive (count)<br>
False Negative = fn = how many positive samples were mis-classified as negative (count)<br>
<h4>True Positive Rate (TPR, Recall, Probability of detection) = True Positive/Positive</h4>
How many positives were correctly classified? (fraction)<br>
A recall value closer to 1 is better; closer to 0 is worse.
<i>Example: Radar Operator watching the skies for enemy planes.
Positive Class = Enemy Plane
Negative Class = Friendly Plane
True Positive Rate or Probability of detection – is the probability of correctly classifying an enemy plane
</i>
<h4>True Negative Rate = True Negative/Negative</h4>
How many negatives were correctly classified? (fraction)<br>
A true negative rate closer to 1 is better; closer to 0 is worse.
<i>True negative rate – is the probability of correctly classifying a friendly plane</i>
<h4>False Positive Rate (FPR, Probability of false alarm) = False Positive/Negative</h4>
How many negatives were mis-classified as positives (fraction)<br>
A false positive rate closer to 0 is better; closer to 1 is worse.
<i>Another name for this is Probability of false alarm – is the probability of mis-classifying a friendly plane as an enemy plane</i>
<h4>False Negative Rate (FNR, Misses) = False Negative/Positive</h4>
How many positives were mis-classified as negative (fraction)<br>
A false negative rate closer to 0 is better; closer to 1 is worse.
<i>False Negative Rate - is the probability of mis-classifying an enemy plane as a friendly plane</i>
<h4>Precision = True Positive/(True Positive + False Positive)</h4>
How many positives classified by the algorithm are really positives? (fraction)<br>
A precision value closer to 1 is better; closer to 0 is worse.
<i>Precision would go up as enemy planes are correctly identified, while minimizing false alarm</i>
<h4>Accuracy = (True Positive + True Negative)/(Positive + Negative)</h4>
How many positives and negatives were correctly classified? (fraction)<br>
An accuracy value closer to 1 is better; closer to 0 is worse.
<i>Accuracy would go up when enemy planes and friendly planes are correctly identified</i>
<h4>F1 Score = harmonic mean of Precision and Recall = 2*Precision*Recall / (Precision + Recall)</h4>
F1 Score closer to 1 is better. Closer to 0 is worse.
Reference:
Harmonic Mean - https://en.wikipedia.org/wiki/Harmonic_mean <br>
Confusion Matrix - https://en.wikipedia.org/wiki/Confusion_matrix
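To make these formulas concrete, here is a small worked example with made-up counts (tp=8, fn=2, fp=1, tn=9):

```
# made-up confusion-matrix counts, for illustration only
tp, fn, fp, tn = 8, 2, 1, 9
positive, negative = tp + fn, tn + fp              # 10 actual positives, 10 actual negatives

recall    = tp / positive                          # 8/10  = 0.80
precision = tp / (tp + fp)                         # 8/9   ~ 0.89
accuracy  = (tp + tn) / (positive + negative)      # 17/20 = 0.85
f1        = 2 * precision * recall / (precision + recall)   # ~ 0.84
print(recall, precision, accuracy, f1)
```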
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import itertools
```
<h2>Exam Result Data set</h2>
<h3>Columns</h3>
<li>Pass = Actual Pass or Fail for the sample. Pass=1, Fail=0</li>
<li>Model1_Prediction = Predicted Pass or Fail by model 1</li>
<li>Model2_Prediction = Predicted Pass or Fail by model 2</li>
<li>Model3_Prediction = Predicted Pass or Fail by model 3</li>
<li>Model4_Prediction = Predicted Pass or Fail by model 4</li>
<p>We are going to compare performance of these four models</p>
Hours Spent and Exam Result (Pass/Fail) Data set:<br>
https://en.wikipedia.org/wiki/Logistic_regression
```
models = ['Model 1','Model 2', 'Model 3', 'Model 4']
df = pd.read_csv('HoursExamSample.csv')
df
```
## Plot Data
Compare performance visually <br>
```
plt.figure()
plt.scatter(df['Hours'],df['Pass'],label='Actual')
plt.legend(loc=7)
plt.yticks([0,1])
plt.xlabel('Hours')
plt.ylabel('Pass / Fail')
plt.show()
# Compare performance of Actual and Model 1 Prediction
plt.figure()
plt.scatter(df['Hours'],df['Pass'],label='Actual')
plt.scatter(df['Hours'],df['Model1_Prediction'],label='Model 1',marker='^')
plt.legend(loc=7)
plt.yticks([0,1])
plt.xlabel('Hours')
plt.ylabel('Pass / Fail')
plt.show()
plt.figure(figsize=(10,10))
for idx, model in enumerate(models):
plt.subplot(2,2,idx+1)
plt.scatter(df['Hours'],df['Pass'],label='Actual')
plt.scatter(df['Hours'],df[model.replace(' ','') + '_Prediction'],
label=model,marker='^')
plt.yticks([0,1])
plt.legend(loc=7)
plt.xlabel('Hours')
plt.ylabel('Pass / Fail')
```
From the plots, we can observe that:<br>
<li>Model 1 is classifying samples as Pass if hours spent studying is greater than 1.5 hours</li>
<li>Model 2 is classifying all samples as Fail</li>
<li>Model 3 is classifying samples as Pass if hours spent studying is around 3 hours or more</li>
<li>Model 4 is classifying all samples as Pass</li>
<h2>Confusion Matrix</h2>
A confusion matrix is a table that summarizes the performance of a classification model.<br><br>
It summarizes predictions into four categories:<br>
True Positive = tp = how many samples were correctly classified as positive (count)<br>
True Negative = tn = how many samples were correctly classified as negative (count)<br>
False Positive = fp = how many negative samples were mis-classified as positive (count)<br>
False Negative = fn = how many positive samples were mis-classified as negative (count)<br>
<br>
Using these four metrics, we can derive other useful metrics like Recall, Precision, Accuracy, F1-Score and so forth.<br>
<br>
Reference:<br>
https://en.wikipedia.org/wiki/Confusion_matrix
```
from sklearn.metrics import classification_report, confusion_matrix
# Compute confusion matrix
# Compare Actual Vs Model 1 Predictions
cnf_matrix = confusion_matrix(df['Pass'],df['Model1_Prediction'],labels=[1,0])
cnf_matrix
# Reference: https://scikit-learn.org/stable/modules/model_evaluation.html
# Explicitly stating labels. Pass=1, Fail=0
def true_positive(y_true, y_pred):
return confusion_matrix(y_true, y_pred,labels=[1,0])[0, 0]
def true_negative(y_true, y_pred):
return confusion_matrix(y_true,y_pred,labels=[1,0])[1, 1]
def false_positive(y_true, y_pred):
return confusion_matrix(y_true, y_pred,labels=[1,0])[1, 0]
def false_negative(y_true, y_pred):
return confusion_matrix(y_true, y_pred,labels=[1,0])[0, 1]
# Compute Binary Classifier Metrics
# Returns a dictionary {"MetricName":Value,...}
def binary_classifier_metrics(y_true, y_pred):
metrics = {}
# References:
# https://docs.aws.amazon.com/machine-learning/latest/dg/binary-classification.html
# https://en.wikipedia.org/wiki/Confusion_matrix
# Definition:
# true positive = tp = how many samples were correctly classified as positive (count)
# true negative = tn = how many samples were correctly classified as negative (count)
# false positive = fp = how many negative samples were mis-classified as positive (count)
# false_negative = fn = how many positive samples were mis-classified as negative (count)
# positive = number of positive samples (count)
# = true positive + false negative
# negative = number of negative samples (count)
# = true negative + false positive
tp = true_positive(y_true, y_pred)
tn = true_negative(y_true, y_pred)
fp = false_positive(y_true, y_pred)
fn = false_negative(y_true, y_pred)
positive = tp + fn
negative = tn + fp
metrics['TruePositive'] = tp
metrics['TrueNegative'] = tn
metrics['FalsePositive'] = fp
metrics['FalseNegative'] = fn
metrics['Positive'] = positive
metrics['Negative'] = negative
# True Positive Rate (TPR, Recall) = true positive/positive
# How many positives were correctly classified? (fraction)
# Recall value closer to 1 is better. closer to 0 is worse
if tp == 0:
recall = 0
else:
recall = tp/positive
metrics['Recall'] = recall
# True Negative Rate = True Negative/negative
# How many negatives were correctly classified? (fraction)
# True Negative Rate value closer to 1 is better. closer to 0 is worse
if tn == 0:
tnr = 0
else:
tnr = tn/(negative)
metrics['TrueNegativeRate'] = tnr
# Precision = True Positive/(True Positive + False Positive)
# How many positives classified by the algorithm are really positives? (fraction)
# Precision value closer to 1 is better. closer to 0 is worse
if tp == 0:
precision = 0
else:
precision = tp/(tp + fp)
metrics['Precision'] = precision
# Accuracy = (True Positive + True Negative)/(total positive + total negative)
# How many positives and negatives were correctly classified? (fraction)
# Accuracy value closer to 1 is better. closer to 0 is worse
accuracy = (tp + tn)/(positive + negative)
metrics['Accuracy'] = accuracy
# False Positive Rate (FPR, False Alarm) = False Positive/(total negative)
# How many negatives were mis-classified as positives (fraction)
# False Positive Rate value closer to 0 is better. closer to 1 is worse
if fp == 0:
fpr = 0
else:
fpr = fp/(negative)
metrics['FalsePositiveRate'] = fpr
# False Negative Rate (FNR, Misses) = False Negative/(total Positive)
# How many positives were mis-classified as negative (fraction)
# False Negative Rate value closer to 0 is better. closer to 1 is worse
fnr = fn/(positive)
metrics['FalseNegativeRate'] = fnr
# F1 Score = harmonic mean of Precision and Recall
# F1 Score closer to 1 is better. Closer to 0 is worse.
if precision == 0 or recall == 0:
f1 = 0
else:
f1 = 2*precision*recall/(precision+recall)
metrics['F1'] = f1
return metrics
# Reference:
# https://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
#print("Normalized confusion matrix")
#else:
# print('Confusion matrix, without normalization')
#print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
# Compute confusion matrix
cnf_matrix = confusion_matrix(df['Pass'],df['Model1_Prediction'],labels=[1,0])
cnf_matrix
print('TP:',true_positive(df['Pass'],df['Model1_Prediction']))
print('TN:',true_negative(df['Pass'],df['Model1_Prediction']))
print('FP:',false_positive(df['Pass'],df['Model1_Prediction']))
print('FN:',false_negative(df['Pass'],df['Model1_Prediction']))
# Plot confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Pass','Fail'],
title='Model 1 Confusion matrix - Count')
# Plot normalized confusion matrix (numbers are shown as a fraction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Pass','Fail'],
title='Model 1 Confusion matrix - Fraction',normalize=True)
# Plot confusion matrix
# Show actual counts
for model in models:
#print(model)
cnf_matrix = confusion_matrix(df['Pass'],
df[model.replace(' ','') + '_Prediction'],labels=[1,0])
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Pass','Fail'],
title= model + ' Confusion matrix - Count', normalize=False)
# Compute confusion matrix
# Show Fraction
for model in models:
#print(model)
cnf_matrix = confusion_matrix(df['Pass'],
df[model.replace(' ','') + '_Prediction'],labels=[1,0])
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Pass','Fail'],
title= model + ' Confusion matrix - Fraction', normalize=True)
# Compute Metrics for all models
all_metrics = []
for model in models:
print(model)
colname = model.replace(' ','') + '_Prediction'
metrics = binary_classifier_metrics(df['Pass'],df[colname])
all_metrics.append(metrics)
# Create a metrics dataframe
# https://stackoverflow.com/questions/41168558/python-how-to-convert-json-file-to-dataframe/41168691
df_metrics=pd.DataFrame.from_dict(all_metrics)
df_metrics.index = models
print('Counts')
print(df_metrics[['TruePositive',
'FalseNegative',
'FalsePositive',
'TrueNegative',]].round(2))
print()
print('Ratios')
print(df_metrics[['Recall',
'FalseNegativeRate',
'FalsePositiveRate',
'TrueNegativeRate',]].round(2))
print()
print(df_metrics[['Precision',
'Accuracy',
'F1']].round(2))
```
<h2>Compute Metrics using SKLearn Classification Report</h2>
```
# Using SKLearn classification report
# Micro Average Vs Macro Average
# https://datascience.stackexchange.com/questions/15989/micro-average-vs-macro-average-performance-in-a-multiclass-classification-settin
for model in models:
print(model)
print(classification_report(
df['Pass'],
df[model.replace(' ','') + '_Prediction'],
labels=[1,0],
target_names=['Pass','Fail']))
```
<h2>Summary</h2>
From these metrics, we can see that Model 1 and Model 3 perform better than Models 2 and 4.
Model 1 has higher Recall (it correctly identifies more positive samples) at the cost of a higher False Positive Rate (more negative samples were misclassified as positive).
Model 3 offers more balanced performance.
# Tests and conditions
## 1) Introduction
In Python, tests are performed with the "if" instruction and a comparison between variables. The instructions are contained in an indented block and are executed only if the condition is true.
```
x=2
if x==2:
print("It is an integer")
if x=="tiger":
print("It is an animal")
```
## 2) Multiple conditions
If one needs to make a test and execute an alternative block when the test fails, this is done like this:
```
x=2
if x==2:
print("It is an integer")
else:
print("It is not an integer ")
```
More complicated cases, where several tests need to be done, are handled like this:
```
x=3
if x==2:
print("x is 2")
elif x>2:
print("x is greather than 2 ")
elif x==0:
print("x is equal to 0")
else:
print("x is not equal to 0, and is less or equal to 2")
```
It is very important to be careful with the indentation at this point, since each block of instructions is delimited by its indentation. Two very similar pieces of code can give two very different results if you don't pay attention:
```
num = [4, 5, 6]
for nb in num :
if nb == 5:
print (" The test is true ")
print (" because nb is equal to {}". format (nb ))
print ("############")
for nb in num :
if nb == 5:
print (" The test is true ")
print (" because nb is equal to {}". format (nb ))
```
## 3) and, or
One can also combine different conditions using the "and" and "or" operators. The following table summarizes the results of combining them:
| condition 1 | operator | condition 2 | result |
|-------------|-----------|-------------|--------|
| True | OR | True | True |
| True | OR | False | True |
| False | OR | True | True |
| False | OR | False | False |
| True | AND | True | True |
| True | AND | False | False |
| False | AND | True | False |
| False | AND | False | False |
Here are some examples:
```
x=2
y=2
if x==2 and y==2:
print ("The test is true")
print(True or False)
```
## 4) break and continue
These instructions are used to modify the behavior of loops ("for" or "while"). The "break" instruction stops a loop, while "continue" skips the rest of the instruction block and moves on to the next iteration. For example:
```
print("Example of break:")
for i in range (5):
if i > 2:
break
print (i)
print("Example of continue:")
for i in range (5):
if i == 2:
continue
print (i)
```
## 5) Testing float numbers:
When testing floating-point numbers it is not wise to use the equality operator. Floats are only stored up to a given precision, so two numbers that should be equal may not have the exact same computed value. For example:
```
print((3 - 2.7) == 0.3)
print(3-2.7)
```
A good way to see this is to look at a number using the formatted printout that we saw before.
```
print(0.3)
print("{:.5f}".format(0.3))
print("{:.60f}".format(0.3))
print("{:.60f}".format(3.0 - 2.7))
```
The easiest way to test the equality of two floating-point variables is to compare them within a given tolerance. For instance:
```
delta = 0.0001
var = 3.0 - 2.7
print(abs ( var - 0.3) < delta) #Using the absolute value
```
## 6) Examples:
- The min function returns the smallest element in a given list. Without using this function, determine the smallest element in the following list: [8, 4, 6, 1, 5]
- Get the maximum value in the following list [14, 9, 13, 15 and 12], and get the mean value after having removed the two extreme numbers of the list.
- Make a loop that constructs 4 lists using all numbers from 0 to 100: in the first list store the even numbers, in the second list the numbers divisible by 3, in the third list the ones divisible by 5, and in the fourth list the ones divisible by 10. These lists should not be exclusive of each other.
-> If I have time, I will add more exercises.
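Here is one possible solution to the first exercise, as a starting point (only one of many ways to write it):
```
# Find the smallest element of a list without using min()
numbers = [8, 4, 6, 1, 5]
smallest = numbers[0]
for n in numbers:
    if n < smallest:
        smallest = n
print("The smallest element is {}".format(smallest))
```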
# Regression Week 2: Multiple Regression (Interpretation)
The goal of this first notebook is to explore multiple regression and feature engineering with existing graphlab functions.
In this notebook you will use data on house sales in King County to predict prices using multiple regression. You will:
* Use SFrames to do some feature engineering
* Use built-in graphlab functions to compute the regression weights (coefficients/parameters)
* Given the regression weights, predictors and outcome write a function to compute the Residual Sum of Squares
* Look at coefficients and interpret their meanings
* Evaluate multiple models via RSS
# Fire up graphlab create
```
import graphlab
```
# Load in house sales data
Dataset is from house sales in King County, the region where the city of Seattle, WA is located.
```
sales = graphlab.SFrame('kc_house_data.gl/')
```
# Split data into training and testing.
We use seed=0 so that everyone running this notebook gets the same results. In practice, you may set a random seed (or let GraphLab Create pick a random seed for you).
```
train_data,test_data = sales.random_split(.8,seed=0)
```
# Learning a multiple regression model
Recall that we can learn a multiple regression model predicting 'price' based on the features
example_features = ['sqft_living', 'bedrooms', 'bathrooms'] on the training data with the following code:
(Aside: We set validation_set = None to ensure that the results are always the same)
```
example_features = ['sqft_living', 'bedrooms', 'bathrooms']
example_model = graphlab.linear_regression.create(train_data, target = 'price', features = example_features,
validation_set = None)
```
Now that we have fitted the model we can extract the regression weights (coefficients) as an SFrame as follows:
```
example_weight_summary = example_model.get("coefficients")
print example_weight_summary
```
# Making Predictions
In the gradient descent notebook we used numpy to do our regression. In this notebook we will use existing GraphLab Create functions to analyze multiple regressions.
Recall that once a model is built we can use the .predict() function to find the predicted values for data we pass. For example using the example model above:
```
example_predictions = example_model.predict(train_data)
print example_predictions[0] # should be 271789.505878
```
# Compute RSS
Now that we can make predictions given the model, let's write a function to compute the RSS of the model. Complete the function below to calculate RSS given the model, data, and the outcome.
```
def get_residual_sum_of_squares(model, data, outcome):
# First get the predictions
# Then compute the residuals/errors
# Then square and add them up
return(RSS)
```
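One possible completion of this function is sketched below; it assumes, as in the example above, that `model.predict()` returns an SArray so that the arithmetic is element-wise:
```
def get_residual_sum_of_squares(model, data, outcome):
    # First get the predictions
    predictions = model.predict(data)
    # Then compute the residuals/errors
    residuals = outcome - predictions
    # Then square and add them up
    RSS = (residuals * residuals).sum()
    return(RSS)
```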
Test your function by computing the RSS on TEST data for the example model:
```
rss_example_train = get_residual_sum_of_squares(example_model, test_data, test_data['price'])
print rss_example_train # should be 2.7376153833e+14
```
# Create some new features
Although we often think of multiple regression as including multiple different features (e.g. # of bedrooms, squarefeet, and # of bathrooms), we can also consider transformations of existing features, e.g. the log of the squarefeet, or even "interaction" features such as the product of bedrooms and bathrooms.
You will use the logarithm function to create a new feature, so first you should import it from the math library.
```
from math import log
```
Next create the following 4 new features as column in both TEST and TRAIN data:
* bedrooms_squared = bedrooms\*bedrooms
* bed_bath_rooms = bedrooms\*bathrooms
* log_sqft_living = log(sqft_living)
* lat_plus_long = lat + long
As an example here's the first one:
```
train_data['bedrooms_squared'] = train_data['bedrooms'].apply(lambda x: x**2)
test_data['bedrooms_squared'] = test_data['bedrooms'].apply(lambda x: x**2)
# create the remaining 3 features in both TEST and TRAIN data
```
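One possible way to create the remaining three features in both sets, following the definitions in the list above (a sketch, not the only valid answer):
```
for data in (train_data, test_data):
    data['bed_bath_rooms'] = data['bedrooms'] * data['bathrooms']
    data['log_sqft_living'] = data['sqft_living'].apply(lambda x: log(x))
    data['lat_plus_long'] = data['lat'] + data['long']
```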
* Squaring bedrooms will increase the separation between not many bedrooms (e.g. 1) and lots of bedrooms (e.g. 4) since 1^2 = 1 but 4^2 = 16. Consequently this feature will mostly affect houses with many bedrooms.
* bedrooms times bathrooms gives what's called an "interaction" feature. It is large when *both* of them are large.
* Taking the log of squarefeet has the effect of bringing large values closer together and spreading out small values.
* Adding latitude to longitude is totally non-sensical but we will do it anyway (you'll see why)
**Quiz Question: What is the mean (arithmetic average) value of your 4 new features on TEST data? (round to 2 digits)**
# Learning Multiple Models
Now we will learn the weights for three (nested) models for predicting house prices. The first model will have the fewest features the second model will add one more feature and the third will add a few more:
* Model 1: squarefeet, # bedrooms, # bathrooms, latitude & longitude
* Model 2: add bedrooms\*bathrooms
* Model 3: Add log squarefeet, bedrooms squared, and the (nonsensical) latitude + longitude
```
model_1_features = ['sqft_living', 'bedrooms', 'bathrooms', 'lat', 'long']
model_2_features = model_1_features + ['bed_bath_rooms']
model_3_features = model_2_features + ['bedrooms_squared', 'log_sqft_living', 'lat_plus_long']
```
Now that you have the features, learn the weights for the three different models for predicting target = 'price' using graphlab.linear_regression.create() and look at the value of the weights/coefficients:
```
# Learn the three models: (don't forget to set validation_set = None)
# Examine/extract each model's coefficients:
```
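A possible way to learn the three models, reusing the same `graphlab.linear_regression.create()` call shown earlier (a sketch):
```
model_1 = graphlab.linear_regression.create(train_data, target='price',
                                            features=model_1_features, validation_set=None)
model_2 = graphlab.linear_regression.create(train_data, target='price',
                                            features=model_2_features, validation_set=None)
model_3 = graphlab.linear_regression.create(train_data, target='price',
                                            features=model_3_features, validation_set=None)

# Examine the learned coefficients, e.g. for model 1:
print(model_1.get("coefficients"))
```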
**Quiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 1?**
**Quiz Question: What is the sign (positive or negative) for the coefficient/weight for 'bathrooms' in model 2?**
Think about what this means.
# Comparing multiple models
Now that you've learned three models and extracted the model weights we want to evaluate which model is best.
First use your functions from earlier to compute the RSS on TRAINING Data for each of the three models.
```
# Compute the RSS on TRAINING data for each of the three models and record the values:
```
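A sketch of this comparison on the training data, assuming the three models were created as `model_1`, `model_2` and `model_3` as in the sketch above:
```
for name, model in [('Model 1', model_1), ('Model 2', model_2), ('Model 3', model_3)]:
    rss = get_residual_sum_of_squares(model, train_data, train_data['price'])
    print(name + ' training RSS: ' + str(rss))
```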
**Quiz Question: Which model (1, 2 or 3) has lowest RSS on TRAINING Data?** Is this what you expected?
Now compute the RSS on on TEST data for each of the three models.
```
# Compute the RSS on TESTING data for each of the three models and record the values:
```
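The same comparison on the test data, with the same assumptions:
```
for name, model in [('Model 1', model_1), ('Model 2', model_2), ('Model 3', model_3)]:
    rss = get_residual_sum_of_squares(model, test_data, test_data['price'])
    print(name + ' test RSS: ' + str(rss))
```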
**Quiz Question: Which model (1, 2 or 3) has lowest RSS on TESTING Data?** Is this what you expected? Think about the features that were added to each model compared to the previous one.
# Learning a Reward Function using Preference Comparisons
The preference comparisons algorithm learns a reward function by comparing trajectory segments to each other.
To set up the preference comparisons algorithm, we first need to set up a lot of its internals beforehand:
```
from imitation.algorithms import preference_comparisons
from imitation.rewards.reward_nets import BasicRewardNet
from imitation.util.networks import RunningNorm
from imitation.policies.base import FeedForward32Policy, NormalizeFeaturesExtractor
import seals
import gym
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3 import PPO
venv = DummyVecEnv([lambda: gym.make("seals/CartPole-v0")] * 8)
reward_net = BasicRewardNet(
venv.observation_space, venv.action_space, normalize_input_layer=RunningNorm
)
fragmenter = preference_comparisons.RandomFragmenter(warning_threshold=0, seed=0)
gatherer = preference_comparisons.SyntheticGatherer(seed=0)
reward_trainer = preference_comparisons.CrossEntropyRewardTrainer(
model=reward_net,
epochs=3,
)
agent = PPO(
policy=FeedForward32Policy,
policy_kwargs=dict(
features_extractor_class=NormalizeFeaturesExtractor,
features_extractor_kwargs=dict(normalize_class=RunningNorm),
),
env=venv,
seed=0,
n_steps=2048 // venv.num_envs,
batch_size=64,
ent_coef=0.0,
learning_rate=0.0003,
n_epochs=10,
)
trajectory_generator = preference_comparisons.AgentTrainer(
algorithm=agent,
reward_fn=reward_net,
exploration_frac=0.0,
seed=0,
)
pref_comparisons = preference_comparisons.PreferenceComparisons(
trajectory_generator,
reward_net,
fragmenter=fragmenter,
preference_gatherer=gatherer,
reward_trainer=reward_trainer,
comparisons_per_iteration=100,
fragment_length=100,
transition_oversampling=1,
initial_comparison_frac=0.1,
allow_variable_horizon=False,
seed=0,
initial_epoch_multiplier=2, # Note: set to 200 to achieve sensible results
)
```
Then we can start training the reward model. Note that we need to specify the total timesteps for which the agent should be trained and how many fragment comparisons should be made.
```
pref_comparisons.train(
total_timesteps=1000, # Note: set to 40000 to achieve sensible results
total_comparisons=120, # Note: set to 4000 to achieve sensible results
)
```
After we have trained the reward network using the preference comparisons algorithm, we can wrap our environment with that learned reward.
```
from imitation.rewards.reward_wrapper import RewardVecEnvWrapper
learned_reward_venv = RewardVecEnvWrapper(venv, reward_net.predict)
```
Now we can train an agent that only sees the learned reward.
```
from stable_baselines3 import PPO
from stable_baselines3.ppo import MlpPolicy
learner = PPO(
policy=MlpPolicy,
env=learned_reward_venv,
seed=0,
batch_size=64,
ent_coef=0.0,
learning_rate=0.0003,
n_epochs=10,
n_steps=64,
)
learner.learn(1000) # Note: set to 100000 to train a proficient expert
```
Then we can evaluate it using the original reward.
```
from stable_baselines3.common.evaluation import evaluate_policy
reward, _ = evaluate_policy(learner.policy, venv, 10)
print(reward)
```
This is my attempt at coding and decoding a sample text.
```
import os
os.getcwd()
import re
```
I have made use of the 'string' module, which includes handy sets such as the upper and lower case letters, all the letters, and all punctuation marks.
The coding function codeText takes the text, the number of characters to offset and the chunk length.
Note that I use mod(26) to perform the offset, which caters for any input, positive or negative.
```
from string import ascii_uppercase
from random import choice
def codeText(text,c,l):
#c is offset, which could be negative
#l is chunk length
#remove all non-alphas and convert to upper case
newText=''
i=0
for ch in re.sub('[^A-Za-z]+', '', text).upper():
newText=newText+chr(65+(ord(ch)-65+c)%26)
i+=1
if i==l:
newText=newText+' '
i=0
#Now add characters to last group if necessary
while (len(newText)%(l+1)>0) and (len(newText)%(l+1)<l):
        newText=newText+choice(ascii_uppercase)
return newText
```
The decodeText function reverses the process. I have added two parameters. The first, c, is the character I want to test for since there is no guarantee that 'E' will be the most frequent. The second, n, is where I expect this character to appear in the sorted list.
This is to overcome subtleties where, for instance, there is more than one character with the maximum frequency. The order of characters will depend on the alphabetic order of characters in the code rather than the original message.
Note that this function also returns the code, which may be the original, the mod of the original if it was greater than 26, or the original +/- 26 depending on the coding.
For instance, if E is the most frequent letter and we code using a shift of 22, E=>A. Decoding will give a shift of -4.
```
def decodeText(text,c,n):
#c is character we're relying on
#n is place for c, eg if we believe it's second most common n=2
#This approach allows for the fact that 'E' is not always the most common letter in a sample of text
#remove spaces
text=text.replace(' ','')
#find most common letter
tDict = {}
for ch in ascii_uppercase:
tDict[ch]=text.count(ch)
#sort the list in descending order
sList=sorted(tDict.items(), key=lambda x: x[1],reverse=True)
shift=ord(sList[n-1][0])-ord(c)
newText=''
for ch in text:
newText=newText+chr(65+(ord(ch)-65-shift)%26)
return shift, newText
def load_data(filename):
return_string = ''
with open(filename, 'r') as raw_data:
for line in raw_data:
return_string = return_string + line
return return_string
text="This is a sample. It includes commas, and 'inverted' commas but doesn't miss the apostrophe."
print(text)
newText=codeText(text,6,7)
print(re.sub('[^A-Za-z]+', '', text).upper())
print(newText)
#decode it using S as the most frequent
decodeText(newText,'S',1)
```
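As a quick check of the shift normalisation described above (the padding letters are random, but since 'E' dominates this sample the reported shift should be 22 - 26 = -4):
```
# Coding with an offset of 22 maps E to A, so decoding reports a shift of -4
coded = codeText("Here we see seven E letters indeed", 22, 5)
print(coded)
print(decodeText(coded, 'E', 1))
```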
Here's the result of applying this to the Cypher2.txt file supplied
```
text=load_data('CYPHER2.TXT')
decodeText(text,'E',1)
```
# Physics Analysis using CMS Opendata
1. Using the CSV format in Jupyter
* Reading the CSV file
* Observing and selecting
2. Extracting data and saving it to a new file
3. Adding column to a DataFrame
* Performing the calculation of the invariant mass
4. Statistics with the CMS Open Data
* Mean
* Variance
* Standard deviation
5. Selecting the amount of data
* Evolving of the histogram when the amount of data increases
6. Plotting the invariant mass histogram
* Plotting the histogram without the weights
* Plotting the histogram with weights
7. The effect of the pseudorapidity $\eta$ to the resolution of the CMS detector
* Pseudorapidity
* The effect of pseudorapidity to the resolution of the measurement
8. Exercises
# Using the CSV format in Jupyter
The data collected by the CMS detector can be handled in many different file formats. One easy way is to handle the data as CSV files (comma-separated values). A CSV file is basically a regular text file which contains values separated by commas and line breaks.
### Reading the CSV file
CSV files can be read for example with the function `read_csv( )` of the pandas module. Let's use the data collected by the CMS detector in 2011 [1].
[1] CMS collaboration (2016). DoubleMu primary dataset in AOD format from RunA of 2011 (/DoubleMu/Run2011A-12Oct2013-v1/AOD). CERN Open Data Portal. DOI: 10.7483/OPENDATA.CMS.RZ34.QR6N.
To install pandas via conda: `conda install -c anaconda pandas`, or install the package only in the current kernel:
```
# Install a conda package in the current Jupyter kernel
import sys
!conda install --yes --prefix {sys.prefix} pandas
```
```
import pandas as pd
import matplotlib.pyplot as plt
# Jupyter Notebook uses "magic functions". With this function it is possible to plot
# the histogram straight to notebook.
%matplotlib inline
pd.set_option('display.max_columns', None)
dataset = pd.read_csv('Data/DoubleMuRun2011A.csv')
print("The file has %d rows."% len(dataset))
```
We can check what kind of information the file we read contains. Let's use the command `head( )` of the **pandas** module which will print the first five lines of the DataFrame variable written before the command ([pandas documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html)).
```
dataset.head()
```
Notice that there are more lines in the variable `dataset` than the five printed. We can check the number of the lines with the function `len( )` which will return the length of the variable given in the brackets.
```
len(dataset)
```
### Observing and selecting the values
From the print above, we can see that the content of the file has been saved into a table (a DataFrame tabular data structure). Each line of the table represents a different collision event and the columns include different saved values for the event. Some of the values are measured by the detector and some have been calculated from the measured values.
Values in the table can be accessed with the pandas module. For example the data we are using contains the charges of two muons marked as `Q1` and `Q2`. We can select certain columns from a table e.g. the charges of the first muon for all of the events by referring to the column name:
```
dataset['Q1']
```
Now the code printed the values of the column `Q1` of the variable dataset. Of course not all of the values will be printed (there are over 470000 of them), and on the last line of the print you can see the name, length and type of the information printed.
The numbers on the left give the index of the line and the numbers on the right are the values of the charges. By replacing the `Q1` in the code it is possible to select any of the columns of the dataset (e.g. `pt1`, `eta1`, `phi2`, ...).
If, for example, only the first values of the charges are to be selected, this can be done with the `.loc` method. In the method, the brackets first include the indexes of the lines to be selected (here lines 0–10) and after those the name of the column from which the lines will be selected (here `Q1`).
```
dataset.loc[0:10, 'Q1']
# If you use Python 2, use
# dataset.ix[0:10, 'Q1']
```
Also individual values can be picked. Let's say we want to see the charges at indices 0, 1, 5 and 10. This can be done with
```
dataset.loc[[0,1,5,10],'Q1']
# If you use Python 2, use
# dataset.ix[[0,1,5,10],'Q1']
```
### Other notes
- There are also other options for selecting the values. For example this [Stack Overflow link](http://stackoverflow.com/questions/31593201/pandas-iloc-vs-ix-vs-loc-explanation) includes other possibilities of the _pandas_ module. CSV files can also be read with other modules or libraries.
- Note that different CSV files include different data depending on which kind of collision events the file has been created from. You can always check the content of the file by opening it in Jupyter or in a text editor.
# Extracting data and saving it to a new file
This section walks through how to read CSV data, select certain events using selection rules, and save the selected data into a new file.
Plot a histogram of the invariant masses using the whole dataset.
```
plt.hist(dataset['M'], bins=500, range=(0,110))
axes = plt.gca()
axes.set_ylim([0,15000])
# Name the axis and set a title
plt.xlabel('Invariant mass [GeV]')
plt.ylabel('Number of events')
plt.title('Histogram of invariant masses of two muons\n') # \n creates a new line for making the title look better
# Show the plot.
plt.show()
```
The histogram shows multiple peaks, of which we are interested in the one on the right. Let's choose the rows from the original data that have invariant mass values in the range `70 < M < 110` and save these into the variable `peakdata`. We can also check how many rows of data fulfil these conditions.
```
peakdata = dataset[(dataset.M>70) & (dataset.M<110)] #Z
len(peakdata)
```
We can now easily plot a histogram for invariant masses of the selected data.
```
inv_mass = peakdata['M']
plt.hist(inv_mass, bins=50)
plt.show()
```
The selection conditions seem reasonable, but let's check the minimum and maximum values of the invariant mass in our selected data. This way we can make sure that our conditions hold.
```
minimum = min(peakdata['M'])
maximum = max(peakdata['M'])
print("The smallest value of invariant mass is %f and the greatest is %f in selected data." %(minimum,maximum))
```
We see that our conditions hold, so let's save the data in `peakdata` into a CSV file named `peak.csv`. We can leave out the original indices but it's good to keep the original column titles. The created file will be saved in the same folder as the notebook.
```
peakdata.to_csv('peak.csv',index=False,header=True)
```
We can make sure that we managed to save the data we wanted by reading the file `peak.csv` and printing the first five rows of it.
```
test = pd.read_csv('peak.csv')
test.head()
```
# Adding column to a DataFrame
This example demonstrates how to import a CSV file, perform calculations with its values, and add the calculated values to the original table. In this example we calculate the invariant masses for the different events.
We need to import the **numpy** package in order to perform the calculations.
```
import numpy as np
```
We also check the type of the variable, since we want to combine our calculations with it.
```
type(dataset)
```
### Performing the calculation of the invariant mass
Let's use the following expression for the invariant mass $M$ in the calculation
$$M = \sqrt{2p_{T1}p_{T2}(\cosh(\eta_1-\eta_2)-\cos(\phi_1-\phi_2))}.$$
and use **numpy (np)** for performing the calculation.
```
my_invariant_mass = np.sqrt(2*dataset.pt1*dataset.pt2*(np.cosh(dataset.eta1-dataset.eta2) - np.cos(dataset.phi1-dataset.phi2)))
```
After the calculation we can check which values were saved in the variable `my_invariant_mass` by printing the content of the variable:
```
print(my_invariant_mass)
```
Let's add the column of invariant masses to the original `dataset`, which is of type DataFrame. First we need to know the type of `my_invariant_mass`:
```
type(my_invariant_mass)
```
Since `my_invariant_mass` is a **Series**, we need to convert it into a DataFrame. Let's name the conversion `inv_masses` and give the column a heading `myM`.
```
inv_masses = my_invariant_mass.to_frame('myM')
```
Now we can combine `dataset` with `inv_masses` using the command `merge`. Let's save it into a variable `all_data`.
```
all_data = dataset.merge(inv_masses, left_index=True, right_index=True)
all_data.head()
```
As you can see, the calculated invariant masses are now in the last column, with the heading `myM`.
# Statistics with the CMS Open Data
With Python it is easy to calculate statistical values for the CMS Open Data. In this notebook we will learn how to calculate the mean, the variance and the standard deviation.
## Mean $\bar x$
The mean can be easily calculated with the function `mean( )` of the **numpy** module. Let's calculate the mean of the invariant masses in the selected data:
```
mean_masses = np.mean(inv_mass)
print(mean_masses)
```
## Variance $\sigma^2$
The variance is determined by the equation
$$\sigma^2 = \frac{\sum_{i=1}^{n}(x_i-\bar{x})^2}{n}.$$
With Python the variance can be calculated with the function `var( )` of the **numpy** module. Let's do that for the selected data:
```
variance = np.var(inv_mass)
print(variance)
```
## Standard deviation $\sigma$
Because the standard deviation is the square root of the variance, we can calculate it with the function `sqrt( )` of the **numpy** module, which calculates the square root of the given value. Once again, for the selected data we get
```
sd = np.sqrt(variance)
print(sd)
```
# Selecting the amount of data
The code below asks how many events will be selected for the histogram. After that the code plots the histogram of the selected invariant masses.
Examine how the amount of data used affects the histogram. Which values of the invariant mass seem to occur most often? What can you conclude from those values?
By examining the code, predict what will happen if you enter a number bigger than 475465 for the requested amount of data. Test your prediction by running the code.
```
# Create a Series structure (basically a list) and name it to "invariant_mass".
# Save the column "M" from the "dataset" to the variable "invariant_mass".
invariant_mass = dataset['M']
# Create an empty list "selected", where the selected amount of invariant masses will be saved.
selected = []
# Ask user to enter the number of events wanted. Save the number to variable "amount".
amount = int(input('Enter the amount of events wanted: '))
# Check if user have selected more events than there are available.
# If not select that amount of invariant masses from the variable "invariant_mass".
# Masses will be selected in order.
if amount > 475465:
    print('''You have tried to select more data than there are available in the file.
    The histogram couldn't be drawn. The maximum amount of the data is 475465.''')
else:
for f in range(amount):
M = invariant_mass[f]
selected.append(M)
print('\n You selected {} invariant mass values from the whole data.'.format(amount))
# Jupyter Notebook uses "magic functions". With this function it is possible to plot
# the histogram straight to notebook.
%matplotlib inline
# Create the histogram from data in variable "selected". Set bins and range to histogram.
plt.hist(selected, bins=120, range=(60,120))
# Set y-axis from 0 to 800.
axes = plt.gca()
axes.set_ylim([0,800])
# Name the axises and give the title.
plt.xlabel('Invariant mass [GeV]')
plt.ylabel('Number of events')
plt.title('Histogram of invariant masses of two muons\n')
# Empty the variable "selected" for the next run.
selected = []
```
## Evolving of the histogram when the amount of data increases
Let's observe with a series of images how the histogram changes when the amount of data is increased.
The code below will create 10 different histograms of invariant masses from the same data. Between consecutive images, 50000 more invariant mass values are added to the histogram.
```
# Loop where a new histogram is plotted after every 50000 events until 450000 events have been reached.
for a in range(0,475465,50000):
T = invariant_mass[0:a]
%matplotlib inline
plt.hist(T, bins=120, range=(60,120))
# Set y-axis from 0 to 800.
axes = plt.gca()
axes.set_ylim([0,2500])
plt.xlabel('Invariant mass [GeV]')
plt.ylabel('Number of events')
plt.title('Histogram of invariant masses of two muons for {} events\n'.format(len(T)))
plt.show()
```
# Plotting the invariant mass histogram
Let's prepare the histogram.
```
# Set the amount of bins to the histogram.
nr_bins = 500
```
## Plotting the histogram without the weights
```
# Take log10 from all of the values in "invariant_mass" before plotting on the logarithmic mass axis.
invariant_mass_log = np.log10(invariant_mass)

plt.hist(invariant_mass_log, bins=nr_bins, range=(-0.5,2.5))
plt.yscale('log')
plt.show()
```
## Plotting the histogram with the weights
```
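# Weight each event by ~1/M so that the weighted bin contents are proportional to the
# number of events per GeV, even though the histogram is filled in log10(M).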
weights = [nr_bins/np.log(10)/invariant_mass for invariant_mass in invariant_mass]
# Take log10 from all of the values in "invariant_mass".
invariant_mass_log = np.log10(invariant_mass)
# Plot the histogram with the plt.hist()-function of the matplotlib.pyplot module.
# "bins" determines the number of bins used, "range" determines the limits of the x-axis
# and "weights" determines the weights to the histogram.
plt.hist(invariant_mass_log, bins=nr_bins, range=(-0.5,2.5), weights=weights)
# Set y-axis to logarithmic.
plt.yscale('log')
plt.show()
```
# The effect of the pseudorapidity $\eta$ to the resolution of the CMS detector
In this exercise the CMS (Compact Muon Solenoid) detector and the concept of pseudorapidity are introduced. With real data collected by the CMS detector, the effect of the pseudorapidity on the resolution of the CMS detector is observed.
## Pseudorapidity
In experimental particle physics pseudorapidity $\eta$ is a spatial coordinate used to describe the angle between a particle and the particle beam. Pseudorapidity is determined by the equation
$$ \eta \equiv -\ln(\tan(\frac{\theta}{2})), $$
where $\theta$ is the angle of a particle relative to the particle beam.
Pseudorapidity thus describes the angle between a detected particle and the particle beam. In the image below the particle beam would go horizontally from left to right. So with large values of $\eta$, a particle created in the collision deviates only a little from the direction of the beam. With small values of $\eta$ the deflection is bigger.
<img src="Images/Pseudorapidity_plot.png" alt="Image of pseudorapidity values" style="height: 200px">
(Image: Wikimedia user Mets501, Own work, CC BY-SA 3.0, https://commons.wikimedia.org/w/index.php?curid=20392149)
<br>
<br>
<br>
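As a quick numerical check of the formula above (the angles here are chosen only for illustration, not taken from the image):
```
import numpy as np   # already imported earlier in this notebook

for theta_deg in [90, 45, 10, 1]:
    theta = np.radians(theta_deg)
    eta = -np.log(np.tan(theta/2))
    print("theta = {:3d} degrees  ->  eta = {:.2f}".format(theta_deg, eta))
```
Small angles with respect to the beam correspond to large values of $\eta$, while $\theta = 90^\circ$ gives $\eta = 0$.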
## The effect of pseudorapidity to the resolution of the measurement
With the CMS detector, for example, the momenta of particles can be measured. Pseudorapidity $\eta$ affects the resolution of the momentum measurement. The particles that hit the middle part of the detector (the barrel) can be measured more accurately than the particles that hit the ends of the detector (the endcaps).
The reason is probably that the particles that have flown to the endcap have encountered more matter (other particles) than the particles that hit the barrel. The interaction with other matter causes inaccuracy in the measurements of the particles that hit the endcap.
In the image below there is a draft of the two particles created in the collision event. One hits the barrel of the detector and another hits the endcap. There are also the values of pseudorapidities $\eta$ and the angles $\theta$ of the particles.
<img src="Images/Sylinder.png" alt="Draft of the CMS detector" style="height: 300px">
### Selecting the events
First we will split the events into two groups: events where the pseudorapidities of both muons are relatively large (e.g. $|\eta|$ > 1.52) and events where they are relatively small (e.g. $|\eta|$ < 0.45). The selection is made with the code below. We want about the same number of events in both groups so that the comparison can be made.
Perform the selection by running the code below.
```
# Set the conditions to large and small etas. These can be changed, but it has to be taken
# care that about the same amount of events are selected in both groups.
cond1 = 1.52
cond2 = 0.45
# Create two DataFrames. Select to "large_etas" events where the pseudorapidities
# of the both muons are larger than "cond1". Select to "small_etas" events where
# the pseudorapidities of the both muons are smaller than "cond2".
large_etas = dataset[(np.absolute(dataset.eta1) > cond1) & (np.absolute(dataset.eta2) > cond1)]
small_etas = dataset[(np.absolute(dataset.eta1) < cond2) & (np.absolute(dataset.eta2) < cond2)]
# Print two empty lines for better design.
print('\n' * 2)
print('The amount of all events = %d' % len(dataset))
print('The amount of events where the pseudorapidity of both muons has been large = %d' %len(large_etas))
print('The amount of events where the pseudorapidity of both muons has been small = %d' %len(small_etas))
```
### Creating the histograms
Next we will create the separate histograms of the invariant masses for the events with the large pseudorapidities and with the small pseudorapidities. With the histograms we can compare these two situations.
#### Histogram for the large $\eta$ events
Let's start with the events where the pseudorapidity of both of the muons has been large.
```
# Save the invariant masses to variable "inv_mass1".
inv_mass1 = large_etas['M']
# Create the histogram from data in variable "inv_mass1". Set bins and range.
plt.hist(inv_mass1, bins=120, range=(60,120))
# Set y-axis range from 0 to 150.
axes = plt.gca()
axes.set_ylim([0,150])
# Name the axises and give a title.
plt.xlabel('Invariant mass [GeV]')
plt.ylabel('Number of events per bin')
plt.title('Histogram of invariant masses for the events where the\n pseudorapidity of both of the muons has been large\n')
plt.show()
```
#### Histogram for the small $\eta$ events
Analogously to the above, let's plot the histogram of the invariant masses for the events where the pseudorapidity of both muons has been small.
```
# Save the invariant masses to variable "inv_mass2".
inv_mass2 = small_etas['M']
# Create the histogram from data in variable "inv_mass2". Set bins and range.
plt.hist(inv_mass2, bins=120, range=(60,120))
# Set y-axis range from 0 to 60.
axes = plt.gca()
axes.set_ylim([0,200])
# Name the axises and give a title.
plt.xlabel('Invariant mass [GeV]')
plt.ylabel('Number of events per bin')
plt.title('Histogram of invariant masses for the events where the\n pseudorapidity of both of the muons has been small\n')
plt.show()
```
# Exercises
Now we have created two histograms of invariant masses from real CMS data. With the help of the histograms and the theory part of the notebook, think about the following questions:
__Question 1)__ In what way can you see the effect of the pseudorapidity on the measurement resolution of the CMS detector?
__Question 2)__ Do your results agree with what the theory predicts?
After answering the questions, you can try changing the conditions for the large and small pseudorapidities in the first code cell. The conditions are named _cond1_ and _cond2_. Make sure you choose the conditions so that there will be nearly the same number of events in both groups.
__Question 3)__ After making the changes, run the code again. How do the changes affect the number of events? And the histograms?
__Question 4)__ Fit a function that represents the Breit-Wigner distribution to the values of the histogram.
To get information about the mass and lifetime of the detected resonance, a function that describes the distribution of the invariant masses must be fitted to the values of the histogram. In our case the values follow a Breit-Wigner distribution:
$$
N(E) = \frac{K}{(E-M)^2 + \frac{\Gamma^2}{4}},
$$
where $E$ is the energy, $M$ the maximum of the distribution (equal to the mass of the particle detected in the resonance), $\Gamma$ the full width at half maximum (FWHM) or decay width of the distribution, and $K$ a constant.
The Breit-Wigner distribution can also be expressed in the following form:
$$
N(E) = \frac{ \frac{2\sqrt{2}M\Gamma\sqrt{M^2(M^2+\Gamma^2)} }{\pi\sqrt{M^2+\sqrt{M^2(M^2+\Gamma^2)}}} }{(E^2-M^2)^2 + M^2\Gamma^2},
$$
where the constant $K$ has been written out explicitly.
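As a minimal sketch of one way to do such a fit (assuming `scipy` is available and using the `inv_mass1` selection from above; the initial guesses for $K$, $M$ and $\Gamma$ are rough assumptions, not values from the data):
```
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

# Breit-Wigner shape from the first form above: N(E) = K / ((E - M)^2 + Gamma^2 / 4)
def breitwigner(E, K, M, gamma):
    return K / ((E - M)**2 + gamma**2 / 4)

# Histogram the selected invariant masses and fit the bin contents.
counts, edges = np.histogram(inv_mass1, bins=120, range=(60, 120))
centers = 0.5 * (edges[:-1] + edges[1:])

initial = [2000, 91, 3]   # rough guesses for K, M [GeV], Gamma [GeV]
best, covariance = curve_fit(breitwigner, centers, counts, p0=initial)
errors = np.sqrt(np.diag(covariance))

print('M = %.3f +- %.3f GeV' % (best[1], errors[1]))
print('Gamma = %.3f +- %.3f GeV' % (best[0], errors[0]))

# Draw the fit on top of the histogram.
plt.hist(inv_mass1, bins=120, range=(60, 120), label='data')
plt.plot(centers, breitwigner(centers, *best), 'r-', label='fit')
plt.xlabel('Invariant mass [GeV]')
plt.ylabel('Number of events per bin')
plt.legend()
plt.show()
```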
The decay width $\Gamma$ and the lifetime $\tau$ of the particle detected in the resonance are related in the following way:
$$
\Gamma \equiv \frac{\hbar}{\tau},
$$
where $\hbar$ is the reduced Planck constant.
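As a sketch of the unit conversion needed to turn a fitted decay width into a lifetime (the values of `gamma` and `gamma_err` below are placeholders for your own fit results in GeV):
```
# Reduced Planck constant in GeV*s
hbar = 6.582119569e-25

gamma = 2.5        # placeholder: fitted decay width in GeV
gamma_err = 0.1    # placeholder: uncertainty of the fitted width in GeV

tau = hbar / gamma
tau_err = hbar * gamma_err / gamma**2   # error propagation for tau = hbar / gamma

print('tau = %.3e +- %.3e s' % (tau, tau_err))
```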
__Question 5)__ Using the fit, calculate the lifetime $\tau$ of the Z boson together with its uncertainty. Compare the calculated value to the known lifetime of the Z. What do you notice? What could explain your observations?
# Example GSE123139 differential expression
### Dataset
This is a demonstration of differential expression analysis with Normalisr between dysfunctional and naive T cells in human melanoma scRNA-seq. Only a subset of cells was extracted from the GSE123139 dataset for this demonstration. You can switch to all cells by following the guidelines in the example README.
**Warning**: You will only reproduce the actual results with the **full dataset** of all cells. This example is intended as a demonstration of how to use Normalisr, rather than an exact reproduction of our results in the manuscript.
### Usage
You can get help for any function by typing `?func` in a Jupyter notebook. For example, the line below shows the documentation for the built-in function `open`.
`?open`
To get help for Normalisr, try a specific function such as
`import normalisr.normalisr as norm
?norm.de`
```
import numpy as np
import normalisr.normalisr as norm
from os.path import join as pjoin
import matplotlib.pyplot as plt
from scipy.io import mmread
diri='../data/de'
```
### Optional extra logging
```
import logging
logging.basicConfig(level=logging.INFO)
# logging.basicConfig(level=logging.DEBUG)
```
### Load data
```
dt0=mmread(pjoin(diri,'0_read.mtx.gz')).astype(int).toarray()
dg=np.loadtxt(pjoin(diri,'0_group.tsv.gz'),delimiter='\t',dtype=int)
if dg.ndim==1:
dg=dg.reshape(1,-1)
dc=np.loadtxt(pjoin(diri,'0_cov.tsv.gz'),delimiter='\t',dtype=int)
with open(pjoin(diri,'0_gene.txt'),'r') as f:
namet=np.array([x.strip() for x in f.readlines()])
with open(pjoin(diri,'0_cell.txt'),'r') as f:
names=np.array([x.strip() for x in f.readlines()])
nameg=np.array([''])
with open(pjoin(diri,'0_cov.txt'),'r') as f:
namec=np.array([x.strip() for x in f.readlines()])
nc,ng,ns,nt=[len(x) for x in [namec,nameg,names,namet]]
assert dt0.shape==(nt,ns) and dg.shape==(ng,ns) and dc.shape==(nc,ns)
```
### Initial QC for low read cell and gene removal
```
select=norm.qc_reads(dt0,0,500,0.02,500,100,0)
print('Genes passed QC: {}/{}'.format(len(select[0]),nt))
print('Cells passed QC: {}/{}'.format(len(select[1]),ns))
dt0=dt0[select[0]][:,select[1]].copy()
dg=dg[:,select[1]].copy()
dc=dc[:,select[1]].copy()
namet=namet[select[0]]
names=names[select[1]]
ns,nt=[len(x) for x in [names,namet]]
assert dt0.shape==(nt,ns) and dg.shape==(ng,ns) and dc.shape==(nc,ns)
```
### Compute Bayesian logCPM and cellular summary covariates
```
dt,_,_,dc2=norm.lcpm(dt0)
dc=np.concatenate([dc,dc2],axis=0)
```
### Normalize covariates and add constant-1 covariate
```
dc=norm.normcov(dc)
```
### Compute variance normalization factors for each gene and each cell
```
sf=norm.scaling_factor(dt0)
weight=norm.compute_var(dt,dc)
```
### Detect very-low-variance cell outliers
```
select=norm.qc_outlier(weight)
```
### Remove very-low-variance cell outliers
```
print('Cells passed outlier QC: {}/{}'.format(select.sum(),ns))
dt0=dt0[:,select].copy()
dg=dg[:,select].copy()
dt=dt[:,select].copy()
dc=dc[:,select].copy()
weight=weight[select]
names=names[select]
ns=len(names)
```
### Normalize gene expression at mean and variance levels and covariates at variance level
```
dt,dc=norm.normvar(dt,dc,weight,sf)
```
### Perform differential expression test
```
de=norm.de(dg,dt,dc)
```
## Visualization
### Histogram
```
nbin=50
dx=np.linspace(0,1,nbin+1)
tdict=dict(zip(namet,range(len(namet))))
plt.hist(de[0][0],bins=dx);
plt.xlabel('P-value')
plt.ylabel('Histogram')
plt.show()
plt.hist(de[1][0],bins=nbin);
plt.xlabel('LogFC')
plt.ylabel('Histogram');
```
### Volcano plot
```
# Genes to highlight on the volcano plot
genes=['LAG3','TIGIT','HAVCR2','PDCD1','CTLA4']
# Grey color for all points
color=[0.5]*3
# x axis: log fold change; y axis: -log P-value from the DE test
dx=de[1][0]
dy=-np.log(de[0][0])
plt.scatter(dx,dy,color=color,alpha=0.4);
# Label the highlighted genes in red
for xi in genes:
    plt.text(dx[tdict[xi]],dy[tdict[xi]],xi,color='red')
```
# Triton Examples
## Prerequisites
* For the test data you will need to install `torch`, `torchvision` and `tensorflow`
* For the visualization `matplotlib`
* For calling the service `curl`
## Setup Seldon Core
Follow the instructions to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Install Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core).
Then port-forward to that ingress on localhost:8003 in a separate terminal either with:
* Ambassador: `kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080`
* Istio: `kubectl port-forward $(kubectl get pods -l istio=ingressgateway -n istio-system -o jsonpath='{.items[0].metadata.name}') -n istio-system 8003:80`
### Create Namespace for experimentation
We will first set up the namespace of Seldon where we will be deploying all our models
```
!kubectl create namespace seldon
```
Then we will set the current context to use the seldon namespace, so that all our commands run there by default (instead of everything running in the default namespace).
```
!kubectl config set-context $(kubectl config current-context) --namespace=seldon
import tensorflow as tf
import matplotlib.pyplot as plt
import os
train, test = tf.keras.datasets.cifar10.load_data()
X_test, y_test = test
X_test = X_test.astype('float32') / 255
print(X_test.shape, y_test.shape)
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
```
## Triton Model Naming
You need to give the model in the inference graph the same name as the Triton model being loaded, because this name is used in the request path to Triton.
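For example, the curl calls below use URLs of the form (with `localhost:8003` coming from the port-forward above):
```
http://localhost:8003/seldon/<namespace>/<seldondeployment-name>/v2/models/<triton-model-name>/infer
```
So for the `cifar10` deployment below, the graph model name `cifar10` appears as the final model segment of the path.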
## Tensorflow CIFAR10 Model
```
%%writefile resources/triton_tf_cifar10.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: cifar10
namespace: seldon
spec:
annotations:
seldon.io/engine-seldon-log-messages-externally: "true"
name: resnet32
predictors:
- graph:
implementation: TRITON_SERVER
logger:
mode: all
modelUri: gs://seldon-models/triton/tf_cifar10
name: cifar10
type: MODEL
name: default
replicas: 1
protocol: kfserving
!kubectl apply -f resources/triton_tf_cifar10.yaml
!kubectl rollout status -n seldon deploy/$(kubectl get deploy -l seldon-deployment-id=cifar10 -n seldon -o jsonpath='{.items[0].metadata.name}')
from subprocess import run, Popen, PIPE
import json
import numpy as np
idx=1
test_example=X_test[idx:idx+1].tolist()
payload='{"inputs":[{"name":"input_1","datatype":"FP32","shape":[1, 32, 32, 3],"data":'+f"{test_example}"+'}]}'
cmd=f"""curl -d '{payload}' \
http://localhost:8003/seldon/seldon/cifar10/v2/models/cifar10/infer \
-H "Content-Type: application/json"
"""
ret = Popen(cmd, shell=True,stdout=PIPE)
raw = ret.stdout.read().decode("utf-8")
res=json.loads(raw)
arr=np.array(res["outputs"][0]["data"])
X = X_test[idx].reshape(1, 32, 32, 3)
plt.imshow(X.reshape(32, 32, 3))
plt.axis('off')
plt.show()
print("class:",class_names[y_test[idx][0]])
print("prediction:",class_names[arr.argmax()])
!kubectl delete -f resources/triton_tf_cifar10.yaml
```
## ONNX CIFAR10 Model
```
%%writefile resources/triton_onnx_cifar10.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: cifar10
namespace: seldon
spec:
annotations:
seldon.io/engine-seldon-log-messages-externally: "true"
name: resnet32
predictors:
- graph:
implementation: TRITON_SERVER
logger:
mode: all
modelUri: gs://seldon-models/triton/onnx_cifar10
name: cifar10
type: MODEL
name: default
replicas: 1
protocol: kfserving
!kubectl apply -f resources/triton_onnx_cifar10.yaml
!kubectl rollout status -n seldon deploy/$(kubectl get deploy -l seldon-deployment-id=cifar10 -n seldon -o jsonpath='{.items[0].metadata.name}')
from subprocess import run, Popen, PIPE
import json
import numpy as np
idx=1
test_example=X_test[idx:idx+1].tolist()
payload='{"inputs":[{"name":"input_1:0","datatype":"FP32","shape":[1, 32, 32, 3],"data":'+f"{test_example}"+'}]}'
cmd=f"""curl -d '{payload}' \
http://localhost:8003/seldon/seldon/cifar10/v2/models/cifar10/infer \
-H "Content-Type: application/json"
"""
ret = Popen(cmd, shell=True,stdout=PIPE)
raw = ret.stdout.read().decode("utf-8")
res=json.loads(raw)
arr=np.array(res["outputs"][0]["data"])
X = X_test[idx].reshape(1, 32, 32, 3)
plt.imshow(X.reshape(32, 32, 3))
plt.axis('off')
plt.show()
print("class:",class_names[y_test[idx][0]])
print("prediction:",class_names[arr.argmax()])
!kubectl delete -f resources/triton_onnx_cifar10.yaml
```
## TorchScript CIFAR10 Model
```
%%writefile resources/triton_pt_cifar10.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: cifar10
namespace: seldon
spec:
annotations:
seldon.io/engine-seldon-log-messages-externally: "true"
name: resnet32
predictors:
- graph:
implementation: TRITON_SERVER
logger:
mode: all
modelUri: gs://seldon-models/triton/pytorch_cifar10
name: cifar10
type: MODEL
name: default
replicas: 1
protocol: kfserving
!kubectl apply -f resources/triton_pt_cifar10.yaml
!kubectl rollout status -n seldon deploy/$(kubectl get deploy -l seldon-deployment-id=cifar10 -n seldon -o jsonpath='{.items[0].metadata.name}')
import torch
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
[transforms.ToTensor(),transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=10,
shuffle=False, num_workers=2)
for data in testloader:
images, labels = data
break
from subprocess import run, Popen, PIPE
import json
import numpy as np
idx=3
test_example=images[idx:idx+1]
test_example = test_example.tolist()
payload='{"inputs":[{"name":"input__0","datatype":"FP32","shape":[1, 3, 32, 32],"data":'+f"{test_example}"+'}]}'
cmd=f"""curl -d '{payload}' \
http://localhost:8003/seldon/seldon/cifar10/v2/models/cifar10/infer \
-H "Content-Type: application/json"
"""
ret = Popen(cmd, shell=True,stdout=PIPE)
raw = ret.stdout.read().decode("utf-8")
res=json.loads(raw)
arr=np.array(res["outputs"][0]["data"])
X = X_test[idx].reshape(1, 32, 32, 3)
plt.imshow(X.reshape(32, 32, 3))
plt.axis('off')
plt.show()
print("class:",class_names[labels[idx]])
print("prediction:",class_names[arr.argmax()])
!kubectl delete -f resources/triton_pt_cifar10.yaml
```
## Multi-Model Serving
```
%%writefile resources/triton_multi.yaml
apiVersion: machinelearning.seldon.io/v1
kind: SeldonDeployment
metadata:
name: multi
namespace: seldon
spec:
predictors:
- graph:
implementation: TRITON_SERVER
logger:
mode: all
modelUri: gs://seldon-models/triton/multi
name: multi
type: MODEL
name: default
replicas: 1
protocol: kfserving
!kubectl apply -f resources/triton_multi.yaml
!kubectl rollout status -n seldon deploy/$(kubectl get deploy -l seldon-deployment-id=multi -n seldon -o jsonpath='{.items[0].metadata.name}')
from subprocess import run, Popen, PIPE
import json
import numpy as np
idx=1
test_example=X_test[idx:idx+1].tolist()
payload='{"inputs":[{"name":"input_1","datatype":"FP32","shape":[1, 32, 32, 3],"data":'+f"{test_example}"+'}]}'
cmd=f"""curl -d '{payload}' \
http://localhost:8003/seldon/seldon/multi/v2/models/cifar10/infer \
-H "Content-Type: application/json"
"""
ret = Popen(cmd, shell=True,stdout=PIPE)
raw = ret.stdout.read().decode("utf-8")
res=json.loads(raw)
arr=np.array(res["outputs"][0]["data"])
X = X_test[idx].reshape(1, 32, 32, 3)
plt.imshow(X.reshape(32, 32, 3))
plt.axis('off')
plt.show()
print("class:",class_names[y_test[idx][0]])
print("prediction:",class_names[arr.argmax()])
X=!curl -s -d '{"inputs":[{"name":"INPUT0","data":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],"datatype":"INT32","shape":[1,16]},{"name":"INPUT1","data":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16],"datatype":"INT32","shape":[1,16]}]}' \
-X POST http://0.0.0.0:8003/seldon/seldon/multi/v2/models/simple/infer \
-H "Content-Type: application/json"
d=json.loads(X[0])
print(d)
assert(d["outputs"][0]["data"][0]==2)
!kubectl delete -f resources/triton_tf_cifar10.yaml
!kubectl delete -f resources/triton_multi.yaml
```
# Exp 39 analysis
See `./informercial/Makefile` for experimental
details.
```
import os
import numpy as np
from IPython.display import Image
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from infomercial.exp import meta_bandit
from infomercial.local_gym import bandit
from infomercial.exp.meta_bandit import load_checkpoint
import gym
# ls ../data/exp2*
```
# Load and process data
```
data_path ="/Users/qualia/Code/infomercial/data/"
exp_name = "exp39"
best_params = load_checkpoint(os.path.join(data_path, f"{exp_name}_best.pkl"))
sorted_params = load_checkpoint(os.path.join(data_path, f"{exp_name}_sorted.pkl"))
best_params
```
# Performance
of best parameters
```
env_name = 'BanditHardAndSparse1000-v0'
# Run w/ best params
result = meta_bandit(
env_name=env_name,
num_episodes=10000,
lr=best_params["lr"],
tie_threshold=best_params["tie_threshold"],
seed_value=19,
save="exp39_best_model.pkl"
)
# Plot run
episodes = result["episodes"]
actions =result["actions"]
scores_R = result["scores_R"]
values_R = result["values_R"]
scores_E = result["scores_E"]
values_E = result["values_E"]
# Get some data from the gym...
env = gym.make(env_name)
best = env.best
print(f"Best arm: {best}, last arm: {actions[-1]}")
# Init plot
fig = plt.figure(figsize=(6, 14))
grid = plt.GridSpec(5, 1, wspace=0.3, hspace=0.8)
# Do plots:
# Arm
plt.subplot(grid[0, 0])
plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
color="red", alpha=0.8, ls='--', linewidth=2)
plt.ylim(-.1, np.max(actions)+1.1)
plt.ylabel("Arm choice")
plt.xlabel("Episode")
# score
plt.subplot(grid[1, 0])
plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
plt.scatter(episodes, scores_E, color="purple", alpha=0.9, s=2, label="E")
plt.ylabel("log score")
plt.xlabel("Episode")
plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# Q
plt.subplot(grid[2, 0])
plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="R")
plt.scatter(episodes, values_E, color="purple", alpha=0.4, s=2, label="E")
plt.ylabel("log Q(s,a)")
plt.xlabel("Episode")
plt.semilogy()
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
_ = sns.despine()
# -
plt.savefig("figures/epsilon_bandit.pdf", bbox_inches='tight')
plt.savefig("figures/epsilon_bandit.eps", bbox_inches='tight')
```
# Sensitivity
to parameter choices
```
total_Rs = []
ties = []
lrs = []
trials = list(sorted_params.keys())
for t in trials:
total_Rs.append(sorted_params[t]['total_R'])
ties.append(sorted_params[t]['tie_threshold'])
lrs.append(sorted_params[t]['lr'])
# Init plot
fig = plt.figure(figsize=(10, 18))
grid = plt.GridSpec(4, 1, wspace=0.3, hspace=0.8)
# Do plots:
# Arm
plt.subplot(grid[0, 0])
plt.scatter(trials, total_Rs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("total R")
_ = sns.despine()
plt.subplot(grid[1, 0])
plt.scatter(trials, ties, color="black", alpha=.3, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("Tie threshold")
_ = sns.despine()
plt.subplot(grid[2, 0])
plt.scatter(trials, lrs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("lr")
_ = sns.despine()
```
# Distributions
of parameters
```
# Init plot
fig = plt.figure(figsize=(5, 6))
grid = plt.GridSpec(2, 1, wspace=0.3, hspace=0.8)
plt.subplot(grid[0, 0])
plt.hist(ties, color="black")
plt.xlabel("tie threshold")
plt.ylabel("Count")
_ = sns.despine()
plt.subplot(grid[1, 0])
plt.hist(lrs, color="black")
plt.xlabel("lr")
plt.ylabel("Count")
_ = sns.despine()
```
of total reward
```
# Init plot
fig = plt.figure(figsize=(5, 2))
grid = plt.GridSpec(1, 1, wspace=0.3, hspace=0.8)
plt.subplot(grid[0, 0])
plt.hist(total_Rs, color="black", bins=50)
plt.xlabel("Total reward")
plt.ylabel("Count")
plt.xlim(0, 10)
_ = sns.despine()
```
```
import numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
# %matplotlib inline
# %matplotlib notebook
%matplotlib widget
```
# One widget changes another
## Purpose
Figure out how to have changes in one widget affect the properties of another widget in real time. Use an example involving the following:
- Set up IntSlider
- Set up some radio buttons
- Depending on selected radio button, change IntSlider value, range, description
## Approach
See [Widget Events → Traitlet Events](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Events.html#Traitlet-events) in the [User Guide](https://ipywidgets.readthedocs.io/en/latest/user_guide.html) for how to use the `.observe` method to register a callback function.
Steps:
- Set up Widgets
- Create callback function
- Register callback function
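As a minimal sketch of this pattern (a stripped-down version of the implementation in the next section, using a hypothetical label widget just to show the callback firing):
```
import ipywidgets as widgets

slider = widgets.IntSlider(value=5, min=0, max=10, description='slider')
label = widgets.Label()

# Callback function: "change" carries the old and new trait values.
def on_value_change(change):
    label.value = 'slider is now {}'.format(change.new)

# Register the callback for changes of the "value" trait.
slider.observe(on_value_change, 'value')

widgets.VBox([slider, label])
```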
## Discrete elements and functions implementation
```
# Widgets
int_slider = widgets.IntSlider(
value=5,
min=0, max=10, step=1,
description='slider'
)
radiobuttons = widgets.RadioButtons(
value='Original',
options=['Original', '0-5', '3-7', '5-10'],
description='radio buttons'
)
# Callback function
def radiobuttons_update(change):
int_slider.description = change.new
if change.new == 'Original':
int_slider.min = 0
int_slider.max = 10
elif change.new == '0-5':
if int_slider.value > 5:
int_slider.value = 5
int_slider.min = 0
int_slider.max = 5
elif change.new == '3-7':
if int_slider.value > 7:
int_slider.value = 7
elif int_slider.value < 3:
int_slider.value = 3
int_slider.min = 3
int_slider.max = 7
elif change.new == '5-10':
if int_slider.value < 5:
int_slider.value = 5
int_slider.min = 5
int_slider.max = 10
# Register callback function
radiobuttons.observe(radiobuttons_update, 'value')
# Display widgets in a VBox
widgets.VBox(
[
int_slider,
radiobuttons,
]
)
```
## Class implementation
```
class WidgetAffectsAnotherWidget(widgets.VBox):
def __init__(self):
super().__init__()
# output = widgets.Output()
# define widgets
self.int_slider = widgets.IntSlider(
value=5,
min=0, max=10, step=1,
description='slider',
)
self.radiobuttons = widgets.RadioButtons(
value='Original',
options=['Original', '0-5', '3-7', '5-10'],
description='radio buttons',
)
# observe stuff
self.radiobuttons.observe(self.radiobuttons_update, 'value')
# add to children
self.children = [self.int_slider, self.radiobuttons]
def radiobuttons_update(self, change):
self.int_slider.description = change.new
if change.new == 'Original':
self.int_slider.min = 0
self.int_slider.max = 10
elif change.new == '0-5':
if self.int_slider.value > 5:
self.int_slider.value = 5
self.int_slider.min = 0
self.int_slider.max = 5
elif change.new == '3-7':
if self.int_slider.value > 7:
self.int_slider.value = 7
elif self.int_slider.value < 3:
self.int_slider.value = 3
self.int_slider.min = 3
self.int_slider.max = 7
elif change.new == '5-10':
if self.int_slider.value < 5:
self.int_slider.value = 5
self.int_slider.min = 5
self.int_slider.max = 10
WidgetAffectsAnotherWidget()
```
# Output widgets
[Output widgets: leveraging Jupyter’s display system](https://ipywidgets.readthedocs.io/en/latest/examples/Output%20Widget.html?highlight=widgets.Output)
```
out = widgets.Output(layout={'border': '1px solid black'})
out
with out:
for i in range(10):
print(i, 'Hello world!')
from IPython.display import YouTubeVideo
with out:
display(YouTubeVideo('eWzY2nGfkXk'))
```