```
%load_ext autoreload
%autoreload 2
import xarray as xr
import preprocess as pp
import util
import xskillscore as xs
from tqdm.autonotebook import tqdm # Fancy progress bars for our loops!
import matplotlib.pyplot as plt
import numpy as np
plot_varnames = ["tas", "pr", "psl"]
```
### Load interim data
```
obs = xr.open_zarr("../data/interim/era5_timemean")
ens_dict = {}
ens_av_dict = {}
ens_std_dict = {}
for key in pp.all_mip_ids:
ens = xr.open_zarr(f"../data/interim/{key}_timemean").chunk({'lat': -1, 'lon':-1})
ens_dict[key] = ens
```
### Quality control
```
# Continental temperatures 20ºC higher than other ensemble members...
ens_dict['cmip6'] = ens_dict['cmip6'].isel(ensemble = ens_dict['cmip6']['ensemble'] != 'NIMS-KMA-KACE-1-0-G-r3i1p1f1')
# Units seem to be wrong... even after using the correction on the first ensemble member
ens_dict['cmip6'] = ens_dict['cmip6'].isel(ensemble = ens_dict['cmip6']['ensemble'] != 'UA-MCM-UA-1-0-r1i1p1f2')
```
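Rather than hard-coding suspect members, outliers like the ones dropped above can also be surfaced programmatically. A minimal sketch, assuming `tas` is present and using an arbitrary 10 K threshold on the cosine-weighted global mean:
```
# Global cosine-latitude-weighted mean temperature per ensemble member.
gm_tas = ens_dict['cmip6']['tas'].weighted(np.cos(np.deg2rad(ens_dict['cmip6'].lat))).mean(['lat', 'lon'])
# Members whose global mean deviates from the ensemble median by more than 10 K (assumed threshold).
outliers = gm_tas['ensemble'][np.abs(gm_tas - gm_tas.median('ensemble')) > 10]
print(list(outliers.values))
```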
## Mean absolute error
```
area = util.calc_area(obs.lat, obs.lon)
mae_dict = {}
mae_av_dict = {}
mae_std_dict = {}
for key, ens in tqdm(ens_dict.items()):
ens_av_tmp = ens.drop([var for var in list(ens.data_vars) if var not in plot_varnames])
mae = xs.mae(obs.drop([var for var in list(obs.data_vars) if var not in list(ens_av_tmp.data_vars)]),
ens_av_tmp,
['lat', 'lon'], weights=area).compute()
mae_dict[key] = mae
mae_av_dict[key] = mae.groupby('source_id').mean(skipna=True)
mae_std_dict[key] = mae.groupby('source_id').std(skipna=True).copy()
# CMIP6 multi-model median (excluding the ensemble mean), used to normalize the MAE
mae_med = mae_av_dict['cmip6'].sel(source_id=[key for key in mae_dict['cmip6'].source_id.values if key!='ens-mean']).median(dim='source_id', skipna=True).compute()
mae_skill_score_dict = {}
mae_std_skill_score_dict = {}
for key, mae in mae_av_dict.items():
mae_skill_score_dict[key] = (mae/mae_med)
mae_std_skill_score_dict[key] = (mae_std_dict[key]/mae_med)
```
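`util.calc_area` is project-specific; a minimal stand-in, assuming a regular lat/lon grid and weighting cells by the cosine of latitude (not necessarily the project's actual implementation), might look like:
```
import numpy as np
import xarray as xr

def cosine_lat_weights(lat, lon):
    """Relative grid-cell weights on a regular lat/lon grid, proportional to cos(latitude)."""
    # cos(latitude) along 'lat', broadcast across 'lon' -> dims ("lat", "lon")
    weights = np.cos(np.deg2rad(lat)) * xr.ones_like(lon)
    return weights / weights.sum()

# e.g. area = cosine_lat_weights(obs.lat, obs.lon)
```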
# Skill metrics over time
```
mip_year_dict = {'far': 1990,
'sar': 1996,
'tar': 2000,
'cmip3': 2005,
'cmip5': 2013,
'cmip6': 2019}
mip_col = {'far': 'C5',
'sar': 'C4',
'tar': 'C3',
'cmip3': 'C2',
'cmip5': 'C1',
'cmip6': 'C0'}
var_shape = {'tas': 'o', 'pr': 's', 'psl': 'D'}
fig, axes = plt.subplots(len(plot_varnames),1,figsize=(8,12))
ax=axes[0]
ens_mean_label = "ens-mean"
ax.plot([], [], 'k<', markersize=10, label=ens_mean_label)
ax.plot([], [], 'k>', markersize=10, label='ens-median')
ax.plot([], [], 'k', marker=var_shape['tas'], markersize=10, label='tas', linewidth=0.)
ax.plot([], [], 'k', marker=var_shape['pr'], markersize=10, label='pr', linewidth=0.)
ax.plot([], [], 'k', marker=var_shape['psl'], markersize=10, label='psl', linewidth=0.)
for key, ens in ens_dict.items():
subplot_count = 1
for idx, var_key in enumerate(var_shape.keys()):
if var_key not in mae_dict[key]: continue
if var_key == 'tas':
ens_label = key
else:
ens_label = None
ax = axes[idx]
data = mae_skill_score_dict[key][var_key]
data_err = mae_std_skill_score_dict[key][var_key]
ax.errorbar(
data,
mip_year_dict[key]*np.ones_like(data) + 4*(np.random.rand(*data.shape)-0.5),
xerr=data_err,
linewidth=0., elinewidth=1., ecolor='k',
marker=var_shape[var_key], markersize=10, alpha=0.5, label=ens_label, color=mip_col[key]
)
# ax.plot(data,mip_year_dict[key]*np.ones_like(data),
# var_shape[var_key], markersize=10, alpha=0.5, label=ens_label, color=mip_col[key])
# data = mae_skill_score_dict[key][var_key].median(skipna=True)
# ax.plot(data, mip_year_dict[key]*np.ones_like(data),
# marker='>', markersize=20, color=mip_col[key], markeredgecolor='k')
subplot_count+=1
var_longname = ['near-surface air temperature', 'precipitation rate', 'sea level pressure']
var_minlim = [0.0, 0.0, 0.0]
var_maxlim = [2.5, 2.5, 2.5]
for subplot_count, ax in enumerate(axes):
ax.set_ylim([1985,2025])
ax.set_xlim([var_minlim[subplot_count], var_maxlim[subplot_count]])
ax.set_ylabel('publication date')
ax.set_xlabel('normalized model mean absolute error')
if subplot_count == 0: ax.legend()
ax.set_title(var_longname[subplot_count]+' performance')
ax.grid(True)
plt.tight_layout()
plt.savefig("../figures/model_performance_over_time_scatter.png",bbox_inches='tight',dpi=100)
fig, axes = plt.subplots(len(plot_varnames),1,figsize=(8,12))
ax=axes[0]
ens_mean_label = "ens-mean"
ax.plot([], [], 'k<', markersize=10, label=ens_mean_label)
ax.plot([], [], 'k>', markersize=10, label='ens-median')
ax.plot([], [], 'k', marker=var_shape['tas'], markersize=10, label='tas', linewidth=0.)
ax.plot([], [], 'k', marker=var_shape['pr'], markersize=10, label='pr', linewidth=0.)
ax.plot([], [], 'k', marker=var_shape['psl'], markersize=10, label='psl', linewidth=0.)
for key, ens in ens_dict.items():
subplot_count = 1
for idx, var_key in enumerate(var_shape.keys()):
if var_key not in mae_dict[key]: continue
if var_key == 'tas':
ens_label = key
else:
ens_label = None
ax = axes[idx]
data = mae_skill_score_dict[key][var_key]
data = data[~np.isnan(data)]
alpha=0.75
markersize=15.
if data.size > 10:
alpha=0.4
markersize=10
ax.plot(
data,mip_year_dict[key]*np.ones_like(data),
linewidth=0, marker=".", markersize=markersize, alpha=alpha, color="C0"
)
if data.size > 10:
parts = ax.violinplot(
data[~np.isnan(data)],
positions=[mip_year_dict[key]],
widths = 5.,
vert=False, showextrema=False,
)
for pc in parts['bodies']:
pc.set_facecolor("C0")
pc.set_edgecolor("k")
pc.set_alpha(0.3)
subplot_count+=1
var_longname = ['near-surface air temperature', 'precipitation rate', 'sea level pressure']
var_minlim = [0.0, 0.0, 0.0]
var_maxlim = [3.5, 3.5, 3.5]
for subplot_count, ax in enumerate(axes):
ax.set_ylim([1985,2025])
ax.set_xlim([var_minlim[subplot_count], var_maxlim[subplot_count]])
ax.set_ylabel('publication date')
ax.set_xlabel('normalized model mean absolute error')
if subplot_count == 0: ax.legend()
ax.set_title(var_longname[subplot_count]+' performance')
ax.grid(True)
plt.tight_layout()
plt.savefig("../figures/model_performance_over_time_violin.png",bbox_inches='tight',dpi=100)
mip = 'sar'
percent_spread = (mae_dict[mip].groupby('source_id').max() - mae_dict[mip].groupby('source_id').min())/mae_dict[mip].groupby('source_id').mean(skipna=True)
plt.plot(percent_spread['pr'].values, label='pr')
plt.plot(percent_spread['tas'].values, label='tas')
plt.plot(percent_spread['psl'].values, label='psl')
plt.legend()
ens_dict['sar'].sel(ensemble='HCCPR-HCCPR-01-r3i1p1f1')['pr'].plot()
ens_dict['sar'].sel(ensemble='HCCPR-HCCPR-01-r2i1p1f1')['pr'].plot()
(ens_dict['sar'].sel(ensemble='HCCPR-HCCPR-01-r2i1p1f1')['pr'] - ens_dict['sar'].sel(ensemble='HCCPR-HCCPR-01-r3i1p1f1')['pr']).plot()
```
STAT 453: Deep Learning (Spring 2021)
Instructor: Sebastian Raschka ([email protected])
Course website: http://pages.stat.wisc.edu/~sraschka/teaching/stat453-ss2021/
GitHub repository: https://github.com/rasbt/stat453-deep-learning-ss21
---
```
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p matplotlib,torch,pandas,numpy
```
---
# Solving the XOR Problem
## Toy Dataset
```
import torch
import torch.nn.functional as F
import pandas as pd
import time
import matplotlib.pyplot as plt
%matplotlib inline
RANDOM_SEED = 123
DEVICE = ('cuda:0' if torch.cuda.is_available() else 'cpu')
df = pd.read_csv('xor.csv')
X = df[['x1', 'x2']].values
y = df['class label'].values
plt.scatter(X[y==0, 0], X[y==0, 1], marker='o')
plt.scatter(X[y==1, 0], X[y==1, 1], marker='s')
plt.tight_layout()
#plt.savefig('xor.pdf')
plt.show()
```
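If a local `xor.csv` is not available, a comparable toy dataset can be generated synthetically. This is only a sketch; the exact contents and size of the course's CSV file are assumptions:
```
import numpy as np
import pandas as pd

rng = np.random.RandomState(RANDOM_SEED)
n = 200  # points per cluster (hypothetical size)

# Four Gaussian blobs around the XOR corners; the label is the XOR of the corner signs.
centers = np.array([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.]])
X_syn = np.vstack([c + 0.35 * rng.randn(n, 2) for c in centers])
y_syn = np.array([0, 1, 1, 0]).repeat(n)

pd.DataFrame({'x1': X_syn[:, 0], 'x2': X_syn[:, 1],
              'class label': y_syn}).to_csv('xor.csv', index=False)
```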
## Multilayer Perceptron with Linear Activations
```
class MLPLinear(torch.nn.Module):
def __init__(self, num_features, num_hidden_1, num_classes):
super(MLPLinear, self).__init__()
self.num_classes = num_classes
self.linear_1 = torch.nn.Linear(num_features, num_hidden_1)
self.linear_out = torch.nn.Linear(num_hidden_1, num_classes)
def forward(self, x):
out = self.linear_1(x)
#out = F.relu(out)
logits = self.linear_out(out)
probas = F.softmax(logits, dim=1)
return logits, probas
torch.manual_seed(RANDOM_SEED)
model1 = MLPLinear(num_features=2,
num_hidden_1=50,
num_classes=2)
model1 = model1.to(DEVICE)
optimizer = torch.optim.SGD(model1.parameters(), lr=0.1)
start_time = time.time()
minibatch_cost = []
NUM_EPOCHS = 25
features = torch.tensor(X, dtype=torch.float).to(DEVICE)
targets = torch.tensor(y, dtype=torch.long).to(DEVICE)
for epoch in range(NUM_EPOCHS):
### FORWARD AND BACK PROP
logits, probas = model1(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
minibatch_cost.append(cost)
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
print (f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} | Cost: {cost:.4f}')
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
from matplotlib.colors import ListedColormap
import numpy as np
def plot_decision_regions(X, y, classifier, resolution=0.02):
# setup marker generator and color map
markers = ('s', 'x', 'o', '^', 'v')
colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
np.arange(x2_min, x2_max, resolution))
tensor = torch.tensor(np.array([xx1.ravel(), xx2.ravel()]).T).float()
logits, probas = classifier.forward(tensor)
Z = np.argmax(probas.detach().numpy(), axis=1)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(), xx1.max())
plt.ylim(xx2.min(), xx2.max())
# plot class samples
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
alpha=0.8, color=cmap(idx),
edgecolor='black',
marker=markers[idx],
label=cl)
plot_decision_regions(features, targets, classifier=model1)
plt.tight_layout()
#plt.savefig('xor1.pdf')
plt.show()
```
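A quick training-accuracy check, using the tensors already defined above, makes the limitation of the purely linear model explicit:
```
# Evaluate the linear MLP on the training set without tracking gradients.
with torch.no_grad():
    logits, probas = model1(features)
    predictions = torch.argmax(probas, dim=1)
    accuracy = (predictions == targets).float().mean().item()
print(f'Training accuracy (linear activations): {accuracy:.2%}')
```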
<br>
<br>
## Multilayer Perceptron with Non-Linear Activations (Here: ReLU)
```
class MLPReLU(torch.nn.Module):
def __init__(self, num_features, num_hidden_1, num_classes):
super(MLPReLU, self).__init__()
self.num_classes = num_classes
self.linear_1 = torch.nn.Linear(num_features, num_hidden_1)
self.linear_out = torch.nn.Linear(num_hidden_1, num_classes)
def forward(self, x):
out = self.linear_1(x)
out = F.relu(out)
logits = self.linear_out(out)
probas = F.softmax(logits, dim=1)
return logits, probas
torch.manual_seed(RANDOM_SEED)
model2 = MLPReLU(num_features=2,
num_hidden_1=50,
num_classes=2)
model2 = model2.to(DEVICE)
optimizer = torch.optim.SGD(model2.parameters(), lr=0.1)
start_time = time.time()
minibatch_cost = []
NUM_EPOCHS = 25
features = torch.tensor(X, dtype=torch.float).to(DEVICE)
targets = torch.tensor(y, dtype=torch.long).to(DEVICE)
for epoch in range(NUM_EPOCHS):
### FORWARD AND BACK PROP
logits, probas = model2(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
minibatch_cost.append(cost)
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
print (f'Epoch: {epoch+1:03d}/{NUM_EPOCHS:03d} | Cost: {cost:.4f}')
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
plot_decision_regions(features, targets, classifier=model2)
plt.tight_layout()
#plt.savefig('xor2.pdf')
plt.show()
```
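The per-epoch costs collected in `minibatch_cost` are not visualized above; a small sketch for plotting the training curve (detaching the stored tensors first) could look like this:
```
# Convert the stored cost tensors to plain Python floats before plotting.
costs = [c.detach().cpu().item() for c in minibatch_cost]

plt.plot(range(1, len(costs) + 1), costs, marker='o')
plt.xlabel('Epoch')
plt.ylabel('Cross-entropy cost')
plt.tight_layout()
plt.show()
```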
```
from bokeh.plotting import figure, output_file, show, output_notebook
from bokeh.models import NumeralTickFormatter
from bokeh.io import show
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomJS, Select
from bokeh.plotting import figure
import json
import requests
import pandas as pd
import numpy as np
nrel_long_tilt_tmy2 = []
tilts = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90]
for i in range(len(tilts)):
list_parameters = {"formt": 'JSON', "api_key": "spJFj2l5ghY5jwk7dNfVYs3JHbpR6BOGHQNO8Y9Z", "system_capacity": 4, "module_type": 0, "losses": 14.08,
"array_type": 0, "tilt": tilts[i], "azimuth": 180, "lat": 66.83, "lon": -161.04, "dataset": 'tmy2'}
json_response_tmy2 = requests.get("https://developer.nrel.gov/api/pvwatts/v6", params = list_parameters).json()
new_dataframe = pd.DataFrame(data = json_response_tmy2['outputs'])
nrel_long_tilt_tmy2.append(new_dataframe)
nrel_long_tilt_tmy3 = []
tilts = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90]
for i in range(len(tilts)):
list_parameters = {"formt": 'JSON', "api_key": "spJFj2l5ghY5jwk7dNfVYs3JHbpR6BOGHQNO8Y9Z", "system_capacity": 4, "module_type": 0, "losses": 14.08,
"array_type": 0, "tilt": tilts[i], "azimuth": 180, "lat": 66.83, "lon": -161.04, "dataset": 'tmy3'}
json_response_tmy3 = requests.get("https://developer.nrel.gov/api/pvwatts/v6", params = list_parameters).json()
new_dataframe = pd.DataFrame(data = json_response_tmy3['outputs'])
nrel_long_tilt_tmy3.append(new_dataframe)
annual_production_tmy2 = []
for i in range(len(tilts)):
annual_production_tmy2.append(nrel_long_tilt_tmy2[i]['ac_annual'][2]/4)
annual_production_tmy3 = []
for i in range(len(tilts)):
annual_production_tmy3.append(nrel_long_tilt_tmy3[i]['ac_annual'][2]/4)
output_file("Noorvik_annual.html")
p = figure( x_axis_label='Tilts', y_axis_label='Annual Production (kWh)',plot_width=500, plot_height=250)
# add a line renderer
p.line(tilts, annual_production_tmy2, line_width=2,color='red', legend='TMY2')
p.line(tilts, annual_production_tmy3, line_width=2,color='blue', legend='TMY3')
p.xaxis.ticker = [10,20,30,40,50,60,70,80,90]
p.title.text = "Annual Production at Varying Tilts"
p.title.align = "center"
p.title.text_color = "olive"
p.title.text_font = "times"
p.title.text_font_style = "italic"
p.title.text_font_size = '12pt'
show(p)
# This part is to get the figures showing ac_monthly for different tilts with selections
tilt_data = {f"tilt_{t}": nrel_long_tilt_tmy3[i]['ac_monthly'] for i, t in enumerate(tilts)}
output_file("Noorvik_ac_monthly.html")
x = np.arange(1,13)
source = ColumnDataSource(data=dict(x=x, y=tilt_data['tilt_5'], **tilt_data))
plot = figure(x_axis_label='Month', y_axis_label='Monthly Production(kWh)',plot_height=250)
plot.line(x='x', y='y', source=source)
plot.title.text = "Energy Production of One Year"
plot.title.align = "center"
plot.title.text_color = "olive"
plot.title.text_font = "times"
plot.title.text_font_style = "italic"
plot.title.text_font_size = '15pt'
select = Select(value='tilt_5', options=['tilt_5', 'tilt_10','tilt_15',
'tilt_20','tilt_25','tilt_30',
'tilt_35','tilt_40','tilt_45',
'tilt_50','tilt_55','tilt_60',
'tilt_65','tilt_70','tilt_75',
'tilt_80','tilt_85','tilt_90'])
select.js_on_change('value', CustomJS(args=dict(source=source, select=select), code="""
// make a shallow copy of the current data dict
const new_data = Object.assign({}, source.data)
// update the y column in the new data dict from the appropriate other column
new_data.y = source.data[select.value]
// set the new data on source, BokehJS will pick this up automatically
source.data = new_data
"""))
show(column(plot, select))
annual_production_tmy2 = []
for i in range(len(tilts)):
annual_production_tmy2.append(nrel_long_tilt_tmy2[i]['ac_annual'][2])
annual_production_tmy3 = []
for i in range(len(tilts)):
annual_production_tmy3.append(nrel_long_tilt_tmy3[i]['ac_annual'][2])
d_tmy2 = {'Tilts':tilts,'Annual_production':annual_production_tmy2}
df_tmy2 = pd.DataFrame(d_tmy2)
d_tmy3 = {'Tilts':tilts,'Annual_production':annual_production_tmy3}
df_tmy3 = pd.DataFrame(d_tmy3)
# Then find the row with the maximum annual production
max_tilt_tmy2 = int(df_tmy2[['Annual_production']].idxmax().values)
max_tilt_tmy3 = int(df_tmy3[['Annual_production']].idxmax().values)
# Then calculate each tilt's loss relative to the maximum annual production
lose_tmy2 = []
for index, row in df_tmy2.iterrows():
tilt_loss = 1- row['Annual_production']/df_tmy2['Annual_production'][max_tilt_tmy2]
lose_tmy2.append(tilt_loss)
df_tmy2['loss']=lose_tmy2
lose_tmy3 = []
for index, row in df_tmy3.iterrows():
tilt_loss = 1- row['Annual_production']/df_tmy3['Annual_production'][max_tilt_tmy3]
lose_tmy3.append(tilt_loss)
df_tmy3['loss']=lose_tmy3
output_file("Noorvik_tilts_loss.html")
p = figure(x_axis_label='Tilts', y_axis_label='loss (%)',plot_width=500, plot_height=250)
# add a line renderer
p.line(tilts, df_tmy2['loss'], line_width=2,color='red',legend="TMY2")
p.line(tilts,df_tmy3['loss'],line_width=2,color='blue',legend="TMY3")
p.xaxis.ticker = [10,20,30,40,50,60,70,80,90]
p.yaxis.formatter = NumeralTickFormatter(format='0 %')
p.title.text = "Annual Production loss of different tilts"
p.title.align = "center"
p.title.text_color = "olive"
p.title.text_font = "times"
p.title.text_font_style = "italic"
p.title.text_font_size = '12pt'
show(p)
```
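As a quick numerical cross-check on the curves above, the tilt that maximizes the modelled annual production can be read directly from the arrays already computed:
```
# Tilt with the highest modelled annual AC production for each weather dataset.
best_tmy2 = tilts[int(np.argmax(annual_production_tmy2))]
best_tmy3 = tilts[int(np.argmax(annual_production_tmy3))]
print(f"Best tilt: {best_tmy2} deg (TMY2), {best_tmy3} deg (TMY3)")
```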
# Primer for the Pokemon Fair Value bond model
The intuition behind the model presented here is relatively simple - the *"Fair Value"* of a long-dated bond should be a function of short-term interest rates, inflation and economic growth. Additionally, as capital allocation becomes increasingly global, it is reasonable that our *"Fair Value"* should include a global factor.
$FV_{x} = \beta_{x,0} + \beta_{x,1}\times \text{Rates} + \beta_{x,2}\times \text{Inf} + \beta_{x,3}\times \text{Growth} + \beta_{x,4}\times \text{Global} + \epsilon_{x}$
Mathematically we are indifferent to the choice of inputs, as we are simply calculating a multivariate linear regression over several rolling windows; practically, variable selection is probably the most challenging part of the model. If we assume markets attempt to incorporate forward estimates into prices, then similarly we would prefer to incorporate forward-looking macro variables into our estimates. Exact inputs are covered below, but in general:
* Short-Rates - forward path of cash rates is modelled by the current 2-year rate
* Inflation - ideally taken from market implied inflation from breakeven rates or inflation swaps
* Growth - survey based (ISM) or from Composite Leading Indicators like the [OECD CLIs](http://www.oecd.org/sdd/leading-indicators/)
* Global Factor - **most difficult** but could be the [BIS REER](https://www.bis.org/statistics/eer.htm) or a blend of global bonds
## Fair Value Output
## Sample Python Code
Where the Pokemon FV model has been presented elsewhere in the chartbook it has been calculated using a homemade class function - eventually this will be wrapped and posted to PyPI - but the code below is a sample of what is happening.
```
def pokemon(df, window=36, validate_data=True):
# REQUIREMENTS:
# import pandas as pd
# from sklearn.linear_model import LinearRegression
# INPUTS:
# df - DataFrame with datetimeindex, col[0] as long bond & col[1+] as regression variables
# window - rolling regression window; default is 36 months
# validate_data - forward fills missing data (useful where we may be missing current month)
# OUTPUTS:
# guess - DataFrame with long-bond, full sample and rolling model outputs
# stats - R^2, intercept and regression coefficients for the full sample and each rolling window
if validate_data:
df.fillna(method='ffill', inplace=True) # Forward fill missing data
df = df[~df.isna().any(axis=1)] # Each row complete
# Dummy Dataframe(s)
guess = pd.DataFrame(data=df.iloc[:,0])
guess.columns = ['long_bond']
coeff_vn = ['r2', 'intercept']
coeff_vn.extend(list(df.columns.values)[1:])
stats = pd.DataFrame(columns=coeff_vn)
# Full Sample Period
y, X = df.iloc[:,0], df.iloc[:,1:]
lm = LinearRegression().fit(X,y)
guess['full_sample'] = lm.predict(X)
stats.loc['full_sample',['r2','intercept']]=lm.score(X, y),lm.intercept_
stats.loc['full_sample', coeff_vn[2:]] = lm.coef_
# Rolling Window
for i, v in enumerate(guess.index):
if i < window:
continue
y = df.iloc[i-window:i,0] # Dependent variable [long bond]
X = df.iloc[i-window:i,1:] # Independent variables [short, inf, growth]
roll_lm = LinearRegression().fit(X,y)
guess.loc[v,'rolling'] = roll_lm.predict(X)[-1]
stats.loc[v,['r2', 'intercept']] = roll_lm.score(X, y), roll_lm.intercept_
stats.loc[v, coeff_vn[2:]] = roll_lm.coef_
return guess, stats
```
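As a usage illustration only (the data below are synthetic, not the Quandl series described in the next section), the function expects a DataFrame whose first column is the long-bond yield and whose remaining columns are the regressors:
```
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression

# Synthetic monthly data purely to illustrate the call signature.
idx = pd.date_range("2005-01-31", periods=180, freq="M")
rng = np.random.RandomState(42)
regressors = pd.DataFrame(
    {"short_rate": 2 + 0.1 * rng.normal(0, 1, 180).cumsum(),
     "inflation": 2 + rng.normal(0, 0.3, 180),
     "growth": 50 + rng.normal(0, 3, 180),
     "global": 1 + rng.normal(0, 0.2, 180)},
    index=idx,
)
long_bond = (1.5 + 0.6 * regressors["short_rate"]
             + 0.3 * regressors["inflation"] + rng.normal(0, 0.2, 180))
df = pd.concat([long_bond.rename("long_bond"), regressors], axis=1)

guess, stats = pokemon(df, window=36)
guess.tail()
```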
## Data Sources
All data is open source and comes from [Quandl](https://www.quandl.com). For the Pokemon model this means a compromise between what we would consider to be the optimal data and what is easily achievable. Below we note the data used here (and flag where better data may exist); a hedged retrieval sketch follows the lists.
__US Treasuries__
* [US Treasury](https://www.quandl.com/data/USTREASURY/YIELD-Treasury-Yield-Curve-Rates) for nominal 10 & 2 year yields
* [UMich](https://www.quandl.com/data/UMICH/SOC33-University-of-Michigan-Consumer-Survey-Expected-Change-in-Prices-During-the-Next-5-Years) 5-year Median Inflation Expectations
* [ISM Mfg Composite](https://www.quandl.com/data/ISM/MAN_PMI-PMI-Composite-Index) for Growth estimates
__Gilts__
* [Bank-of-England](https://www.quandl.com/data/BOE/IUDMNZC-Yield-From-British-Government-Securities-10-Year-Nominal-Zero-Coupon) 10-year Nominal Zero-Coupon as a proxy for the standard nominal
* [ECB](https://www.quandl.com/data/ECB/FM_M_GB_GBP_RT_MM_GBP3MFSR__HSTA-United-Kingdom-Money-Market-GB-Pound-Sterling-3-month-British-Bankers-Association-Libor-Historical-close-average-of-observations-through-period-UK-pound-sterling-provided-by-Reuters) provide the 3m GBP LIBOR rate; need a better source but the BoE doesn't publish the 2 (or 1) year gilt yields or forward rate data.
* [Rate Inflation](https://www.quandl.com/data/RATEINF/INFLATION_GBR-Inflation-YOY-UK)
* [OECD Amplitude-Adjusted Composite Leading Indicator](https://www.quandl.com/data/OECD/MEI_CLI_LOLITOAA_GBR_M-Amplitude-Adjusted-Cli-United-Kingdom)
__Bunds__
* Bundesbank for the [10-year](https://www.quandl.com/data/BUNDESBANK/BBK01_WT1010-Daily-Yield-Of-The-Current-10-Year-Federal-Bond) & [2-year](https://www.quandl.com/data/BUNDESBANK/BBK01_WT0202-Daily-Yield-Of-The-Current-two-year-Federal-Treasury-Notes) Bund yield. This needs further investigation as the Quandl dataset for the German 2-year only goes back to Jan-14, *which isn't enough for a sensible full-sample model*
* [Rate Inflation](https://www.quandl.com/data/RATEINF/INFLATION_DEU-Inflation-YOY-Germany)
* [OECD Amplitude-Adjusted Composite Leading Indicator](https://www.quandl.com/data/OECD/MEI_CLI_LOLITOAA_DEU_M-Amplitude-Adjusted-Cli-Germany)
__JGBs__
* Ministry of Finance Japan for the [10-year](https://www.quandl.com/data/MOFJ/INTEREST_RATE_JAPAN_10Y-JGB-Interest-Rates-Term-Structure-10Y) & [2-year](https://www.quandl.com/data/MOFJ/INTEREST_RATE_JAPAN_2Y-JGB-Interest-Rates-Term-Structure-2Y) JGBs
* [Rate Inflation](https://www.quandl.com/data/RATEINF/INFLATION_JPN)
* [OECD Amplitude-Adjusted Composite Leading Indicator](https://www.quandl.com/data/OECD/MEI_CLI_LOLITOAA_JPN_M-Amplitude-Adjusted-Cli-Japan)
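A hedged sketch of how the US inputs might be pulled with the `quandl` Python package, using the dataset codes listed above; the column selections are assumptions that should be checked against each dataset:
```
import pandas as pd
import quandl

quandl.ApiConfig.api_key = "YOUR_QUANDL_API_KEY"  # placeholder, not a real key

# Raw series (codes from the lists above); the column choices below are assumptions.
ust = quandl.get("USTREASURY/YIELD")   # daily Treasury curve
umich = quandl.get("UMICH/SOC33")      # monthly 5-year median inflation expectations
ism = quandl.get("ISM/MAN_PMI")        # monthly ISM manufacturing PMI

# Monthly frame: first column is the long bond, the rest are regressors.
df_us = pd.concat(
    {
        "long_bond": ust["10 YR"].resample("M").last(),
        "short_rate": ust["2 YR"].resample("M").last(),
        "inflation": umich.iloc[:, 0].resample("M").last(),
        "growth": ism.iloc[:, 0].resample("M").last(),
        # a "Global" regressor (e.g. a trade-weighted index) could be appended here
    },
    axis=1,
).dropna()

guess_us, stats_us = pokemon(df_us, window=36)
```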
## Small Print
It should come as no surprise that the concept presented here is not new. Any CFA candidate who has been even vaguely diligent should recognise the basis. [NDR](https://www.ndr.com/group/ndr/content-viewer/-/v/B0410A) do some excellent modelling and often present a simplified version and [Goldman Sachs Research](https://research.gs.com/) have been incorporating a [forward looking Sudoku Fair Value](http://www.verstyuk.net/papers/GlobalViewpoint.07-24.pdf) model in their Fixed Income research since 2007; the GS model is probably superior as they take more time developing a trade-weighted variable for the *"Global"* factor.
Pokemon is indeed a nod to the children's cartoon series. The point at which the model was developed happened to coincide with the Pokemon Go phenomenon in the UK, during which time several PMs and Analysts were found wandering Green Park in London in search of Pokemon.
<a href="https://colab.research.google.com/github/Elkinmt19/data-engineer-dojo/blob/main/Ejercicio_PRAGMA_ElkinJavierGuerraGaleano.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **Data Scientist Practical Test**
## ***Organizing the workspace***
First of all, it is very important to set up the virtual machine, installing all the dependencies needed to work with Python, as well as bringing in the file containing the dataset.
## ***Cloning the repository with the required files***
```
# Clone the source repository
!git clone https://github.com/Elkinmt19/data-engineer-dojo.git
# Check the Python version and install the required libraries
%cd data-engineer-dojo
!python3 --version
!pip3 install -r requirements.txt
%cd data_engineer_test
```
## ***Filtering the dataset to remove the noise***
---
```
# Running the "filter_dataset.py" script removes the noise from the dataset and creates a new clean file
!python3 filter_dataset.py
```
## ***Defining some important libraries and getting the information from the filtered .csv file***
```
# Built-in imports
import os
import sys
import csv
# External imports
import numpy as np
# My Own imports
import get_path_dataset_folder as gpaf
# Get dataset folder in repo for the samples
DATASET_FOLDER = gpaf.get_dataset_folder_path()
def get_path_file(file):
"""
This is a simple python function that gives us the absolute path of the
dataset's file according to the name of the file.
"""
path_dataset_file = os.path.join(
DATASET_FOLDER,
file
)
return path_dataset_file
# Get the data from the csv file
with open(get_path_file("USvideos_clean.csv"), 'r', encoding="utf8") as file:
csvreader = csv.reader(file)
features = next(csvreader)
features = features[:11]
rows = np.array([[None]*len(features)])
for row in csvreader:
rows = np.concatenate((rows,np.array([row[:11]])),axis=0)
rows = np.delete(rows, 0, 0)
file.close()
# Show the variables that can be obtained from the file
print(f"We have {len(features)} variables")
print(features)
```
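For reference, the same filtered file could also be loaded with pandas; note that, unlike the `csv.reader` loop above, `read_csv` parses numeric columns rather than keeping every value as a string:
```
import pandas as pd

# Load only the first 11 columns of the cleaned CSV.
df_videos = pd.read_csv(get_path_file("USvideos_clean.csv"), encoding="utf8").iloc[:, :11]
print(f"We have {df_videos.shape[1]} variables")
print(list(df_videos.columns))
```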
## ***Getting the information from the .json file***
```
# Built-in imports
import json
# Get the data from the json file
with open(get_path_file("US_category_id.json"), "r") as read_it:
data = json.load(read_it)
elements = data["items"]
features_category = ["id", "channelId", "title"]
rows_category = []
for it in elements:
row_buff = [it["id"], it["snippet"]["channelId"], it["snippet"]["title"]]
rows_category.append(row_buff)
read_it.close()
rows_category = np.array(rows_category)
id2category = {rows_category[i,0]: rows_category[i,2] for i in range(len(rows_category[:,0]))}
# Show the variables that can be obtained from the file
print(f"We have {len(features_category)} variables")
print(features_category)
```
## ***Splitting the dataset into several groups or clusters according to the date***
```
# Split the complete dataset by the trending date in order to make the analysis
clusters = np.split(
rows,
np.where(np.diff(np.array([x.replace('.', '') for x in rows[:,1]]).astype(int)))[0]+1
)
# Check that the number of clusters is correct
print(f"There are {len(clusters)} days")
```
## ***Splitting the dataset into several groups or clusters according to the video category id***
```
# Create the function to make the video category analysis
def category_views_likes_analysis(info_featu):
# Split the dataset by the category_id
elem , idxs = np.unique(info_featu[:,4], return_index=True)
clusters = np.split(info_featu, idxs[1:])
category_data = []
for i in range(len(clusters)):
buff = [int(elem[i]), sum(clusters[i][:,7].astype(int)), sum(clusters[i][:,8].astype(int))]
category_data.append(buff)
category_data = np.array(category_data).astype(int)
return category_data
# Define a variable to save all the information about the category of the videos
category_data_date = []
views_max = []
likes_max = []
for dt in clusters:
category_data_date.append(category_views_likes_analysis(dt))
for cid in category_data_date:
# Find the category with the largest number of views and likes
view_idx = cid[:,1].argmax(axis=0)
like_idx = cid[:,2].argmax(axis=0)
buff_view = [cid[view_idx,0], cid[view_idx,1]]
buff_like = [cid[like_idx,0], cid[like_idx,1]]
views_max.append(buff_view)
likes_max.append(buff_like)
views_max = np.array(views_max)
likes_max = np.array(likes_max)
# Get the frequency of the category_id with the max number of views and likes
unique_views, counts_views = np.unique(views_max[:,0], return_counts=True)
unique_likes, counts_likes = np.unique(likes_max[:,0], return_counts=True)
views_data_unique = np.asarray((unique_views, counts_views)).T
likes_data_unique = np.asarray((unique_likes, counts_likes)).T
```
## ***Plotting some important metrics***
```
# External imports
import matplotlib.pyplot as plt
def greater(el, maxi):
if (el == maxi):
return 0.1
return 0
# Plot the categories with more views
labels = [id2category[str(x)] for x in views_data_unique[:,0]]
sizes = views_data_unique[:,1]
explode = [greater(x,max(views_data_unique[:,1])) for x in views_data_unique[:,1]]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
# Plot the categories with more likes
labels = [id2category[str(x)] for x in likes_data_unique[:,0]]
sizes = likes_data_unique[:,1]
explode = [greater(x,max(likes_data_unique[:,1])) for x in likes_data_unique[:,1]]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal')
plt.show()
```
## ***Preparing the dataset for the analysis of the videos' publish date***
```
# Split the complete dataset by the publish time in order to make the analysis
elem_publish, idxs_publish = np.unique([x[5:7] for x in rows[:,5]], return_index=True)
clusters_publish_month = np.split(rows, idxs_publish[1:])
# Define some important variables to make the analysis
mean_publish_month = []
months = {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December"
}
# Get the average value of views by the publish month of the videos
for i in range(len(clusters_publish_month)):
buff_array = clusters_publish_month[i]
buff = [int(elem_publish[i]), np.mean(buff_array[:,7].astype(int))]
mean_publish_month.append(buff)
mean_publish_month = np.array(mean_publish_month)
mean_publish_month = mean_publish_month[mean_publish_month[:, -1] > 0]
```
## ***Plotting the resulting information***
```
# Plot the results
fig = plt.figure(figsize = (10, 5))
# creating the bar plot
plt.bar(
[months[x] for x in mean_publish_month[:,0]],
mean_publish_month[:,1],
color ='maroon',
width = 0.4
)
plt.xlabel("Mes de publicación del video")
plt.ylabel("Promedio de vistas")
plt.title("Relación entre el mes de publicación con las vistas de los videos")
plt.show()
```
## ***Preparing the datasets for the different analyses***
```
"""
The input variables of the model are gonna be the following:
- category_id
- trending_day
- trending_month
- trending_year
- dislikes
and the output is gonna be the variable 'likes'
"""
# Get the input's array
category_input = rows[:,4].reshape((len(rows), 1)).astype(int)
dislike_input = rows[:,9].reshape((len(rows), 1)).astype(int)
trending = rows[:,1]
day_input = np.array([int(x[6:]) for x in trending]).reshape((len(rows), 1))
month_input = np.array([int(x[3:5]) for x in trending]).reshape((len(rows), 1))
year_input = np.array([int(x[:2]) for x in trending]).reshape((len(rows), 1))
# Inputs for training the model (The regression models)
inputs = np.hstack((
category_input,
day_input,
month_input,
year_input,
dislike_input
))
# Get the desired array in order to training the model (The regression models)
desired_views = rows[:,7].astype(int)
desired_likes = rows[:,8].astype(int)
```
## ***Training the views and likes models***
```
# External imports
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LinearRegression
# Build a linear regression model - views
reg_view = LinearRegression().fit(inputs, desired_views)
print("LINEAR REGRESSION MODEL INFORMATION - VIEWS")
print(f"The R^2 es: {reg_view.score(inputs, desired_views)}")
print(f"The coefficients are: {reg_view.coef_}")
print(f"The intercept is: {reg_view.intercept_}")
# Build a linear regression model - likes
reg_like = LinearRegression().fit(inputs, desired_likes)
print("LINEAR REGRESSION MODEL INFORMATION - LIKES")
print(f"The R^2 es: {reg_like.score(inputs, desired_likes)}")
print(f"The coefficients are: {reg_like.coef_}")
print(f"The intercept is: {reg_like.intercept_}")
# External imports
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
# Analysis of the model (linear regression)
def plot_learning_curve(
estimator,
title,
X,
y,
axes=None,
ylim=None,
cv=None,
n_jobs=None,
train_sizes=np.linspace(0.1, 1.0, 5),
):
if axes is None:
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].set_title(title)
if ylim is not None:
axes[0].set_ylim(*ylim)
axes[0].set_xlabel("Training examples")
axes[0].set_ylabel("Score")
train_sizes, train_scores, test_scores, fit_times, _ = learning_curve(
estimator,
X,
y,
cv=cv,
n_jobs=n_jobs,
train_sizes=train_sizes,
return_times=True,
)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
fit_times_mean = np.mean(fit_times, axis=1)
fit_times_std = np.std(fit_times, axis=1)
# Plot learning curve
axes[0].grid()
axes[0].fill_between(
train_sizes,
train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std,
alpha=0.1,
color="r",
)
axes[0].fill_between(
train_sizes,
test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std,
alpha=0.1,
color="g",
)
axes[0].plot(
train_sizes, train_scores_mean, "o-", color="r", label="Training score"
)
axes[0].plot(
train_sizes, test_scores_mean, "o-", color="g", label="Cross-validation score"
)
axes[0].legend(loc="best")
# Plot n_samples vs fit_times
axes[1].grid()
axes[1].plot(train_sizes, fit_times_mean, "o-")
axes[1].fill_between(
train_sizes,
fit_times_mean - fit_times_std,
fit_times_mean + fit_times_std,
alpha=0.1,
)
axes[1].set_xlabel("Training examples")
axes[1].set_ylabel("fit_times")
axes[1].set_title("Scalability of the model")
# Plot fit_time vs score
fit_time_argsort = fit_times_mean.argsort()
fit_time_sorted = fit_times_mean[fit_time_argsort]
test_scores_mean_sorted = test_scores_mean[fit_time_argsort]
test_scores_std_sorted = test_scores_std[fit_time_argsort]
axes[2].grid()
axes[2].plot(fit_time_sorted, test_scores_mean_sorted, "o-")
axes[2].fill_between(
fit_time_sorted,
test_scores_mean_sorted - test_scores_std_sorted,
test_scores_mean_sorted + test_scores_std_sorted,
alpha=0.1,
)
axes[2].set_xlabel("fit_times")
axes[2].set_ylabel("Score")
axes[2].set_title("Performance of the model")
return plt
# Define the variables to make the plots
fig, axes = plt.subplots(3, 2, figsize=(10, 15))
title = "Linear Regression - views"
# Cross validation with 50 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=50, test_size=0.2, random_state=0)
estimator = LinearRegression()
plot_learning_curve(
estimator, title, inputs, desired_views, axes=axes[:, 0], cv=cv, n_jobs=4
)
title = "Linear Regression - likes"
# Cross validation with 50 iterations to get smoother mean test and train
# score curves, each time with 20% data randomly selected as a validation set.
cv = ShuffleSplit(n_splits=50, test_size=0.2, random_state=0)
estimator = LinearRegression()
plot_learning_curve(
estimator, title, inputs, desired_likes, axes=axes[:, 1], cv=cv, n_jobs=4
)
plt.show()
```
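As a closing usage note, the fitted regressions can be queried for point estimates. The feature order must match `inputs` (category_id, trending day, trending month, trending year, dislikes); the values below are purely hypothetical:
```
# Hypothetical video: category 10, trending day 14, month 6, year 18, and 2,000 dislikes.
sample = np.array([[10, 14, 6, 18, 2000]])
print(f"Predicted views: {reg_view.predict(sample)[0]:,.0f}")
print(f"Predicted likes: {reg_like.predict(sample)[0]:,.0f}")
```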
|
github_jupyter
|
| 0.438064 | 0.889864 |
This example shows how to:
1. Load a counts matrix (10X Chromium data from human peripheral blood cells)
2. Run the default Scrublet pipeline
3. Check that doublet predictions make sense
```
import sys
sys.path
%matplotlib inline
import scrublet as scr
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Arial'
plt.rc('font', size=14)
plt.rcParams['pdf.fonttype'] = 42
```
#### Load counts matrix and gene list
Load the raw counts matrix as a scipy sparse matrix with cells as rows and genes as columns.
```
input_dir = '/home/ubuntu/velocyto/Hs_Trachea/Apr3_v3chemistry/GA23wk/'
counts_matrix = scipy.io.mmread(input_dir + '/matrix.mtx').T.tocsc()
genes = np.array(scr.load_genes(input_dir + 'features.tsv', delimiter='\t', column=1))
print('Counts matrix shape: {} rows, {} columns'.format(counts_matrix.shape[0], counts_matrix.shape[1]))
print('Number of genes in gene list: {}'.format(len(genes)))
```
#### Initialize Scrublet object
The relevant parameters are:
- *expected_doublet_rate*: the expected fraction of transcriptomes that are doublets, typically 0.05-0.1. Results are not particularly sensitive to this parameter. For this example, the expected doublet rate comes from the Chromium User Guide: https://support.10xgenomics.com/permalink/3vzDu3zQjY0o2AqkkkI4CC
- *sim_doublet_ratio*: the number of doublets to simulate, relative to the number of observed transcriptomes. This should be high enough that all doublet states are well-represented by simulated doublets. Setting it too high is computationally expensive. The default value is 2, though values as low as 0.5 give very similar results for the datasets that have been tested.
- *n_neighbors*: Number of neighbors used to construct the KNN classifier of observed transcriptomes and simulated doublets. The default value of `round(0.5*sqrt(n_cells))` generally works well.
```
scrub = scr.Scrublet(counts_matrix, expected_doublet_rate=0.14)
```
#### Run the default pipeline, which includes:
1. Doublet simulation
2. Normalization, gene filtering, rescaling, PCA
3. Doublet score calculation
4. Doublet score threshold detection and doublet calling
```
doublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=2,
min_cells=3,
min_gene_variability_pctl=85,
n_prin_comps=30)
```
#### Plot doublet score histograms for observed transcriptomes and simulated doublets
The simulated doublet histogram is typically bimodal. The left mode corresponds to "embedded" doublets generated by two cells with similar gene expression. The right mode corresponds to "neotypic" doublets, which are generated by cells with distinct gene expression (e.g., different cell types) and are expected to introduce more artifacts in downstream analyses. Scrublet can only detect neotypic doublets.
To call doublets vs. singlets, we must set a threshold doublet score, ideally at the minimum between the two modes of the simulated doublet histogram. `scrub_doublets()` attempts to identify this point automatically and has done a good job in this example. However, if automatic threshold detection doesn't work well, you can adjust the threshold with the `call_doublets()` function. For example:
```python
scrub.call_doublets(threshold=0.25)
```
```
scrub.plot_histogram();
```
#### Get 2-D embedding to visualize the results
```
print('Running UMAP...')
scrub.set_embedding('UMAP', scr.get_umap(scrub.manifold_obs_, 10, min_dist=0.3))
# # Uncomment to run tSNE - slow
# print('Running tSNE...')
# scrub.set_embedding('tSNE', scr.get_tsne(scrub.manifold_obs_, angle=0.9))
# # Uncomment to run force layout - slow
# print('Running ForceAtlas2...')
# scrub.set_embedding('FA', scr.get_force_layout(scrub.manifold_obs_, n_neighbors=5, n_iter=1000))
print('Done.')
```
#### Plot doublet predictions on 2-D embedding
Predicted doublets should co-localize in distinct states.
```
scrub.plot_embedding('UMAP', order_points=True);
# scrub.plot_embedding('tSNE', order_points=True);
# scrub.plot_embedding('FA', order_points=True);
print(doublet_scores)
print(predicted_doublets)
sum(predicted_doublets)
len(predicted_doublets)
cwd = os.getcwd()
print(cwd)
doublet_scores.tofile('GA23wk_Apr3_v3_doubletScore.csv',sep=',',format='%s')
min(doublet_scores[predicted_doublets])
```
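A common follow-up step, not shown in this notebook, is to drop the called doublets from the counts matrix before downstream analysis. A minimal sketch, assuming `counts_matrix` and `predicted_doublets` from above:
```python
keep_idx = np.flatnonzero(~np.array(predicted_doublets, dtype=bool))  # indices of predicted singlets
counts_matrix_singlets = counts_matrix[keep_idx, :]
print('Kept {} of {} cells after removing predicted doublets'.format(
    counts_matrix_singlets.shape[0], counts_matrix.shape[0]))
```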
|
github_jupyter
|
| 0.123524 | 0.979136 |
# 7.5. Batch Normalization
:label:`sec_batch_norm`
Training deep neural networks is difficult, and getting them to converge in a reasonable amount of time can be tricky.
In this section, we introduce *batch normalization* :cite:`Ioffe.Szegedy.2015`, a popular and effective technique that consistently accelerates the convergence of deep networks.
Together with the residual blocks to be covered in :numref:`sec_resnet`, batch normalization has made it possible to routinely train networks with more than 100 layers.
## 7.5.1. Training Deep Networks
Why do we need batch normalization layers? Let us review some of the practical challenges that arise when training neural networks.
First, choices regarding data preprocessing often have an enormous effect on the final results.
Recall our application of multilayer perceptrons to predicting house prices (:numref:`sec_kaggle_house`).
When working with real data, our first step was to standardize the input features so that each has a mean of 0 and a variance of 1.
Intuitively, this standardization plays nicely with our optimizers because it puts the parameters on a similar scale.
Second, for a typical multilayer perceptron or convolutional neural network, the variables in intermediate layers (e.g., the affine-transformation outputs in a multilayer perceptron) may take values with widely varying magnitudes during training: whether along the layers from input to output, across units within the same layer, or over time as the model parameters are updated.
The inventors of batch normalization postulated informally that this drift in the distribution of such variables could hamper the convergence of the network.
Intuitively, we might conjecture that if one layer has variable values 100 times larger than those of another layer, this could necessitate compensatory adjustments to the learning rate.
Third, deeper networks are complex and prone to overfitting.
This means that regularization becomes ever more important.
Batch normalization is applied to individual layers (optionally, to all of them) and works as follows: in each training iteration, we first normalize the inputs by subtracting their mean and dividing by their standard deviation, where both are estimated from the current minibatch.
Next, we apply a scale coefficient and a shift.
It is precisely due to this *normalization* based on *batch* statistics that *batch normalization* derives its name.
Note that if we tried to apply batch normalization with minibatches of size 1, we would not be able to learn anything:
after subtracting the mean, each hidden unit would take the value 0.
Batch normalization is therefore only effective and stable with sufficiently large minibatches.
Keep in mind that when applying batch normalization, the choice of batch size may matter even more than it would without it.
Formally, denoting by $\mathbf{x} \in \mathcal{B}$ an input to batch normalization ($\mathrm{BN}$) drawn from a minibatch $\mathcal{B}$, batch normalization transforms $\mathbf{x}$ according to the following expression:
$$\mathrm{BN}(\mathbf{x}) = \boldsymbol{\gamma} \odot \frac{\mathbf{x} - \hat{\boldsymbol{\mu}}_\mathcal{B}}{\hat{\boldsymbol{\sigma}}_\mathcal{B}} + \boldsymbol{\beta}.$$
:eqlabel:`eq_batchnorm`
In :eqref:`eq_batchnorm`, $\hat{\boldsymbol{\mu}}_\mathcal{B}$ is the sample mean and $\hat{\boldsymbol{\sigma}}_\mathcal{B}$ is the sample standard deviation of the minibatch $\mathcal{B}$.
After applying standardization, the resulting minibatch has zero mean and unit variance.
Because the choice of unit variance (versus some other magic number) is arbitrary, we commonly include a
*scale parameter* $\boldsymbol{\gamma}$ and a *shift parameter* $\boldsymbol{\beta}$, which have the same shape as $\mathbf{x}$.
Note that $\boldsymbol{\gamma}$ and $\boldsymbol{\beta}$ are parameters that need to be learned jointly with the other model parameters.
Consequently, the magnitudes of intermediate layers cannot diverge during training, because batch normalization actively centers them and rescales them back to a given mean and size (via $\hat{\boldsymbol{\mu}}_\mathcal{B}$ and ${\hat{\boldsymbol{\sigma}}_\mathcal{B}}$).
Formally, we calculate $\hat{\boldsymbol{\mu}}_\mathcal{B}$ and ${\hat{\boldsymbol{\sigma}}_\mathcal{B}}$ in :eqref:`eq_batchnorm` as follows:
$$\begin{aligned} \hat{\boldsymbol{\mu}}_\mathcal{B} &= \frac{1}{|\mathcal{B}|} \sum_{\mathbf{x} \in \mathcal{B}} \mathbf{x},\\
\hat{\boldsymbol{\sigma}}_\mathcal{B}^2 &= \frac{1}{|\mathcal{B}|} \sum_{\mathbf{x} \in \mathcal{B}} (\mathbf{x} - \hat{\boldsymbol{\mu}}_{\mathcal{B}})^2 + \epsilon.\end{aligned}$$
Note that we add a small constant $\epsilon > 0$ to the variance estimate to ensure that we never attempt division by zero, even when the empirical variance estimate might vanish. The estimates $\hat{\boldsymbol{\mu}}_\mathcal{B}$ and ${\hat{\boldsymbol{\sigma}}_\mathcal{B}}$ counteract the scaling issue by using noisy estimates of the mean and variance.
You might think that this noise should be a problem; as it turns out, it is actually beneficial.
This is a recurring theme in deep learning.
For reasons that are not yet well characterized theoretically, various sources of noise in optimization often lead to faster training and less overfitting: this variation appears to act as a form of regularization.
In some preliminary work, :cite:`Teye.Azizpour.Smith.2018` and :cite:`Luo.Wang.Shao.ea.2018` respectively relate the properties of batch normalization to Bayesian priors.
These theories shed light on the puzzle of why batch normalization works best with moderate minibatch sizes in the $50 \sim 100$ range.
Also note that batch normalization layers function differently in "training mode" (normalizing by minibatch statistics) and in "prediction mode" (normalizing by dataset statistics).
During training, we cannot use the entire dataset to estimate the mean and variance, so we keep training the model with the mean and variance of each minibatch.
In prediction mode, by contrast, the mean and variance needed for batch normalization can be computed precisely over the whole dataset.
Now, let us take a look at how batch normalization works in practice.
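Before moving on, here is a minimal NumPy sketch (not part of the original text) that applies :eqref:`eq_batchnorm` to a toy minibatch in training mode; `gamma` and `beta` are shown at their conventional initial values of one and zero.
```
import numpy as np

X = np.array([[1., 2., 3.],
              [2., 3., 4.],
              [3., 4., 5.],
              [4., 5., 6.]])            # toy minibatch: 4 examples, 3 features
eps = 1e-5
mu_B = X.mean(axis=0)                   # per-feature sample mean
var_B = X.var(axis=0)                   # per-feature sample variance
X_hat = (X - mu_B) / np.sqrt(var_B + eps)
gamma, beta = np.ones(3), np.zeros(3)   # learnable scale and shift
Y = gamma * X_hat + beta
print(X_hat.mean(axis=0), X_hat.std(axis=0))  # approximately 0 and 1 for every feature
```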
## 7.5.2. Batch Normalization Layers
Recall that a key difference between batch normalization and other layers is that, because batch normalization operates on a full minibatch at a time, we cannot ignore the batch dimension as we did before when introducing other layers.
We discuss both cases below: fully connected layers and convolutional layers, whose batch normalization implementations differ slightly.
### 7.5.2.1. Fully Connected Layers
Typically, we insert the batch normalization layer between the affine transformation and the activation function in a fully connected layer.
Denoting the input to the fully connected layer by $\mathbf{x}$, the weight and bias parameters by $\mathbf{W}$ and $\mathbf{b}$, the activation function by $\phi$, and the batch normalization operator by $\mathrm{BN}$,
the output of a fully connected layer with batch normalization is computed as follows:
$$\mathbf{h} = \phi(\mathrm{BN}(\mathbf{W}\mathbf{x} + \mathbf{b}) ).$$
Recall that the mean and variance are computed on the *same* minibatch on which the transformation is applied.
### 7.5.2.2. Convolutional Layers
Similarly, for convolutional layers, we can apply batch normalization after the convolution and before the nonlinear activation function.
When the convolution has multiple output channels, we carry out batch normalization for *each* of these output channels, and each channel has its own scale and shift parameters, both of which are scalars.
Assume that our minibatch contains $m$ examples and that, for each channel, the output of the convolution has height $p$ and width $q$.
For convolutional layers, we carry out each batch normalization over the $m \cdot p \cdot q$ elements per output channel simultaneously.
Thus, when computing the mean and variance, we collect the values over all spatial locations and then apply the same mean and variance within a given channel to normalize the value at each spatial location.
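Concretely, for a minibatch of feature maps stored in NCHW layout, the per-channel statistics can be computed as in this small NumPy sketch (illustrative only; the implementation below does the same with Paddle):
```
import numpy as np

X = np.random.randn(8, 3, 5, 5)             # m=8 examples, 3 channels, 5x5 feature maps
mu = X.mean(axis=(0, 2, 3), keepdims=True)  # one mean per channel, shape (1, 3, 1, 1)
var = X.var(axis=(0, 2, 3), keepdims=True)  # one variance per channel
X_hat = (X - mu) / np.sqrt(var + 1e-5)      # broadcasts over batch and spatial dimensions
print(mu.shape, X_hat.shape)                # (1, 3, 1, 1) (8, 3, 5, 5)
```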
### 7.5.2.3. Batch Normalization During Prediction
As mentioned earlier, batch normalization typically behaves differently in training mode and prediction mode.
First, once the model is trained, we no longer need the noise in the sample mean and variance that arises from estimating them on each minibatch.
Second, we might need the model to make predictions one example at a time.
A common approach is to estimate the sample mean and variance of the entire training dataset via moving averages and to use these at prediction time to produce deterministic outputs.
So, just like dropout, batch normalization layers compute different results in training mode and in prediction mode.
## (**7.5.3. Implementation from Scratch**)
Below, we implement a batch normalization layer with tensors from scratch.
```
import numpy as np
import paddle
import paddle.nn as nn
import paddle.fluid.layers as layers
def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum, is_training=True):
    # Batch normalization behaves differently in training mode and prediction mode
    if not is_training:
        # In prediction mode, use the moving-average mean and variance passed in
        X_hat = (X - moving_mean) / (moving_var + eps) ** 0.5
    else:
        assert len(X.shape) in (2, 4)
        if len(X.shape) == 2:
            # Fully connected layer: compute the mean and variance along the feature axis
            mean = paddle.mean(X, axis=0)
            var = paddle.mean(((X - mean) ** 2), axis=0)
        else:
            # 2D convolutional layer: compute per-channel (axis=1) mean and variance,
            # keeping X's shape so that broadcasting works later
            mean = paddle.mean(X, axis=(0, 2, 3), keepdim=True)
            var = paddle.mean(((X - mean) ** 2), axis=(0, 2, 3), keepdim=True)
        # In training mode, standardize with the current minibatch mean and variance
        X_hat = (X - mean) / (var + eps) ** 0.5
        # Update the moving averages of the mean and variance
        moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
        moving_var = momentum * moving_var + (1.0 - momentum) * var
    Y = gamma * X_hat + beta  # scale and shift
return Y, moving_mean, moving_var
```
We can now [**create a proper `BatchNorm` layer**].
This layer maintains the appropriate parameters: the scale `gamma` and the shift `beta`, both of which are updated during training.
In addition, the layer keeps moving averages of the mean and variance for later use during model prediction.
Putting aside the algorithmic details, note the design pattern underlying our implementation of the layer.
Typically, we define the mathematics in a separate function, say `batch_norm`.
We then integrate this functionality into a custom layer, whose code mostly handles bookkeeping matters such as moving data to the training device (e.g., a GPU), allocating and initializing any required variables, and keeping track of moving averages (here of mean and variance).
For convenience, we do not bother to infer the input shape automatically here, so we need to specify the number of features explicitly.
Do not worry: the batch normalization APIs in deep learning frameworks handle this for us, as we will demonstrate later.
```
class BatchNorm(nn.Layer):
def __init__(self, num_features, num_dims=4):
super(BatchNorm, self).__init__()
if num_dims == 2:
shape = (1, num_features)
else:
shape = (1, num_features, 1, 1)
        # Scale and shift parameters (updated via gradients during training), initialized to 1 and 0 respectively
self.gamma = self.create_parameter(
attr=None,
shape=shape,
dtype='float32',
is_bias=False,
default_initializer=nn.initializer.Assign(paddle.ones(shape=shape, dtype='float32')))
self.beta = self.create_parameter(
attr=None,
shape=shape,
dtype='float32',
is_bias=False,
default_initializer=nn.initializer.Assign(paddle.zeros(shape=shape, dtype='float32')))
self.moving_mean = paddle.zeros(shape=shape, dtype='float32')
        self.moving_var = paddle.ones(shape=shape, dtype='float32')  # running variance is conventionally initialized to 1
def forward(self, X):
        # Save the updated moving_mean and moving_var
Y, self.moving_mean, self.moving_var = batch_norm(
X, self.gamma, self.beta, self.moving_mean,
self.moving_var, eps=1e-5, momentum=0.9, is_training=self.training)
return Y
```
## 7.5.4. Using Batch Normalization in LeNet
To see how to [**apply `BatchNorm`**] in context, below we apply it (**to the LeNet model**) (:numref:`sec_lenet`).
Recall that batch normalization is applied after the convolutional or fully connected layers but before the corresponding activation functions.
```
class LeNet(nn.Layer):
def __init__(self, num_dim=1, num_class=10):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2D(num_dim, 6, 5)
self.bn1 = BatchNorm(6, num_dims=4)
self.pool1 = nn.MaxPool2D(2, stride=2)
self.conv2 = nn.Conv2D(6, 16, 5)
self.bn2 = BatchNorm(16, num_dims=4)
self.pool2 = nn.MaxPool2D(2, stride=2)
self.fc1 = nn.Linear(256, 120)
self.bn3 = BatchNorm(120, num_dims=2)
self.fc2 = nn.Linear(120, 84)
self.bn4 = BatchNorm(84, num_dims=2)
self.fc3 = nn.Linear(84, num_class)
def forward(self, X):
Y = self.conv1(X)
Y = layers.sigmoid(self.bn1(Y))
Y = self.pool1(Y)
Y = self.conv2(Y)
Y = layers.sigmoid(self.bn2(Y))
Y = self.pool2(Y)
Y = paddle.flatten(Y, start_axis=1)
Y = self.fc1(Y)
Y = layers.sigmoid(self.bn3(Y))
Y = self.fc2(Y)
Y = layers.sigmoid(self.bn4(Y))
Y = self.fc3(Y)
return Y
```
As before, we will [**train our network on the Fashion-MNIST dataset**].
This code is virtually identical to the one we used when we first trained LeNet (:numref:`sec_lenet`).
```
import paddle
import paddle.vision.transforms as T
from paddle.vision.datasets import FashionMNIST
# Prepare the dataset
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5]),
])
train_dataset = FashionMNIST(mode='train', transform=transform)
val_dataset = FashionMNIST(mode='test', transform=transform)
# Define the model
model = paddle.Model(LeNet(num_class=10))
# Configure the optimizer, loss, and metric needed to train the model
model.prepare(
paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()),
paddle.nn.CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 5)))
# Launch training and evaluation
model.fit(train_dataset, val_dataset, epochs=2, batch_size=64, log_freq=200)
```
Let us have a look at the [**scale parameter `gamma` and the shift parameter `beta`**] learned by the first batch normalization layer.
```
param = model.parameters()
print('gamma:', param[2].numpy().reshape(-1))
print('beta:', param[3].numpy().reshape(-1))
```
## [**7.5.5. Concise Implementation**]
Instead of the `BatchNorm` class we just defined, we can use the `BatchNorm` layers provided by the deep learning framework directly.
The code looks virtually identical to our implementation above.
```
class LeNet(nn.Layer):
def __init__(self, num_dim=1, num_class=10):
super(LeNet, self).__init__()
model_conv = [
            nn.Conv2D(num_dim, 6, 5),
            nn.BatchNorm2D(6),
            nn.Sigmoid(),
            nn.MaxPool2D(2, stride=2),
            nn.Conv2D(6, 16, 5),
            nn.BatchNorm2D(16),
            nn.Sigmoid(),
            nn.MaxPool2D(2, stride=2),
]
self.model_conv = nn.Sequential(*model_conv)
model_fc = [
            nn.Linear(256, 120),
            nn.BatchNorm1D(120),
            nn.Sigmoid(),
            nn.Linear(120, 84),
            nn.BatchNorm1D(84),
            nn.Sigmoid(),
            nn.Linear(84, num_class),
]
self.model_fc = nn.Sequential(*model_fc)
def forward(self, X):
Y = self.model_conv(X)
Y = paddle.flatten(Y, start_axis=1)
Y = self.model_fc(Y)
return Y
```
Below, we [**use the same hyperparameters to train our model**].
Note that, as usual, the high-level API variant runs much faster because its code has been compiled to C++ or CUDA, whereas our custom implementation is interpreted by Python.
```
transform = T.Compose([
T.Transpose(),
T.Normalize([127.5], [127.5]),
])
train_dataset = FashionMNIST(mode='train', transform=transform)
val_dataset = FashionMNIST(mode='test', transform=transform)
# Define the model
model = paddle.Model(LeNet(num_class=10))
# Configure the optimizer, loss, and metric needed to train the model
model.prepare(
paddle.optimizer.Adam(learning_rate=0.001, parameters=model.parameters()),
paddle.nn.CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 5)))
# Launch training and evaluation
model.fit(train_dataset, val_dataset, epochs=2, batch_size=64, log_freq=200)
```
## 7.5.6. Controversy
Intuitively, batch normalization is thought to make the optimization landscape smoother.
However, we must be careful to distinguish between speculative intuitions and true explanations for the phenomena we observe.
Recall that we do not even know why simpler deep neural networks (multilayer perceptrons and conventional convolutional neural networks) generalize well in the first place.
Even with dropout and weight decay, they remain so flexible that their ability to generalize to unseen data cannot be explained via conventional learning-theoretic generalization guarantees.
In the original paper proposing batch normalization, the authors, in addition to introducing its application, offered an explanation of its principle: reducing *internal covariate shift*.
Presumably, by "internal covariate shift" the authors meant something like the intuition expressed above, namely that the distribution of variable values changes over the course of training.
However, there are two problems with this explanation:
i) This drift is very different from *covariate shift* in its strict definition, which makes the name a misnomer.
ii) The explanation offers only an under-specified intuition and leaves open the question of *why exactly the technique works so well*.
This book aims to convey the intuitions that practitioners use to guide their development of deep neural networks.
However, it is important to separate these guiding intuitions from established scientific fact.
Eventually, when you master this material and start writing your own research papers, you will want to clearly distinguish between technical claims and hunches.
Following the rise of batch normalization, the "internal covariate shift" explanation has repeatedly surfaced in debates in the technical literature, and in broader discussions about how machine learning research should be presented.
In a memorable speech given while accepting the Test of Time Award at the 2017 NeurIPS conference, Ali Rahimi used "internal covariate shift" as a focal point in an argument likening the modern practice of deep learning to alchemy.
The example was subsequently revisited in detail :cite:`Lipton.Steinhardt.2018`, in a piece outlining troubling trends in machine learning.
In addition, some authors have offered alternative explanations for the success of batch normalization, in some respects claiming that its behavior is the opposite of what was asserted in the original paper :cite:`Santurkar.Tsipras.Ilyas.ea.2018`.
That said, internal covariate shift is no more worthy of criticism than thousands of similarly vague claims in the technical machine learning literature.
Likely, its resonance as a focal point of these debates owes to its broad recognizability among the target audience.
Batch normalization has proven to be an indispensable method, applied in nearly all image classifiers, and the paper that introduced it has earned tens of thousands of citations.
## 7.5.7. Summary
* During model training, batch normalization continuously adjusts the intermediate outputs of the network by using the mean and standard deviation of the minibatch, so that the intermediate outputs throughout the network are more stable.
* Batch normalization is used slightly differently in fully connected layers and in convolutional layers.
* Like a dropout layer, batch normalization layers compute differently in training mode and prediction mode.
* Batch normalization has many beneficial side effects, primarily regularization. On the other hand, the original motivation of "reducing internal covariate shift" does not seem to be a valid explanation.
## 7.5.8. Exercises
1. Can we remove the bias parameter from the fully connected layer or the convolutional layer before batch normalization? Why?
1. Compare the learning rates for LeNet with and without batch normalization.
    1. Plot the increase in training and test accuracy.
    1. How large can you make the learning rate?
1. Do we need batch normalization in every layer? Experiment with it.
1. Can you replace dropout with batch normalization? How does the behavior change?
1. Fix the parameters `beta` and `gamma`, then observe and analyze the results.
1. Review the online documentation for `BatchNorm` in the high-level API to see other applications of batch normalization.
1. Research ideas: think of other normalization transforms that you could apply. Could you apply the probability integral transform? How about a full-rank covariance estimate?
[Discussions](https://discuss.d2l.ai/t/1874)
|
github_jupyter
|
| 0.690246 | 0.956756 |
# Bar Chart for the number of datapoints in the model
Created by: Clayton Miller
Nov. 4, 2014
Updated on July 28, 2015 for the JBPS journal paper
The purpose of this notebook is to visualize the number of datapoints available from each of the buildings. We base this on the points list.
Latexify: http://nipunbatra.github.io/2014/08/latexify/
```
import pandas as pd
import seaborn
%matplotlib inline
df = pd.read_csv("ETH_EMS_BMS_MasterPointList - ForTotalPointNumberViz.csv")
df.head()
df.info()
countpiv = pd.pivot_table(df.drop(["Unnamed: 0","Unnamed: 1","CategoryTag","Unnamed: 2","TypeTag","EnglishTag"], axis=1), index='BuildTag', columns='Adjusted_EnglishTag', aggfunc=len)
countpiv.head()
```
## Plot
```
import matplotlib.pyplot as plt
import numpy as np
from math import sqrt
import matplotlib
SPINE_COLOR = 'gray'
def latexify(fig_width=None, fig_height=None, columns=1):
"""Set up matplotlib's RC params for LaTeX plotting.
Call this before plotting a figure.
Parameters
----------
fig_width : float, optional, inches
fig_height : float, optional, inches
columns : {1, 2}
"""
# code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
# Width and max height in inches for IEEE journals taken from
# computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
assert(columns in [1,2])
if fig_width is None:
fig_width = 3.39 if columns==1 else 6.9 # width in inches
if fig_height is None:
golden_mean = (sqrt(5)-1.0)/2.0 # Aesthetic ratio
fig_height = fig_width*golden_mean # height in inches
MAX_HEIGHT_INCHES = 8.0
if fig_height > MAX_HEIGHT_INCHES:
print("WARNING: fig_height too large:" + fig_height +
"so will reduce to" + MAX_HEIGHT_INCHES + "inches.")
fig_height = MAX_HEIGHT_INCHES
params = {'backend': 'ps',
              'text.latex.preamble': [r'\usepackage{gensymb}'],
'axes.labelsize': 8, # fontsize for x and y labels (was 10)
'axes.titlesize': 10,
'text.fontsize': 10, # was 10
'legend.fontsize': 8, # was 10
'xtick.labelsize': 7,
'ytick.labelsize': 7,
'text.usetex': True,
'figure.figsize': [fig_width,fig_height],
'font.family': 'serif'
}
matplotlib.rcParams.update(params)
def format_axes(ax):
for spine in ['top', 'right']:
ax.spines[spine].set_visible(False)
for spine in ['left', 'bottom']:
ax.spines[spine].set_color(SPINE_COLOR)
ax.spines[spine].set_linewidth(0.5)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for axis in [ax.xaxis, ax.yaxis]:
axis.set_tick_params(direction='out', color=SPINE_COLOR)
return ax
latexify(fig_height=6)
ax = countpiv.plot(kind="barh", stacked=True)
ax.set_xlabel("Number of Datapoints")
ax.set_ylabel("Campus Building")
ax.set_title("Amount/Type of Sensors in EMS")
ax.legend(loc=1)
plt.tight_layout()
format_axes(ax)
plt.savefig("pointbreakdown.pdf")
```
## Let's hide the building names by assigning a number
```
countpiv['sum'] = countpiv.sum(axis=1)
countpiv = countpiv.sort(columns="sum", ascending=False).drop(["sum"], axis=1)
countpiv = countpiv[["Cooling energy","Electricity","Heating energy","City gas","Domestic hot water","Grey water","Water consumption"]]
countpiv.head()
countpiv_anonymous = countpiv.reset_index().reset_index()
countpiv_anonymous = countpiv_anonymous.drop(['index','BuildTag'], axis=1)
latexify(fig_height=5)
ax = countpiv_anonymous.plot(kind="barh", stacked=True)
ax.set_xlabel("Number of Datapoints")
ax.set_ylabel("Campus Building Number")
ax.set_title("Number/Type of Sensors in EMS")
ax.legend(loc=1)
ax.yaxis.grid(False)
# ax.xaxis.grid(False)
plt.tight_layout()
format_axes(ax)
plt.savefig("pointbreakdown_anon.pdf")
```
|
github_jupyter
|
| 0.705582 | 0.865224 |
# Planet Tasking API: Monitoring Tasking Orders
---
## Introduction
---
This tutorial is an introduction to creating monitoring tasking orders with [Planet](https://www.planet.com)'s Tasking API. It provides code samples showing how to write simple Python code to do this.
The API reference documentation can be found at https://developers.planet.com/docs/tasking
### Requirements
---
#### Software & Modules
This tutorial assumes familiarity with the [Python](https://python.org) programming language throughout. Familiarity with basic REST API concepts and usage is also assumed.
We'll be using a **"Jupyter Notebook"** (aka Python Notebook) to run through the examples.
To learn more about and get started with using Jupyter, visit: [Jupyter](https://jupyter.org/) and [IPython](https://ipython.org/).
For the best experience, download this notebook and run it on your system, and make sure to install the modules listed below first. You can also copy the examples' code into separate Python files and run them directly with Python on your system if you prefer.
#### Planet API Key
You should have an account on the Planet Platform to access the Tasking API. You may retrieve your API key from your [account page](https://www.planet.com/account/), or from the "API Tab" in [Planet Explorer](https://www.planet.com/explorer).
## Overview
---
### The basic workflow
1. Create a monitoring tasking order
1. Check the status of the tasking order
### API Endpoints
This tutorial will cover the following API ***endpoint***:
* [`/order`](https://api.planet.com/tasking/v2/order/)
## Basic Setup
---
Before interacting with the Planet Tasking API using Python, we will set up our environment with some useful modules and helper functions.
* We'll configure *authentication* to the Planet Tasking API
* We'll use the `requests` Python module to make HTTP communication easier.
* We'll use the `json` Python module to help us work with JSON responses from the API.
* We'll use the `pytz` Python module to define the time frame for the order that we will be creating.
* We'll create a function called `p` that will print Python dictionaries nicely.
Then we'll be ready to make our first call to the Planet Tasking API by hitting the base endpoint at `https://api.planet.com/tasking/v2`.
Let's start by configuring authentication:
### Authentication
Authentication with the Planet Tasking API can be achieved using a valid Planet **API key**.
You can *export* your API Key as an environment variable on your system:
`export PL_API_KEY="YOUR API KEY HERE"`
Or add the variable to your path, etc.
To start our Python code, we'll setup an API Key variable from an environment variable to use with our requests:
```
# Import the os module in order to access environment variables
import os
#If you are running this notebook outside of the docker environment that comes with the repo, you can uncomment the next line to provide your API key
#os.environ['PL_API_KEY']=input('Please provide your API Key')
# Setup the API Key from the `PL_API_KEY` environment variable
PLANET_API_KEY = os.getenv('PL_API_KEY')
```
### Helper Modules and Functions
```
# Import helper modules
import json
import requests
import pytz
from time import sleep
from datetime import datetime, timedelta
# Helper function to print formatted JSON using the json module
def p(data):
print(json.dumps(data, indent=2))
# Set up the Planet Tasking API base URL
TASKING_API_URL = "https://api.planet.com/tasking/v2"
# Setup the session
session = requests.Session()
# Authenticate
session.headers.update({
'Authorization': f'api-key {PLANET_API_KEY}',
'Content-Type': 'application/json'
})
```
## 1 | Compose the monitoring tasking order
We want to create a monitoring tasking order that takes images of the same location at a defined cadence, in this example once per week. To keep things simple, we are going to create a Point order, which takes a single latitude/longitude coordinate pair. Since this is your monitoring order, you need to provide the name of the tasking order, the coordinates, the time period over which the order should be active, and the cadence.
To make things easier, we will default the time window to start tomorrow and end 28 days from now, with the aim of capturing four images if we stick to the weekly cadence. Feel free to change this to suit your needs, but if you do, note that all times must be given in UTC. Unlike a standard flexible tasking order, a monitoring tasking order requires the end time to be defined.
```
# Define the name and coordinates for the order
name=input("Give the order a name")
latitude=float(input("Provide the latitude"))
longitude=float(input("Provide the longitude"))
# Because the geometry is GeoJSON, the coordinates must be longitude,latitude
order = {
'name': name,
'geometry': {
'type': 'Point',
'coordinates': [
longitude,
latitude
]
}
}
# Set a start and end time, giving the order a month to complete
tomorrow = datetime.now(pytz.utc) + timedelta(days=1)
twenty_eight_days_later = tomorrow + timedelta(days=28)
# define the cadence
cadence=7
monitoring_parameters = {
'start_time': tomorrow.isoformat(),
'end_time': twenty_eight_days_later.isoformat(),
'monitoring_cadence': cadence
}
# Add the monitoring parameters
order.update(monitoring_parameters)
#View the payload before posting
p(order)
# The creation of an order is a POST request to the /orders endpoint
res = session.request('POST', TASKING_API_URL + '/orders/', json=order)
if res.status_code == 403:
print('Your PLANET_API_KEY is valid, but you are not authorized.')
elif res.status_code == 401:
print('Your PLANET_API_KEY is incorrect')
elif res.status_code == 201:
print('Your order was created successfully')
else:
print(f'Received status code {res.status_code} from the API. Please contact support.')
# View the response
p(res.json())
```
**Congratulations!** You just created a monitoring tasking order using the Planet Tasking API. Depending on the parameters that you provided, a satellite will be attempting to take an image over your given coordinates in the near future.
## 2 | Check the status of the monitoring order
To see the status of the new monitoring tasking order, the tasking order id is required. Depending on the tasking order, it can take some time for the status of the tasking order to change, and so you may need to come back to this section once some time has elapsed before changes to the tasking order can be seen. It is recommended to run the next part of this notebook to extract the ID of the newly created order and save that for later use.
```
# Get the response JSON and extract the ID of the order
response = res.json()
new_order_id = response["id"]
p(new_order_id)
def check_order_status(order_id):
# Make a GET request with the order_id concatenated to the end of the /orders url; e.g. https://api.planet.com/tasking/v2/orders/<ORDER_ID>
res = session.request('GET', TASKING_API_URL + '/orders/' + order_id)
if res.status_code == 403:
print('Your PLANET_API_KEY is valid, but you are not authorized to view this order.')
elif res.status_code == 401:
print('Your PLANET_API_KEY is incorrect')
elif res.status_code == 404:
print(f'Your order ({order_id}) does not exist')
elif res.status_code != 200:
print(f'Received status code {res.status_code} from the API. Please contact support.')
else:
order = res.json()
p(res.json())
print(f'Your order is {order["status"]} with {order["capture_status_published_count"]} published captures '
f'and {order["capture_assessment_success_count"]} successful captures')
check_order_status(new_order_id)
```
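If you want to check back automatically instead of re-running the cell by hand, a simple polling loop is enough. This sketch (the number of checks and the one-hour interval are arbitrary choices) reuses the `sleep` import from earlier:
```python
# Poll the order status a few times, waiting between checks
for _ in range(3):
    check_order_status(new_order_id)
    sleep(60 * 60)  # wait one hour between checks
```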
|
github_jupyter
|
| 0.422981 | 0.965867 |
```
import pandas as pd
import numpy as np
FBposts = pd.read_csv("./data/facebook-fact-check.csv")
```
# Dates to Float
```
FBposts_datesDF=pd.to_datetime(FBposts['Date Published'])
FBposts_datesfloatDF= (FBposts_datesDF - FBposts_datesDF.min()) / np.timedelta64(1,'D')
type(FBposts_datesfloatDF)
print(len(FBposts.columns))
FBposts['date_delta']=FBposts_datesfloatDF
print(len(FBposts.columns))
```
## Clean NaN
```
FBposts_cleanedNaN = FBposts.fillna(FBposts.mean())
```
# Dealing with the Category column (who is writing what in the FB posts)
```
from NLP_nltk import levenshtein_distance
FBposts_cleanedNaN.columns
FBposts_Categorynorm=pd.DataFrame([levenshtein_distance(FBposts_cleanedNaN["Category"].mode()[0],author) for author in FBposts_cleanedNaN['Category']])
FBposts_cleanedNaN['Categorynorm']=FBposts_Categorynorm
print(len(FBposts_cleanedNaN.columns))
```
# More data: normalizing `account_id`
```
FBposts_acctidnorm=pd.DataFrame([levenshtein_distance(str(FBposts_cleanedNaN["account_id"].mode()[0]),str(acctid)) for acctid in FBposts_cleanedNaN['account_id']])
FBposts_cleanedNaN['account_idnorm']=FBposts_acctidnorm
print(len(FBposts_cleanedNaN.columns))
FBpostsX=FBposts_cleanedNaN[["account_idnorm","Categorynorm","date_delta","share_count","reaction_count","comment_count"]]
```
## Having a $Y$ value
```
FBpostsY=FBposts_cleanedNaN["Rating"]
FBpostsY=FBpostsY.replace("mostly true",0.75)
FBpostsY=FBpostsY.replace("no factual content",0.5)
FBpostsY=FBpostsY.replace("mixture of true and false",0.25)
FBpostsY=FBpostsY.replace("mostly false",0.0)
FBpostsY.index
FBpostsX.describe()
```
# Further normalization: log-transform and standardize account_idnorm, Categorynorm, share_count, reaction_count, and comment_count
```
import sklearn
from sklearn import preprocessing
from sklearn.preprocessing import FunctionTransformer
transformer = FunctionTransformer(np.log1p)
FBpostsXnorm=FBpostsX
FBpostsXnorm["account_idnorm"]=preprocessing.scale(transformer.transform( FBpostsX['account_idnorm'].values.reshape(-1,1)))
FBpostsXnorm["Categorynorm"] =preprocessing.scale(transformer.transform( FBpostsX['Categorynorm'].values.reshape(-1,1)))
FBpostsXnorm.describe()
```
Now do the same for share_count, reaction_count, and comment_count:
```
FBpostsXnorm["share_count"]=preprocessing.scale(transformer.transform( FBpostsX['share_count'].values.reshape(-1,1)))
FBpostsXnorm["reaction_count"] =preprocessing.scale(transformer.transform( FBpostsX['reaction_count'].values.reshape(-1,1)))
FBpostsXnorm["comment_count"]=preprocessing.scale(transformer.transform( FBpostsX['comment_count'].values.reshape(-1,1)))
FBpostsXnorm.describe()
```
# Training data, (Cross-)Validation, Test Data
```
ratiotrain=0.85
ratiovalid=0.1
ratiotest =0.05
N = len(FBpostsY.index)
indshuffled=sklearn.utils.shuffle(np.arange(N))
trainind=indshuffled[:int(ratiotrain*N)]
validind=indshuffled[int(ratiotrain*N):int((ratiotrain+ratiovalid)*N)]
testind=indshuffled[int((ratiotrain+ratiovalid)*N):]
print(len(trainind))
print(len(validind))
print(len(testind))
FB_X_train=FBpostsXnorm.iloc[trainind]
FB_X_valid=FBpostsXnorm.iloc[validind]
FB_X_test=FBpostsXnorm.iloc[testind]
FB_Y_train=FBpostsY.iloc[trainind]
FB_Y_valid=FBpostsY.iloc[validind]
FB_Y_test=FBpostsY.iloc[testind]
FB_X_train.describe()
FB_Y_train.describe()
FB_Y_test.describe()
FB_X_train.to_pickle("FB_X_train.pkl")
FB_X_valid.to_pickle("FB_X_valid.pkl")
FB_X_test.to_pickle("FB_X_test.pkl")
FB_Y_train.to_pickle("FB_Y_train.pkl")
FB_Y_valid.to_pickle("FB_Y_valid.pkl")
FB_Y_test.to_pickle("FB_Y_test.pkl")
np.save("FB_X_train.npy", FB_X_train.values)
np.save("FB_X_valid.npy", FB_X_valid.values)
np.save("FB_X_test.npy", FB_X_test.values)
np.save("FB_Y_train.npy", FB_Y_train.values)
np.save("FB_Y_valid.npy", FB_Y_valid.values)
np.save("FB_Y_test.npy", FB_Y_test.values)
FB_X_train.values.shape
```
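As a quick sanity check (not part of the original notebook), the saved arrays can be reloaded later and their shapes compared; a minimal sketch:
```python
# Reload the saved training split and confirm the features and labels line up
X_train_loaded = np.load("FB_X_train.npy")
Y_train_loaded = np.load("FB_Y_train.npy")
assert X_train_loaded.shape[0] == Y_train_loaded.shape[0]
print(X_train_loaded.shape, Y_train_loaded.shape)
```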
|
github_jupyter
|
| 0.226955 | 0.497376 |
<h1> Create TensorFlow DNN model </h1>
This notebook illustrates:
<ol>
<li> Creating a model using the high-level Estimator API
</ol>
```
# change these to try this notebook out
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
%%bash
ls *.csv
```
<h2> Create TensorFlow model using TensorFlow's Estimator API </h2>
<p>
First, write an input_fn to read the data.
```
import shutil
import numpy as np
import tensorflow as tf
print(tf.__version__)
# Determine CSV, label, and key columns
CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',')
LABEL_COLUMN = 'weight_pounds'
KEY_COLUMN = 'key'
# Set default values for each CSV column
DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']]
TRAIN_STEPS = 1000
# Create an input function reading a file using the Dataset API
# Then provide the results to the Estimator API
def read_dataset(filename, mode, batch_size = 512):
def _input_fn():
def decode_csv(value_column):
columns = tf.decode_csv(value_column, record_defaults=DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
label = features.pop(LABEL_COLUMN)
return features, label
# Create list of files that match pattern
file_list = tf.gfile.Glob(filename)
# Create dataset from file list
dataset = (tf.data.TextLineDataset(file_list) # Read text file
.map(decode_csv)) # Transform each elem by applying decode_csv fn
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # indefinitely
dataset = dataset.shuffle(buffer_size=10*batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset
return _input_fn
```
Next, define the feature columns
```
# Define feature columns
def get_categorical(name, values):
return tf.feature_column.indicator_column(
tf.feature_column.categorical_column_with_vocabulary_list(name, values))
def get_cols():
# Define column types
return [\
get_categorical('is_male', ['True', 'False', 'Unknown']),
tf.feature_column.numeric_column('mother_age'),
get_categorical('plurality',
['Single(1)', 'Twins(2)', 'Triplets(3)',
'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']),
tf.feature_column.numeric_column('gestation_weeks')
]
```
To predict with the TensorFlow model, we also need a serving input function. We will want all the inputs from our user.
```
# Create serving input function to be able to serve predictions later using provided inputs
def serving_input_fn():
feature_placeholders = {
'is_male': tf.placeholder(tf.string, [None]),
'mother_age': tf.placeholder(tf.float32, [None]),
'plurality': tf.placeholder(tf.string, [None]),
'gestation_weeks': tf.placeholder(tf.float32, [None])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
return tf.estimator.export.ServingInputReceiver(features, feature_placeholders)
# Create estimator to train and evaluate
def train_and_evaluate(output_dir):
EVAL_INTERVAL = 300
run_config = tf.estimator.RunConfig(save_checkpoints_secs = EVAL_INTERVAL,
keep_checkpoint_max = 3)
estimator = tf.estimator.DNNRegressor(
model_dir = output_dir,
feature_columns = get_cols(),
hidden_units = [64, 32],
config = run_config)
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset('train.csv', mode = tf.estimator.ModeKeys.TRAIN),
max_steps = TRAIN_STEPS)
exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset('eval.csv', mode = tf.estimator.ModeKeys.EVAL),
steps = None,
start_delay_secs = 60, # start evaluating after N seconds
throttle_secs = EVAL_INTERVAL, # evaluate every N seconds
exporters = exporter)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
```
Finally, train!
```
# Run the model
shutil.rmtree('babyweight_trained', ignore_errors = True) # start fresh each time
tf.summary.FileWriterCache.clear() # ensure filewriter cache is clear for TensorBoard events file
train_and_evaluate('babyweight_trained')
```
When I ran it, the final lines of the output (above) were:
<pre>
INFO:tensorflow:Saving dict for global step 1000: average_loss = 1.2693067, global_step = 1000, loss = 635.9226
INFO:tensorflow:Restoring parameters from babyweight_trained/model.ckpt-1000
INFO:tensorflow:Assets added to graph.
INFO:tensorflow:No assets to write.
INFO:tensorflow:SavedModel written to: babyweight_trained/export/exporter/temp-1517899936/saved_model.pb
</pre>
The exporter directory contains the final model. The final average_loss is 1.2693067; since average_loss is the mean squared error, this corresponds to an RMSE of about 1.13 pounds.
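To sanity-check the export, you can load the SavedModel back and run a local prediction. The following is a minimal sketch using TensorFlow 1.x's `tf.contrib.predictor`; the export path lookup and the example feature values are illustrative assumptions.
```
# a sketch: load the most recent export and predict for one illustrative example
import os
import tensorflow as tf
export_base = 'babyweight_trained/export/exporter'
latest_export = os.path.join(export_base, sorted(os.listdir(export_base))[-1])  # newest timestamped dir
predict_fn = tf.contrib.predictor.from_saved_model(latest_export)
print(predict_fn({'is_male': ['True'],
                  'mother_age': [26.0],
                  'plurality': ['Single(1)'],
                  'gestation_weeks': [39.0]}))
```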
<h2> Monitor and experiment with training </h2>
```
from google.datalab.ml import TensorBoard
TensorBoard().start('./babyweight_trained')
```
In TensorBoard, look at the learned embeddings. Are they getting clustered? How about the weights for the hidden layers? What if you run this longer? What happens if you change the batch size?
```
for pid in TensorBoard.list()['pid']:
TensorBoard().stop(pid)
print('Stopped TensorBoard with pid {}'.format(pid))
```
Copyright 2017-2018 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
```
import pandas as pd
import numpy as np
import os
from mplsoccer.pitch import Pitch
```
# Load data
Load event data
```
df_events_wyscout = pd.read_parquet(os.path.join('..', 'data', 'wyscout', 'event.parquet'))
df_events_statsbomb = pd.read_parquet(os.path.join('..', 'data', 'statsbomb', 'event.parquet'))
```
Get match dataframe for full seasons in Wyscout data
```
df_match_wyscout = pd.read_parquet(os.path.join('..', 'data', 'wyscout', 'match.parquet'))
df_match_wyscout = df_match_wyscout[df_match_wyscout.competition_name.isin(['Premier League', 'Ligue 1',
'Bundesliga', 'Serie A', 'La Liga'])].copy()
df_match_wyscout_overlap = pd.read_parquet(os.path.join('..', 'data', 'wyscout', 'match_overlap.parquet'))
df_match_wyscout_overlap = df_match_wyscout_overlap[df_match_wyscout_overlap.competition_name == 'La Liga'].copy()
df_match = pd.concat([df_match_wyscout, df_match_wyscout_overlap])
```
Get a match dataframe for the overlapping games from the Statsbomb data
```
df_match_statsbomb = pd.read_parquet(os.path.join('..', 'data', 'statsbomb', 'match.parquet'))
df_match_statsbomb = df_match_statsbomb[(df_match_statsbomb.competition_name == 'La Liga') &
(df_match_statsbomb.season_name == '2017/2018')].copy()
```
# Get a match id/ team id lookup for overlapping games
```
cols = ['match_id', 'home_team_id', 'home_team_name', 'away_team_id', 'away_team_name']
df_overlap_ids = df_match_wyscout_overlap[cols].merge(df_match_statsbomb[cols],
on=['home_team_name', 'away_team_name'], suffixes=['_wyscout', ''])
df_home = df_overlap_ids[['home_team_id_wyscout', 'home_team_id']].copy()
df_away = df_overlap_ids[['away_team_id_wyscout', 'away_team_id']].copy()
df_home.rename({'home_team_id_wyscout': 'team_id_wyscout', 'home_team_id': 'team_id'}, axis=1, inplace=True)
df_away.rename({'away_team_id_wyscout': 'team_id_wyscout', 'away_team_id': 'team_id'}, axis=1, inplace=True)
df_team_overlap = pd.concat([df_home, df_away])
df_team_overlap.drop_duplicates(inplace=True)
df_match_overlap = df_overlap_ids[['match_id', 'match_id_wyscout']]
```
# Add a goal flag
```
df_events_statsbomb['goal'] = df_events_statsbomb.outcome_name == 'Goal'
```
# Filter events dataframes to only include shots from the 5 big league matches and specific columns
```
mask_match = df_events_wyscout.match_id.isin(df_match_wyscout.match_id)
mask_shot = df_events_wyscout.subEventName.isin(['Penalty', 'Free kick shot', 'Shot'])
cols = ['match_id', 'id', 'subEventName', 'matchPeriod', 'eventSec', 'team_id', 'x', 'y', 'goal']
df_events_wyscout = df_events_wyscout.loc[mask_match & mask_shot, cols].copy()
mask_match = df_events_statsbomb.match_id.isin(df_match_statsbomb.match_id)
mask_shot = df_events_statsbomb.type_name == 'Shot'
cols = ['match_id', 'id', 'period', 'timestamp_minute', 'timestamp_second', 'timestamp_millisecond',
'team_id', 'x', 'y', 'shot_statsbomb_xg', 'goal']
df_events_statsbomb = df_events_statsbomb.loc[mask_match & mask_shot, cols].copy()
```
# Format the dataframes
Add wyscout match/ team ids to statsbomb data
```
df_events_statsbomb = df_events_statsbomb.merge(df_match_overlap)
df_events_statsbomb = df_events_statsbomb.merge(df_team_overlap)
df_events_statsbomb.drop('match_id', axis=1, inplace=True)
df_events_statsbomb.drop('team_id', axis=1, inplace=True)
df_events_statsbomb = df_events_statsbomb.rename({'match_id_wyscout': 'match_id', 'team_id_wyscout': 'team_id'}, axis=1)
```
Add on wyscout xg
```
df_xg = pd.read_parquet(os.path.join('..', 'data', 'modelling', 'xg_shap.parquet'))
df_xg = df_xg.loc[df_xg.wyscout_id.notnull(), ['match_id', 'wyscout_id', 'xg']].rename({'wyscout_id': 'id'}, axis=1)
df_events_wyscout = df_events_wyscout.merge(df_xg, on=['match_id', 'id'], how='left')
df_events_wyscout.loc[df_events_wyscout.subEventName == 'Penalty', 'xg'] = 0.76
```
We do not have an xG value for some outlier shots; plot where they are on the pitch.
```
pitch = Pitch(pitch_type='wyscout')
fig, ax = pitch.draw()
mask_no_xg = df_events_wyscout.xg.isnull()
pitch.scatter(df_events_wyscout[mask_no_xg].x, df_events_wyscout[mask_no_xg].y, ax=ax)
```
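As a quick check on how many shots this affects, we can count the rows without an xG value before imputing (a small sketch reusing the mask from the cell above):
```
# number of shots with a missing xG value
print(mask_no_xg.sum(), 'of', len(df_events_wyscout), 'shots have no xG value')
```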
Set xg to 0.015 if missing (1.5%)
```
df_events_wyscout.loc[df_events_wyscout.xg.isnull(), 'xg'] = 0.015
```
Tidy up dataframes for consistent formats
```
df_events_statsbomb['eventSec'] = (df_events_statsbomb.timestamp_minute * 60 +
df_events_statsbomb.timestamp_second +
df_events_statsbomb.timestamp_millisecond / 1000.)
df_events_statsbomb.rename({'shot_statsbomb_xg': 'xg'}, axis=1, inplace=True)
df_events_statsbomb.drop(['timestamp_minute', 'timestamp_second', 'timestamp_millisecond'], axis=1, inplace=True)
df_events_wyscout.rename({'matchPeriod': 'period'}, axis=1, inplace=True)
df_events_wyscout['period'] = df_events_wyscout.period.map({'1H': 1, '2H': 2})
```
Combine the dataframes
```
df_events_wyscout['data'] = 'wyscout'
df_events_statsbomb['data'] = 'statsbomb'
df_events = pd.concat([df_events_wyscout, df_events_statsbomb])
```
Sort the dataframe
```
df_events.sort_values(['match_id', 'period', 'eventSec'], inplace=True)
```
# Add a home_or_away column to the events dataframe:
home_shots when the shot is taken by the home team <br>
away_shots when the shot is taken by the away team
```
# first merge on the the home_team_id for each match from the df_match dataframe
df_events = df_events.merge(df_match[['match_id','home_team_id']], on='match_id', validate='m:1', how='left')
# then create a home_or_away column by comparing the home_team_id with the team_id
mask_home = df_events.team_id == df_events.home_team_id
df_events['home_or_away'] = np.where(mask_home,'home_shots', 'away_shots')
df_events.drop('home_team_id', axis=1, inplace=True)
df_events.head()
```
# Show matches where one team didn't record a single shot.
```
# group on match id and show matches where only one team has a shot event
num_teams_per_match = df_events.groupby('match_id').team_id.nunique()
num_teams_per_match[num_teams_per_match==1].index.tolist()
```
# We are going to make the analysis easier by adding a fake shot with 0 xG where a team didn't record a shot
Note that, for the purposes of the simulation, an xG of 0 means the fake shot will never count as a goal, since a shot only counts as a goal when the simulated random number is at most its xG value.
```
len(df_events)
# first we merge the number of shots for each team on to the df_match dataframe
df_shots_per_team = (df_events.groupby(['match_id', 'home_or_away']).team_id.count()
.reset_index()
.pivot(index='match_id', columns='home_or_away', values='team_id')
.reset_index()
.fillna(0))
df_match = df_match.merge(df_shots_per_team, on='match_id', how='left', validate='1:1')
# we create a fake shot record for each match where the away team has no shots
df_away_missing = df_match.loc[df_match.away_shots==0, ['match_id', 'away_team_id']].copy()
df_away_missing['xg'] = 0.0
df_away_missing['home_or_away'] = 'away_shots'
df_away_missing.rename({'away_team_id':'team_id'}, axis=1, inplace=True)
# we also create a fake shot record where the home team has no shots
df_home_missing = df_match.loc[df_match.home_shots==0, ['match_id', 'home_team_id']].copy()
df_home_missing['xg'] = 0.0
df_home_missing['home_or_away'] = 'home_shots'
df_home_missing.rename({'home_team_id': 'team_id'},axis=1,inplace=True)
# combine the missing home and away shot records
df_missing_shots = pd.concat([df_home_missing, df_away_missing], axis=0)
# putting in an eventSec/ period (it doesn't matter what this is as the xG is zero)
df_missing_shots['eventSec'] = 0
df_missing_shots['period'] = 1
# add the fake shot data to the event dataframe and sort it into the right order
df_events = pd.concat([df_missing_shots, df_events])
df_events.sort_values(['match_id', 'eventSec'], inplace=True)
len(df_events)
```
# Create a shot possession group (i.e. shots within 15 seconds of each other are in the same group)
```
df_events['eventSec_shift'] = df_events.groupby(['match_id', 'period']).eventSec.shift(1)
df_events['new_shot_sequence'] = (((df_events['eventSec'] - df_events['eventSec_shift']) > 15) |
(df_events.match_id != df_events.match_id.shift(1)) |
(df_events.period != df_events.period.shift(1)))
df_events['possession'] = df_events.new_shot_sequence.cumsum()
df_events.drop(['eventSec_shift', 'new_shot_sequence'], axis=1, inplace=True)
```
# Simulation parameters
```
np.random.seed(42)
n_sims = 10000
sim_columns = np.arange(n_sims)
```
# Show events where the possession has more than one team shooting
There are a few shot sequences where more than one team has a shot. This impacts our simulation. If there is a goal, I want to model it so that the rest of the possession didn't happen and the second team didn't have a chance to shoot. This mimics a real match: if one team scores, there is a kick-off and the possession ends.
```
num_teams_per_possession_sequence = df_events.groupby('possession').team_id.nunique()
num_teams_per_possession_sequence[num_teams_per_possession_sequence>1].index
```
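To make this rule concrete before applying it to the simulated goals, here is a toy sketch with made-up possession ids and goal flags, keeping only the first goal in each possession:
```
# toy example: only the first goal within each possession is kept
toy = pd.DataFrame({'possession': [1, 1, 1, 2, 2],
                    'goal':       [1, 0, 1, 0, 1]})
first_goal = (toy.groupby('possession')['goal'].cumsum() == 1) & (toy['goal'] == 1)
toy['goal_kept'] = np.where(first_goal, 1, 0)
toy
```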
# Simulate the goals for each shot and sum up the number of goals for each team in a match
First simulate the goals. We create an array of uniform random numbers between 0 and 1. If the random number is greater than the xG we do not count the shot as a goal (=0); if it is less than or equal to the xG we count it as a goal (=1). For example, a shot with an xG of 0.17 has a 17% chance of being a goal, so we count it as a goal whenever the random number is <= 0.17.
```
n_shots = len(df_events)
simulated_probabilities = np.random.uniform(size=(n_shots, n_sims))
simulated_goals = np.where(simulated_probabilities > df_events.xg.values.reshape(-1, 1), 0, 1)
simulated_goals.shape
```
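As a sanity check on this step, the simulated goal rate for a single shot should be close to its xG; a small sketch with an illustrative xG of 0.17:
```
# the mean of the simulated goal flags should be roughly 0.17
check = np.where(np.random.uniform(size=100000) > 0.17, 0, 1)
print('simulated goal rate:', check.mean())
```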
Create a dataframe of simulated goals; if there is more than one simulated goal in a possession, keep only the first and set the others to zero. The dataframe has n_shots rows and n_sims columns.
```
df_simulated_goals = pd.concat([df_events[['possession']],
pd.DataFrame(simulated_goals, index=df_events.index)],
axis=1)
# here we keep only the first simulated goal in each possession: a shot counts as a goal
# only if it was simulated as a goal and it is the first goal of its possession sequence
cumulative_goals = df_simulated_goals.groupby('possession').cumsum()
df_simulated_goals = pd.DataFrame(np.where((cumulative_goals.values == 1) & (simulated_goals == 1), 1, 0),
                                  index=df_events.index)
df_simulated_goals.shape
```
Next we sum the number of simulated goals for each team in the match. The dataframe is now (n_teams*2) length by n_sims wide
```
# we add on the match id and home_or_away flag and sum the goals in the game for each team (home_shots/ away_shots)
df_simulated_goals = pd.concat([df_events[['match_id', 'home_or_away']], df_simulated_goals],axis=1)
df_simulated_goals = df_simulated_goals.groupby(['match_id', 'home_or_away'])[sim_columns].sum()
df_simulated_goals.reset_index(inplace=True)
df_simulated_goals.shape
```
# Recreate the League Tables
```
# create some columns used to create the table stats: goal difference, win, lose, draw
df_match['home_goal_diff'] = df_match['home_score'] - df_match['away_score']
df_match['home_win'] = df_match.home_score > df_match.away_score
df_match['home_lose'] = df_match.home_score < df_match.away_score
df_match['away_goal_diff'] = df_match['away_score'] - df_match['home_score']
df_match['away_win'] = df_match.away_score > df_match.home_score
df_match['away_lose'] = df_match.away_score < df_match.home_score
df_match['draw'] = df_match.away_score == df_match.home_score
mask_home_win = df_match.home_score > df_match.away_score
mask_away_win = df_match.away_score > df_match.home_score
mask_draw = df_match.away_score == df_match.home_score
df_match.loc[mask_home_win,'home_points'] = 3
df_match.loc[mask_home_win,'away_points'] = 0
df_match.loc[mask_away_win,'away_points'] = 3
df_match.loc[mask_away_win,'home_points'] = 0
df_match.loc[mask_draw,'home_points'] = 1
df_match.loc[mask_draw,'away_points'] = 1
league_tables = []
for competition in ['Premier League', 'Ligue 1', 'Bundesliga', 'Serie A', 'La Liga']:
match_ids = df_match[(df_match.competition_name==competition)].match_id
# create a dataframe of away results
away_cols = ['away_team_name','away_points','away_goal_diff','away_win','draw',
'away_lose','away_score','home_score']
away_rename = {'away_points':'points','away_team_name':'team_name',
'away_goal_diff':'goal_difference','away_win':'win','away_lose':'lose',
'away_score':'goals_for','home_score':'goals_against'}
df_away = df_match.loc[df_match.match_id.isin(match_ids), away_cols]
# create a dataframe of home results
home_cols = ['home_team_name','home_points','home_goal_diff','home_win','draw','home_lose',
'home_score','away_score']
home_rename = {'home_points':'points','home_team_name':'team_name',
'home_goal_diff':'goal_difference','home_win':'win','home_lose':'lose',
'home_score':'goals_for','away_score':'goals_against'}
df_home = df_match.loc[df_match.match_id.isin(match_ids),home_cols]
# combine to get the whole league results
df_league = pd.concat([df_away.rename(away_rename,axis=1),
df_home.rename(home_rename,axis=1)])
# add the number of games played to the team stats to create a df_table dataframe
stats_cols = ['win','draw','lose','goals_for','goals_against','goal_difference','points']
df_table = (df_league.groupby('team_name')[stats_cols].sum())
df_played = df_league.team_name.value_counts()
df_played.name = 'played'
df_table = pd.concat([df_played, df_table],axis=1,sort=False)
    # sort by the criteria for deciding the league; note the final head-to-head criterion is not implemented here
df_table.sort_values(['points','goal_difference','goals_for','win'],ascending=False,inplace=True)
# index as team rank rather than team name
df_table.reset_index(inplace=True)
df_table.index = df_table.index+1
# set columns to easy names
df_table.columns = ['Team','Played','Won','Drawn','Lost','For','Against','Goal Difference','Points']
# columns to integers (some are float)
df_table[df_table.columns[1:]] = df_table[df_table.columns[1:]].astype(int)
# add position to dataframe
df_table.index.name = 'position'
df_table.reset_index(inplace=True)
league_tables.append(df_table)
```
In La Liga and Serie A the rules for classification are different: 1) Points; 2) Head-to-head points; 3) Head-to-head goal difference; 4) Goal difference; 5) Goals scored.
I.e. head-to-head results are considered before goal difference, so these tables are in the wrong order.
Manually set them in the right order.
```
df_spain = league_tables[4]
df_spain.loc[df_spain.Team == 'Espanyol', 'position'] = 11
df_spain.loc[df_spain.Team == 'Real Sociedad', 'position'] = 12
df_spain.loc[df_spain.Team == 'Celta Vigo', 'position'] = 13
df_spain.sort_values('position', inplace=True)
league_tables[4] = df_spain
df_italy = league_tables[3]
df_italy.loc[df_italy.Team == 'FC Internazionale Milano', 'position'] = 4
df_italy.loc[df_italy.Team == 'SS Lazio', 'position'] = 5
df_italy.loc[df_italy.Team == 'AC Chievo Verona', 'position'] = 13
df_italy.loc[df_italy.Team == 'Udinese Calcio', 'position'] = 14
df_italy.sort_values('position', inplace=True)
league_tables[3] = df_italy
```
# Calculate the points for each match and add them to the df_match dataframe.
```
df_match['away_points_sim'] = (df_match.away_win * 3 + df_match.draw)
df_match['home_points_sim'] = (df_match.home_win * 3 + df_match.draw)
```
# Split the simulated goals into home and away goals for each match
```
# get the simulated home and away goals for each match, indexed by match_id
df_goals_home = (df_simulated_goals[df_simulated_goals.home_or_away == 'home_shots']
.drop('home_or_away', axis=1)
.set_index('match_id'))
df_goals_away = (df_simulated_goals[df_simulated_goals.home_or_away == 'away_shots']
.drop('home_or_away', axis=1)
.set_index('match_id'))
```
# Simulate the league points
```
home_win = df_goals_home > df_goals_away
away_win = df_goals_away > df_goals_home
draw = df_goals_home == df_goals_away
# calculate home win points
home_win = home_win.merge(df_match[['match_id', 'home_team_name', 'competition_name']],
left_index=True, right_on='match_id').rename({'home_team_name': 'team_name'}, axis=1)
points_home = home_win.groupby(['team_name', 'competition_name'])[sim_columns].sum() * 3
# calculate away win points
away_win = away_win.merge(df_match[['match_id', 'away_team_name', 'competition_name']],
left_index=True, right_on='match_id').rename({'away_team_name': 'team_name'}, axis=1)
points_away = away_win.groupby(['team_name', 'competition_name'])[sim_columns].sum() * 3
# calculate draw points for home team
draw1 = draw.merge(df_match[['match_id', 'home_team_name', 'competition_name']],
left_index=True, right_on='match_id').rename({'home_team_name': 'team_name'}, axis=1)
points_draw1 = draw1.groupby(['team_name', 'competition_name'])[sim_columns].sum()
# calculate draw points for away team
draw2 = draw.merge(df_match[['match_id', 'away_team_name', 'competition_name']],
left_index=True, right_on='match_id').rename({'away_team_name': 'team_name'}, axis=1)
points_draw2 = draw2.groupby(['team_name', 'competition_name'])[sim_columns].sum()
# add together for all points for each team
all_points = points_home + points_away + points_draw1 + points_draw2
all_points.reset_index(inplace=True)
# simulate how many points each team gets
league_sims_points = []
for i, league in enumerate(['Premier League', 'Ligue 1', 'Bundesliga', 'Serie A', 'La Liga']):
df_league_actual = league_tables[i]
df_league_points = all_points[all_points.competition_name == league].drop('competition_name', axis=1)
df_league_points = df_league_points.merge(df_league_actual[['Team', 'position']],
left_on='team_name', right_on='Team', how='left')
df_league_points.sort_values('position', inplace=True)
df_league_points.drop(['Team', 'position'], axis=1, inplace=True)
df_league_points.set_index('team_name', inplace=True)
league_sims_points.append(df_league_points)
# simulate the league position probabilities
with pd.ExcelWriter(os.path.join('..', 'figures', '31-35_simulated_league_table.xlsx')) as writer:
sim_position = []
for i, competition in enumerate(league_sims_points):
df_pos = competition.rank(axis=0, ascending=False, method='first').T
df_pos = df_pos.apply(pd.Series.value_counts)
df_pos.fillna(0, inplace=True)
df_pos = np.round(df_pos / 100, 0).astype(np.int32)
df_pos.index.name = 'position'
df_pos = df_pos.T
df_pos.columns = list(range(1, len(df_pos.columns) + 1))
df_pos.index.name = ''
df_pos = df_pos.style.background_gradient(cmap='viridis')
name = ['premier_league', 'ligue_1', 'bundesliga', 'serie_a', 'la_liga'][i]
df_pos.to_excel(writer, sheet_name=name)
sim_position.append(df_pos)
sim_position[0]
sim_position[1]
sim_position[2]
sim_position[3]
sim_position[4]
```
```
from hist import Hist, axis, NamedHist
import boost_histogram as bh
h = Hist(
axis.Regular(10, 0, 1)
)
h.fill([1,2,3])
h2 = h + h
h.view(flow=True)
h2.view(flow=True)
```
# NamedHist is a type of Histogram that requires axes with a name
### 1. Each axis should have a name
### 2. Filling should be done using keywords as axis names
### 3. Axis names could also be used for indexing
## Passing an axis without a name would result in an error
```
h_named = NamedHist(
axis.Regular(10, 0, 1)
)
h_named = NamedHist(
axis.Regular(10, 0, 1, name='x'),
axis.Regular(10, 0, 1, name='y')
)
```
## Filling can be done using axis names as keywords
```
h_named.fill(x=[0.45, 0.65, 0.75], y=[0.55, 0.45, 0.65])
h_named.view()
```
## Filling without using keywords results in an error
```
h_named.fill([0.45, 0.65, 0.75], [0.55, 0.45, 0.65])
```
## Axis names can be used for indexing
```
h_named = NamedHist(
axis.Regular(5, 0, 1, name='x'),
axis.Regular(5, 0, 1, name='y')
)
h_named.fill(x=[0.3, 0.5, 0.7], y=[0.1, 0.3, 0.5])
h_named.view()
```
### The following lines are equivalent
```
h2 = h_named[1:4, :]
h3 = h_named[{0 : slice(1, 4, None)}]
h4 = h_named[{'x': slice(1, 4, None)}]
h2.view()
h3.view()
h4.view()
```
### Further slicing can also be done in the same way
```
h5 = h4[{'y': slice(None, 3, None)}]
h5.view()
```
### The following lines are equivalent
```
h2 = h_named[:, ::bh.sum]
h3 = h_named[{1: slice(None, None, bh.sum)}]
h4 = h_named[{'y': slice(None, None, bh.sum)}]
h2.view()
h3.view()
h4.view()
```
## There is a bug that raises an error when using bh.loc() in a dict
```
hst = bh.Histogram(
axis.Regular(5, 0, 5),
axis.Regular(5, 0, 5)
)
hst.fill([1.5, 2.5], [0.5, 3.5])
hst.view()
```
### The following line will give an error
```
h2 = hst[{0: bh.loc(2.5)}]
```
## The BaseHist class (which Hist builds on) has been implemented to fix the error
```
hst = Hist(
axis.Regular(5, 0, 5),
axis.Regular(5, 0, 5)
)
hst.fill([1.5, 2.5], [0.5, 3.5])
hst.view()
```
### No error is raised
```
h2 = hst[{0: bh.loc(2.5)}]
h2.view()
```
### The same could be achieved using NamedHist
```
hst = NamedHist(
axis.Regular(5, 0, 5, name='x'),
axis.Regular(5, 0, 5, name='y')
)
hst.fill(x=[1.5, 2.5], y=[0.5, 3.5])
h2 = hst[{'x': bh.loc(2.5)}]
h2.view()
```
# bool is a shortcut for an Integer axis with no underflow or overflow and two bins starting from zero
```
h = Hist(
axis.bool()
)
h.view()
h.fill([0, 0, 1])
h.view()
```
### There are no flow bins
```
h.view(flow=True)
```
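For comparison, a roughly equivalent explicit construction uses a plain boost_histogram Integer axis with the flow bins switched off (a sketch, assuming the keyword arguments behave as in boost_histogram):
```
# roughly equivalent to axis.bool(): two integer bins (0 and 1) and no flow bins
h_int = bh.Histogram(bh.axis.Integer(0, 2, underflow=False, overflow=False))
h_int.fill([0, 0, 1])
h_int.view(flow=True)
```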
### Using bool with NamedHist
```
h = NamedHist(
axis.Regular(5, 0, 5, name='x'),
axis.bool(name='valid')
)
h.view()
h.fill(x=[2, 3, 4], valid=[0, 0, 1])
h.view()
```
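By analogy with the earlier `bh.loc` examples, the named bool axis can then be used to select one slice by name; a sketch assuming the same dict-indexing behaviour shown above:
```
# select only the valid == 1 slice of the histogram, by axis name
h_valid = h[{'valid': bh.loc(1)}]
h_valid.view()
```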
# Part 4: Federated Learning with Model Averaging
**Recap:** In Part 2 of this tutorial, we trained a model using a very simple version of Federated Learning. This required each data owner to trust the model owner to be able to see their gradients.
**Description:** In this tutorial, we'll show how to use the advanced aggregation tools from Part 3 to allow the weights to be aggregated by a trusted "secure worker" before the final resulting model is sent back to the model owner (us).
In this way, only the secure worker can see whose weights came from whom. We might be able to tell which parts of the model changed, but we do NOT know which worker (bob or alice) made which change, which creates a layer of privacy.
Authors:
- Andrew Trask - Twitter: [@iamtrask](https://twitter.com/iamtrask)
- Jason Mancuso - Twitter: [@jvmancuso](https://twitter.com/jvmancuso)
```
import torch
import syft as sy
import copy
hook = sy.TorchHook(torch)
from torch import nn, optim
```
# Step 1: Create Data Owners
First, we're going to create two data owners (Bob and Alice) each with a small amount of data. We're also going to initialize a secure machine called "secure_worker". In practice this could be secure hardware (such as Intel's SGX) or simply a trusted intermediary.
```
# create a couple workers
bob = sy.VirtualWorker(hook, id="bob")
alice = sy.VirtualWorker(hook, id="alice")
secure_worker = sy.VirtualWorker(hook, id="secure_worker")
# A Toy Dataset
data = torch.tensor([[0,0],[0,1],[1,0],[1,1.]], requires_grad=True)
target = torch.tensor([[0],[0],[1],[1.]], requires_grad=True)
# get pointers to training data on each worker by
# sending some training data to bob and alice
bobs_data = data[0:2].send(bob)
bobs_target = target[0:2].send(bob)
alices_data = data[2:].send(alice)
alices_target = target[2:].send(alice)
```
# Step 2: Create Our Model
For this example, we're going to train with a simple Linear model. We can initialize it normally using PyTorch's nn.Linear constructor.
```
# Initialize A Toy Model
model = nn.Linear(2,1)
```
# Step 3: Send a Copy of the Model to Alice and Bob
Next, we need to send a copy of the current model to Alice and Bob so that they can perform steps of learning on their own datasets.
```
bobs_model = model.copy().send(bob)
alices_model = model.copy().send(alice)
bobs_opt = optim.SGD(params=bobs_model.parameters(),lr=0.1)
alices_opt = optim.SGD(params=alices_model.parameters(),lr=0.1)
```
# Step 4: Train Bob's and Alice's Models (in parallel)
As is conventional with Federated Learning via Secure Averaging, each data owner first trains their model for several iterations locally before the models are averaged together.
```
for i in range(10):
# Train Bob's Model
bobs_opt.zero_grad()
bobs_pred = bobs_model(bobs_data)
bobs_loss = ((bobs_pred - bobs_target)**2).sum()
bobs_loss.backward()
bobs_opt.step()
bobs_loss = bobs_loss.get().data
# Train Alice's Model
alices_opt.zero_grad()
alices_pred = alices_model(alices_data)
alices_loss = ((alices_pred - alices_target)**2).sum()
alices_loss.backward()
alices_opt.step()
alices_loss = alices_loss.get().data
print("Bob:" + str(bobs_loss) + " Alice:" + str(alices_loss))
```
# Step 5: Send Both Updated Models to a Secure Worker
Now that each data owner has a partially trained model, it's time to average them together in a secure way. We achieve this by instructing Alice and Bob to send their model to the secure (trusted) server.
Note that this use of our API means that each model is sent DIRECTLY to the secure_worker. We never see it.
```
alices_model.move(secure_worker)
bobs_model.move(secure_worker)
```
# Step 6: Average the Models
Finally, the last step for this training epoch is to average Bob and Alice's trained models together and then use this to set the values for our global "model".
```
with torch.no_grad():
model.weight.set_(((alices_model.weight.data + bobs_model.weight.data) / 2).get())
model.bias.set_(((alices_model.bias.data + bobs_model.bias.data) / 2).get())
```
# Rinse and Repeat
And now we just need to iterate this multiple times!
```
iterations = 10
worker_iters = 5
for a_iter in range(iterations):
bobs_model = model.copy().send(bob)
alices_model = model.copy().send(alice)
bobs_opt = optim.SGD(params=bobs_model.parameters(),lr=0.1)
alices_opt = optim.SGD(params=alices_model.parameters(),lr=0.1)
for wi in range(worker_iters):
# Train Bob's Model
bobs_opt.zero_grad()
bobs_pred = bobs_model(bobs_data)
bobs_loss = ((bobs_pred - bobs_target)**2).sum()
bobs_loss.backward()
bobs_opt.step()
bobs_loss = bobs_loss.get().data
# Train Alice's Model
alices_opt.zero_grad()
alices_pred = alices_model(alices_data)
alices_loss = ((alices_pred - alices_target)**2).sum()
alices_loss.backward()
alices_opt.step()
alices_loss = alices_loss.get().data
alices_model.move(secure_worker)
bobs_model.move(secure_worker)
with torch.no_grad():
model.weight.set_(((alices_model.weight.data + bobs_model.weight.data) / 2).get())
model.bias.set_(((alices_model.bias.data + bobs_model.bias.data) / 2).get())
print("Bob:" + str(bobs_loss) + " Alice:" + str(alices_loss))
```
Lastly, we want to make sure that our resulting model learned correctly, so we'll evaluate it on a test dataset. In this toy problem, we'll use the original data, but in practice we'll want to use new data to understand how well the model generalizes to unseen examples.
```
preds = model(data)
loss = ((preds - target) ** 2).sum()
print(preds)
print(target)
print(loss.data)
```
In this toy example, the averaged model underfits relative to how a plaintext model trained locally would behave; however, we were able to train it without exposing each worker's training data, and we aggregated the updated models from each worker on a trusted aggregator to prevent data leakage to the model owner.
In a future tutorial, we'll aim to do our trusted aggregation directly with the gradients, so that we can update the model with better gradient estimates and arrive at a stronger model.
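As a rough illustration of that idea (plain PyTorch, not the PySyft API used in this tutorial), averaging gradients instead of weights could look something like the sketch below; the data shards and learning rate mirror the toy setup above and are otherwise placeholders.
```
# a minimal sketch of gradient averaging with plain PyTorch tensors
import torch
from torch import nn

toy_model = nn.Linear(2, 1)
toy_data = torch.tensor([[0, 0], [0, 1], [1, 0], [1, 1.]])
toy_target = torch.tensor([[0], [0], [1], [1.]])

# pretend these are the two workers' local data shards
shards = [(toy_data[0:2], toy_target[0:2]), (toy_data[2:], toy_target[2:])]

grads = []
for x, y in shards:
    toy_model.zero_grad()
    loss = ((toy_model(x) - y) ** 2).sum()
    loss.backward()
    # in the real setting these gradients would be sent to the trusted aggregator
    grads.append([p.grad.clone() for p in toy_model.parameters()])

# the aggregator averages the gradients and applies a single update to the global model
with torch.no_grad():
    for p, g0, g1 in zip(toy_model.parameters(), grads[0], grads[1]):
        p -= 0.1 * (g0 + g1) / 2
```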
# Congratulations!!! - Time to Join the Community!
Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!
### Star PySyft on GitHub
The easiest way to help our community is just by starring the Repos! This helps raise awareness of the cool tools we're building.
- [Star PySyft](https://github.com/OpenMined/PySyft)
### Join our Slack!
The best way to keep up to date on the latest advancements is to join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org)
### Join a Code Project!
The best way to contribute to our community is to become a code contributor! At any time you can go to PySyft GitHub Issues page and filter for "Projects". This will show you all the top level Tickets giving an overview of what projects you can join! If you don't want to join a project, but you would like to do a bit of coding, you can also look for more "one off" mini-projects by searching for GitHub issues marked "good first issue".
- [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject)
- [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
### Donate
If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!
[OpenMined's Open Collective Page](https://opencollective.com/openmined)
<a href="https://colab.research.google.com/github/Penitto/risk_project1/blob/master/Iriski.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Selected risk factors
1. Equity risk: the risk of a decline in share prices; __RTS and MOEX indices__
2. Interest rate risk: the risk of changes in interest rates; __interest rates__
3. Currency risk: the risk of exchange rate movements; __USD and CNY exchange rates__
4. Commodity risk: the risk of changes in commodity prices; __oil prices__
# Stochastic dynamics model
```
import numpy as np
import pandas as pd
import os
from scipy.interpolate import CubicSpline
class Portfolio():
def __init__(self, bonds, shares, cur, risk):
self.bonds = bonds
self.shares = shares
self.cur = cur
self.risk = risk
self.price = 0
    # Compute the portfolio price on a given day
def countPriceInDate(self, date):
res = 0
for i in self.bonds:
res += i[3] * i[1]['<CLOSE>'].loc[date]
for i in self.shares:
            res += i[3] * i[1]['<CLOSE>'].loc[date]
        return res
    # Compute the portfolio volume in currency units
def countInitialValue(self):
for i in range(len(self.bonds)):
amount = self.bonds[i][2] / self.bonds[i][1]['<CLOSE>'][0]
self.bonds[i] = (*self.bonds[i], amount)
for i in range(len(self.shares)):
amount = self.shares[i][2] / self.shares[i][1]['<CLOSE>'][0]
self.shares[i] = (*self.shares[i], amount)
for i in range(len(self.cur)):
amount = self.cur[i][2] / self.cur[i][1]['<CLOSE>'][0]
self.cur[i] = (*self.cur[i], amount)
# Load the data into the class
shares = ['./shares/AFLT_160101_200101.csv',
'./shares/GAZP_160101_200101.csv',
'./shares/GMKN_160101_200101.csv',
'./shares/KMAZ_160101_200101.csv',
'./shares/LKOH_160101_200101.csv',
'./shares/PIKK_160101_200101.csv',
'./shares/MGNT_160101_200101.csv',
'./shares/RBCM_160101_200101.csv',
'./shares/ROSN_160101_200101.csv',
'./shares/SBER_160101_200101.csv']
shares_name = [i[9:13] for i in shares]
# Needs interpolation
bonds = ['./bonds/SU26212RMFS9_160101_200101.csv',
'./bonds/SU26205RMFS3_160101_200101.csv',
'./bonds/SU26207RMFS9_160101_200101.csv',
'./bonds/SU26209RMFS5_160101_200101.csv',
'./bonds/SU26211RMFS1_160101_200101.csv']
bonds_name = [i[8:20] for i in bonds]
currencies = ['./index/USD_RUB.csv', './index/CNY_RUB.csv']
currencies_name = [i[8:15] for i in currencies]
indexes = ['./index/ICE.BRN_160101_200101.csv',
'./index/IMOEX_160101_200101.csv',
'./index/RTSI_160101_200101.csv']
indexes_name = ['Brent', 'MOEX', 'RTSI']
zero_bond = './zerobond.csv'
days = 1010
risk_df = pd.read_csv(shares[0], index_col='<DATE>').drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1)
risk_df.index = pd.to_datetime(risk_df.index)
k = 1
for i in shares[1:]:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>': shares_name[k]})
tmp.index = pd.to_datetime(tmp.index)
risk_df = risk_df.join(tmp, how='left')
k += 1
k = 0
for i in bonds:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>' : bonds_name[k]})
tmp.index = pd.to_datetime(tmp.index)
risk_df = risk_df.join(tmp, how='left')
k += 1
k = 0
for i in currencies:
tmp = pd.read_csv(i, index_col='Date') \
.drop(['Open', 'High', 'Low', 'Change %'], axis=1) \
.rename(columns={'Price' : currencies_name[k]})
tmp.index = pd.to_datetime(tmp.index)
risk_df = risk_df.join(tmp, how='left')
k += 1
k = 0
for i in indexes:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>' : indexes_name[k]})
tmp.index = pd.to_datetime(tmp.index)
risk_df = risk_df.join(tmp, how='left')
k += 1
risk_df = risk_df.rename(columns={'<CLOSE>' : shares_name[0]})
zero_bond_df = pd.read_csv(zero_bond, sep=';', index_col='Date')
zero_bond_df.index = pd.to_datetime(zero_bond_df.index)
risk_df = risk_df.join(zero_bond_df, how='left')
risk_df = risk_df.fillna(risk_df.mean(axis=0))
tmp = risk_df.diff()
temp = risk_df.iloc[:-1,:]
temp.index = tmp.iloc[1:,:].index
tmp = tmp.iloc[1:,:] / temp
def get_data():
    # Load and prepare the data here
shares = ['./shares/AFLT_160101_200101.csv',
'./shares/GAZP_160101_200101.csv',
'./shares/GMKN_160101_200101.csv',
'./shares/KMAZ_160101_200101.csv',
'./shares/LKOH_160101_200101.csv',
'./shares/PIKK_160101_200101.csv',
'./shares/MGNT_160101_200101.csv',
'./shares/RBCM_160101_200101.csv',
'./shares/ROSN_160101_200101.csv',
'./shares/SBER_160101_200101.csv']
shares_name = [i[9:13] for i in shares]
    # Needs interpolation
bonds = ['./bonds/SU26212RMFS9_160101_200101.csv',
'./bonds/SU26205RMFS3_160101_200101.csv',
'./bonds/SU26207RMFS9_160101_200101.csv',
'./bonds/SU26209RMFS5_160101_200101.csv',
'./bonds/SU26211RMFS1_160101_200101.csv']
bonds_name = [i[8:20] for i in bonds]
currencies = ['./index/USD_RUB.csv', './index/CNY_RUB.csv']
currencies_name = [i[8:15] for i in currencies]
indexes = ['./index/IMOEX_160101_200101.csv',
'./index/RTSI_160101_200101.csv',
'./index/ICE.BRN_160101_200101.csv']
indexes_name = ['MOEX', 'RTSI', 'Brent']
zero_bond = './zerobond.csv'
act_instruments = pd.read_csv(shares[0], index_col='<DATE>').drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1)
act_instruments.index = pd.to_datetime(act_instruments.index)
k = 1
for i in shares[1:]:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>': shares_name[k]})
tmp.index = pd.to_datetime(tmp.index)
act_instruments = act_instruments.join(tmp, how='left')
k += 1
k = 0
for i in bonds:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>' : bonds_name[k]})
tmp.index = pd.to_datetime(tmp.index)
act_instruments = act_instruments.join(tmp, how='left')
k += 1
act_instruments = act_instruments.rename(columns={'<CLOSE>' : shares_name[0]})
act_instruments = act_instruments.fillna(act_instruments.mean(axis=0))
act_risks = pd.read_csv(indexes[0], index_col='<DATE>').drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1)
act_risks.index = pd.to_datetime(act_risks.index)
k = 1
for i in indexes[1:]:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>' : indexes_name[k]})
tmp.index = pd.to_datetime(tmp.index)
act_risks = act_risks.join(tmp, how='left')
k += 1
k = 0
for i in currencies:
tmp = pd.read_csv(i, index_col='Date') \
.drop(['Open', 'High', 'Low', 'Change %'], axis=1) \
.rename(columns={'Price' : currencies_name[k]})
tmp.index = pd.to_datetime(tmp.index)
act_risks = act_risks.join(tmp, how='left')
k += 1
zero_bond_df = pd.read_csv(zero_bond, sep=';', index_col='Date')
zero_bond_df.index = pd.to_datetime(zero_bond_df.index)
act_risks = act_risks.join(zero_bond_df, how='left')
act_risks = act_risks.fillna(act_risks.mean(axis=0))
tmp = act_risks.diff()
temp = act_risks.iloc[:-1,:]
temp.index = tmp.iloc[1:,:].index
r_risks = tmp.iloc[1:,:] / temp
tmp = act_instruments.diff()
temp = act_instruments.iloc[:-1,:]
temp.index = tmp.iloc[1:,:].index
r_instruments = tmp.iloc[1:,:] / temp
    # act_risks = pd.DataFrame() # actual values of the risk factors
    # act_instruments = pd.DataFrame() # actual values of the instruments
    # r_risks = pd.DataFrame() # risk factors expressed as arithmetic returns
    # r_instruments = pd.DataFrame() # instrument returns expressed as arithmetic returns
return r_risks, r_instruments, act_risks, act_instruments
get_data()
risk_df
risk_df.diff()
t = 1
vol = [(-1 + np.sqrt(1 + (1 / 1009) * np.sum((log_risk_df[log_risk_df.columns[i]] - t*np.mean(log_risk_df[log_risk_df.columns[i]]))**2))) / (t/2) for i in range(len(log_risk_df.columns))]
vol
def get_index():
moex_ind = pd.read_csv('./index/IMOEX_160101_200101.csv')
rts_ind = pd.read_csv('./index/RTSI_160101_200101.csv')
zerobond = pd.read_csv('./zerobond.csv', sep=';')
weeks_in_month = (365/7)*(1/12)
maturity_rub = [3,6,9,12,24,36,60,84,120,180,240,360,]
maturity_rub= [i * weeks_in_month for i in maturity_rub]
interpolate_rub = CubicSpline(maturity_rub, ds_rate_rub)
interval_rub = np.arange(0, 54, 2)
df_rub_int = pd.DataFrame(data={'maturity_rub_2weeks': interval_rub, 'rub_act': interpolate_rub(interval_rub)})
df_rub_usd_int=pd.concat([df_usd_int, df_rub_int], axis=1, sort=False)
df_rub_usd_int=df_rub_usd_int.drop(['maturity_rub_2weeks'], axis=1)
df_rub_usd_int=df_rub_usd_int.rename(columns={"maturity_usd_2weeks":"maturity"})
df_rub_usd_int['maturity_frac'] = df_rub_usd_int['maturity']/54
# s=0.0134
s=1/0.01442
new_rates = pd.concat(
[
df_rub_usd_int,
df_rub_usd_int.diff(1).rename(columns={x:x.replace('act','diff') for x in df_rub_usd_int.columns})
],
axis=1)
new_rates.fillna(0, inplace=True)
new_rates['fx_act']=(s*(1+new_rates['usd_act']*0.01)/(1+new_rates['rub_act']*0.01))
new_rates['fx_diff'] =new_rates['fx_act'].diff()
curve_rub_act = new_rates.loc[1:,'rub_act']
curve_usd_act = new_rates.loc[1:,'usd_act']
curve_fx_act = new_rates.loc[1:,'fx_act']
curve_rub_diff = new_rates.loc[1:,'rub_diff']
curve_usd_diff = new_rates.loc[1:,'usd_diff']
curve_fx_diff = new_rates.loc[1:,'fx_diff']
init = new_rates.loc[0,['rub_act','usd_act','fx_act']]
return (
curve_rub_act,
curve_usd_act,
curve_fx_act,
curve_rub_diff,
curve_usd_diff,
curve_fx_diff,
init)
def stoch_wrapper(decomp):
def make_stoch(num):
sigma=[0.03, 0.0093, 0.11]
stoch_generator = np.dot(np.random.normal(size=(num,3)),decomp)*sigma
return stoch_generator
return make_stoch
stoch_generator = stoch_wrapper(get_decomp())
def simulate_hull_white(
sim_number = 10,):
rub_alpha=0.03
sigma=[0.03, 0.0093, 0.11]
k_fx=0.015
dt=14/365
timesteps = 26
(
curve_rub,
curve_rub_df,
init
) = get_rates()
results = np.zeros(shape=(timesteps+1, 3, sim_number))
passed_time=0
for sim_ix in range(sim_number):
results[0, :, sim_ix] = init
stochs = stoch_generator(timesteps+1)
for i, (rate_rub, df_rub, stoch_tuple) in enumerate(zip(curve_rub,curve_rub_df, stochs)):
passed_time += dt
theta_rub = df_rub + rub_alpha * rate_rub + (sigma[0]**2) * (1 - np.exp(-2 * rub_alpha * passed_time)) / 2 * rub_alpha
results[i + 1, 0, sim_ix] = (theta_rub - rub_alpha* results[:, 0, sim_ix].sum()) * dt + stoch_tuple[0]
return results
```
# Fair value estimation as a function of the risk factors
# Portfolio risk estimation
# Simple quantitative validation
|
github_jupyter
|
import numpy as np
import pandas as pd
import os
from scipy.interpolate import CubicSpline
class Portfolio():
def __init__(self, bonds, shares, cur, risk):
self.bonds = bonds
self.shares = shares
self.cur = cur
self.risk = risk
self.price = 0
    # Compute the portfolio price on a given day
def countPriceInDate(self, date):
res = 0
for i in self.bonds:
res += i[3] * i[1]['<CLOSE>'].loc[date]
for i in self.shares:
            res += i[3] * i[1]['<CLOSE>'].loc[date]
        return res
    # Compute the portfolio volume in currency units
def countInitialValue(self):
for i in range(len(self.bonds)):
amount = self.bonds[i][2] / self.bonds[i][1]['<CLOSE>'][0]
self.bonds[i] = (*self.bonds[i], amount)
for i in range(len(self.shares)):
amount = self.shares[i][2] / self.shares[i][1]['<CLOSE>'][0]
self.shares[i] = (*self.shares[i], amount)
for i in range(len(self.cur)):
amount = self.cur[i][2] / self.cur[i][1]['<CLOSE>'][0]
self.cur[i] = (*self.cur[i], amount)
# Load the data into the class
shares = ['./shares/AFLT_160101_200101.csv',
'./shares/GAZP_160101_200101.csv',
'./shares/GMKN_160101_200101.csv',
'./shares/KMAZ_160101_200101.csv',
'./shares/LKOH_160101_200101.csv',
'./shares/PIKK_160101_200101.csv',
'./shares/MGNT_160101_200101.csv',
'./shares/RBCM_160101_200101.csv',
'./shares/ROSN_160101_200101.csv',
'./shares/SBER_160101_200101.csv']
shares_name = [i[9:13] for i in shares]
# Needs interpolation
bonds = ['./bonds/SU26212RMFS9_160101_200101.csv',
'./bonds/SU26205RMFS3_160101_200101.csv',
'./bonds/SU26207RMFS9_160101_200101.csv',
'./bonds/SU26209RMFS5_160101_200101.csv',
'./bonds/SU26211RMFS1_160101_200101.csv']
bonds_name = [i[8:20] for i in bonds]
currencies = ['./index/USD_RUB.csv', './index/CNY_RUB.csv']
currencies_name = [i[8:15] for i in currencies]
indexes = ['./index/ICE.BRN_160101_200101.csv',
'./index/IMOEX_160101_200101.csv',
'./index/RTSI_160101_200101.csv']
indexes_name = ['Brent', 'MOEX', 'RTSI']
zero_bond = './zerobond.csv'
days = 1010
risk_df = pd.read_csv(shares[0], index_col='<DATE>').drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1)
risk_df.index = pd.to_datetime(risk_df.index)
k = 1
for i in shares[1:]:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>': shares_name[k]})
tmp.index = pd.to_datetime(tmp.index)
risk_df = risk_df.join(tmp, how='left')
k += 1
k = 0
for i in bonds:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>' : bonds_name[k]})
tmp.index = pd.to_datetime(tmp.index)
risk_df = risk_df.join(tmp, how='left')
k += 1
k = 0
for i in currencies:
tmp = pd.read_csv(i, index_col='Date') \
.drop(['Open', 'High', 'Low', 'Change %'], axis=1) \
.rename(columns={'Price' : currencies_name[k]})
tmp.index = pd.to_datetime(tmp.index)
risk_df = risk_df.join(tmp, how='left')
k += 1
k = 0
for i in indexes:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>' : indexes_name[k]})
tmp.index = pd.to_datetime(tmp.index)
risk_df = risk_df.join(tmp, how='left')
k += 1
risk_df = risk_df.rename(columns={'<CLOSE>' : shares_name[0]})
zero_bond_df = pd.read_csv(zero_bond, sep=';', index_col='Date')
zero_bond_df.index = pd.to_datetime(zero_bond_df.index)
risk_df = risk_df.join(zero_bond_df, how='left')
risk_df = risk_df.fillna(risk_df.mean(axis=0))
tmp = risk_df.diff()
temp = risk_df.iloc[:-1,:]
temp.index = tmp.iloc[1:,:].index
tmp = tmp.iloc[1:,:] / temp
def get_data():
    # Load and prepare the data here
shares = ['./shares/AFLT_160101_200101.csv',
'./shares/GAZP_160101_200101.csv',
'./shares/GMKN_160101_200101.csv',
'./shares/KMAZ_160101_200101.csv',
'./shares/LKOH_160101_200101.csv',
'./shares/PIKK_160101_200101.csv',
'./shares/MGNT_160101_200101.csv',
'./shares/RBCM_160101_200101.csv',
'./shares/ROSN_160101_200101.csv',
'./shares/SBER_160101_200101.csv']
shares_name = [i[9:13] for i in shares]
    # Needs interpolation
bonds = ['./bonds/SU26212RMFS9_160101_200101.csv',
'./bonds/SU26205RMFS3_160101_200101.csv',
'./bonds/SU26207RMFS9_160101_200101.csv',
'./bonds/SU26209RMFS5_160101_200101.csv',
'./bonds/SU26211RMFS1_160101_200101.csv']
bonds_name = [i[8:20] for i in bonds]
currencies = ['./index/USD_RUB.csv', './index/CNY_RUB.csv']
currencies_name = [i[8:15] for i in currencies]
indexes = ['./index/IMOEX_160101_200101.csv',
'./index/RTSI_160101_200101.csv',
'./index/ICE.BRN_160101_200101.csv']
indexes_name = ['MOEX', 'RTSI', 'Brent']
zero_bond = './zerobond.csv'
act_instruments = pd.read_csv(shares[0], index_col='<DATE>').drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1)
act_instruments.index = pd.to_datetime(act_instruments.index)
k = 1
for i in shares[1:]:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>': shares_name[k]})
tmp.index = pd.to_datetime(tmp.index)
act_instruments = act_instruments.join(tmp, how='left')
k += 1
k = 0
for i in bonds:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>' : bonds_name[k]})
tmp.index = pd.to_datetime(tmp.index)
act_instruments = act_instruments.join(tmp, how='left')
k += 1
act_instruments = act_instruments.rename(columns={'<CLOSE>' : shares_name[0]})
act_instruments = act_instruments.fillna(act_instruments.mean(axis=0))
act_risks = pd.read_csv(indexes[0], index_col='<DATE>').drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1)
act_risks.index = pd.to_datetime(act_risks.index)
k = 1
for i in indexes[1:]:
tmp = pd.read_csv(i, index_col='<DATE>') \
.drop(['<TICKER>', '<PER>', '<TIME>', '<HIGH>', '<LOW>', '<VOL>', '<OPEN>'], axis=1) \
.rename(columns={'<CLOSE>' : indexes_name[k]})
tmp.index = pd.to_datetime(tmp.index)
act_risks = act_risks.join(tmp, how='left')
k += 1
k = 0
for i in currencies:
tmp = pd.read_csv(i, index_col='Date') \
.drop(['Open', 'High', 'Low', 'Change %'], axis=1) \
.rename(columns={'Price' : currencies_name[k]})
tmp.index = pd.to_datetime(tmp.index)
act_risks = act_risks.join(tmp, how='left')
k += 1
zero_bond_df = pd.read_csv(zero_bond, sep=';', index_col='Date')
zero_bond_df.index = pd.to_datetime(zero_bond_df.index)
act_risks = act_risks.join(zero_bond_df, how='left')
act_risks = act_risks.fillna(act_risks.mean(axis=0))
tmp = act_risks.diff()
temp = act_risks.iloc[:-1,:]
temp.index = tmp.iloc[1:,:].index
r_risks = tmp.iloc[1:,:] / temp
tmp = act_instruments.diff()
temp = act_instruments.iloc[:-1,:]
temp.index = tmp.iloc[1:,:].index
r_instruments = tmp.iloc[1:,:] / temp
    # act_risks = pd.DataFrame() # actual values of the risk factors
    # act_instruments = pd.DataFrame() # actual values of the instruments
    # r_risks = pd.DataFrame() # risk factors expressed as arithmetic returns
    # r_instruments = pd.DataFrame() # instrument returns expressed as arithmetic returns
return r_risks, r_instruments, act_risks, act_instruments
get_data()
risk_df
risk_df.diff()
t = 1
vol = [(-1 + np.sqrt(1 + (1 / 1009) * np.sum((log_risk_df[log_risk_df.columns[i]] - t*np.mean(log_risk_df[log_risk_df.columns[i]]))**2))) / (t/2) for i in range(len(log_risk_df.columns))]
vol
def get_index():
moex_ind = pd.read_csv('./index/IMOEX_160101_200101.csv')
rts_ind = pd.read_csv('./index/RTSI_160101_200101.csv')
zerobond = pd.read_csv('./zerobond.csv', sep=';')
weeks_in_month = (365/7)*(1/12)
maturity_rub = [3,6,9,12,24,36,60,84,120,180,240,360,]
maturity_rub= [i * weeks_in_month for i in maturity_rub]
interpolate_rub = CubicSpline(maturity_rub, ds_rate_rub)
interval_rub = np.arange(0, 54, 2)
df_rub_int = pd.DataFrame(data={'maturity_rub_2weeks': interval_rub, 'rub_act': interpolate_rub(interval_rub)})
df_rub_usd_int=pd.concat([df_usd_int, df_rub_int], axis=1, sort=False)
df_rub_usd_int=df_rub_usd_int.drop(['maturity_rub_2weeks'], axis=1)
df_rub_usd_int=df_rub_usd_int.rename(columns={"maturity_usd_2weeks":"maturity"})
df_rub_usd_int['maturity_frac'] = df_rub_usd_int['maturity']/54
# s=0.0134
s=1/0.01442
new_rates = pd.concat(
[
df_rub_usd_int,
df_rub_usd_int.diff(1).rename(columns={x:x.replace('act','diff') for x in df_rub_usd_int.columns})
],
axis=1)
new_rates.fillna(0, inplace=True)
new_rates['fx_act']=(s*(1+new_rates['usd_act']*0.01)/(1+new_rates['rub_act']*0.01))
new_rates['fx_diff'] =new_rates['fx_act'].diff()
curve_rub_act = new_rates.loc[1:,'rub_act']
curve_usd_act = new_rates.loc[1:,'usd_act']
curve_fx_act = new_rates.loc[1:,'fx_act']
curve_rub_diff = new_rates.loc[1:,'rub_diff']
curve_usd_diff = new_rates.loc[1:,'usd_diff']
curve_fx_diff = new_rates.loc[1:,'fx_diff']
init = new_rates.loc[0,['rub_act','usd_act','fx_act']]
return (
curve_rub_act,
curve_usd_act,
curve_fx_act,
curve_rub_diff,
curve_usd_diff,
curve_fx_diff,
init)
def stoch_wrapper(decomp):
def make_stoch(num):
sigma=[0.03, 0.0093, 0.11]
stoch_generator = np.dot(np.random.normal(size=(num,3)),decomp)*sigma
return stoch_generator
return make_stoch
stoch_generator = stoch_wrapper(get_decomp())
def simulate_hull_white(
sim_number = 10,):
rub_alpha=0.03
sigma=[0.03, 0.0093, 0.11]
k_fx=0.015
dt=14/365
timesteps = 26
(
curve_rub,
curve_rub_df,
init
) = get_rates()
results = np.zeros(shape=(timesteps+1, 3, sim_number))
passed_time=0
for sim_ix in range(sim_number):
results[0, :, sim_ix] = init
stochs = stoch_generator(timesteps+1)
for i, (rate_rub, df_rub, stoch_tuple) in enumerate(zip(curve_rub,curve_rub_df, stochs)):
passed_time += dt
theta_rub = df_rub + rub_alpha * rate_rub + (sigma[0]**2) * (1 - np.exp(-2 * rub_alpha * passed_time)) / 2 * rub_alpha
results[i + 1, 0, sim_ix] = (theta_rub - rub_alpha* results[:, 0, sim_ix].sum()) * dt + stoch_tuple[0]
return results
| 0.200949 | 0.76921 |
## Data types
The data type for a column in a `DataFrame` or a `Series` is known as the `dtype`.
You can use the `dtype` property to grab the type of a specific column:
```
import pandas as pd
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
pd.set_option('max_rows', 5)
reviews.price.dtype
```
Alternatively, the `dtypes` property returns the `dtype` of _every_ column in the dataset:
```
reviews.dtypes
```
Data types tell us something about how `pandas` is storing the data internally. `float64` means that it's using a 64-bit floating point number; `int64` means a similarly sized integer instead, and so on.
One peculiarity to keep in mind (and on display very clearly here) is that columns consisting entirely of strings do not get their own type; they are instead given the `object` type.
It's possible to convert a column of one type into another wherever such a conversion makes sense by using the `astype` function. For example, we may transform the `points` column from its existing `int64` data type into a `float64` data type:
```
reviews.points.astype('float64')
```
A `DataFrame` or `Series` index has its own `dtype`, too:
```
reviews.index.dtype
```
`pandas` also supports more exotic data types: categorical data and timeseries data. Because these data types are more rarely used, we will omit them until a much later section of this tutorial.
## Missing data
Entries with missing values are given the value `NaN`, short for "Not a Number". For technical reasons these `NaN` values are always of the `float64` dtype.
`pandas` provides some methods specific to missing data. To select `NaN` entries you can use `pd.isnull` (or its companion `pd.notnull`). It is typically used like this:
```
reviews[reviews.country.isnull()]
```
Replacing missing values is a common operation. `pandas` provides a really handy method for this problem: `fillna`. `fillna` provides a few different strategies for mitigating such data. For example, we can simply replace each `NaN` with an `"Unknown"`:
```
reviews.region_2.fillna("Unknown")
```
Or we could fill each missing value with the first non-null value that appears after the given record in the dataset. This is known as the backfill strategy:
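A minimal sketch of that strategy (this cell is not part of the original text) uses `fillna` with `method='bfill'`:
```
reviews.region_2.fillna(method='bfill')
```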
`fillna` supports a few strategies for imputing missing values. For more on that read [the official function documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html).
Alternatively, we may have a non-null value that we would like to replace. For example, suppose that since this dataset was published, reviewer Kerin O'Keefe has changed her Twitter handle from `@kerinokeefe` to `@kerino`. One way to reflect this in the dataset is using the `replace` method:
```
reviews.taster_twitter_handle.replace("@kerinokeefe", "@kerino")
```
The `replace` method is worth mentioning here because it's handy for replacing missing data which is given some kind of sentinel value in the dataset: things like `"Unknown"`, `"Undisclosed"`, `"Invalid"`, and so on.
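As a small added illustration, a sentinel can be mapped back to a proper missing value; this sketch reuses the `"Unknown"` fill from above and `np.nan` as the missing marker:
```
import numpy as np
reviews.region_2.fillna("Unknown").replace("Unknown", np.nan)
```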
|
github_jupyter
|
import pandas as pd
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
pd.set_option('max_rows', 5)
reviews.price.dtype
reviews.dtypes
reviews.points.astype('float64')
reviews.index.dtype
reviews[reviews.country.isnull()]
reviews.region_2.fillna("Unknown")
reviews.taster_twitter_handle.replace("@kerinokeefe", "@kerino")
| 0.128771 | 0.992 |
#### Jupyter notebooks
This is a [Jupyter](http://jupyter.org/) notebook using Python. You can install Jupyter locally to edit and interact with this notebook.
# Differential Equations
We now consider the problem of computing $y(t)$ from an *ordinary differential equation* (ODE)
$$ y'(t) = f(t,y) $$
and initial condition $y(0)$.
In the following, $y$ may be a scalar or vector value.
For convenience of notation, we may suppress the time dependence by writing $y$ instead of $y(t)$.
### Second Order Equations
A second order system such as
$$ y'' = f(t, y, y') $$
may always be converted to a first order system by introducing a new variable
$$ \begin{bmatrix} y_0 \\ y_1 \end{bmatrix}' = \begin{bmatrix} y_1 \\ f(t, y_0, y_1) \end{bmatrix} . $$
Therefore, without loss of generality, we will focus on first order systems.
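As a concrete illustration (an added sketch, not part of the original notes), the reduction can be packaged as a small wrapper that turns $f(t, y, y')$ into a first-order right-hand side; the harmonic oscillator $y'' = -y$ serves as the example.
```
import numpy as np

def second_to_first_order(f):
    """Wrap y'' = f(t, y, y') as a first-order system u' = rhs(t, u) with u = [y, y']."""
    def rhs(t, u):
        y0, y1 = u
        return np.array([y1, f(t, y0, y1)])
    return rhs

# Example: the harmonic oscillator y'' = -y
oscillator = second_to_first_order(lambda t, y, yp: -y)
oscillator(0.0, np.array([1.0, 0.0]))  # -> array([ 0., -1.])
```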
### Implicit formulations
We have chosen the explicit representation $y' = f(t,y)$, but it is more general to write $h(t,y,y') = 0$.
If $\partial h/\partial y'$ is singular, then this describes a *differential algebraic equation* (DAE). DAEs are more challenging to solve and beyond the scope of this course.
## Linear Equations
If $f(y,t)$ is a linear function of $y$ then we have a linear ODE
$$ y' = A(t) y + \text{source}(t) . $$
If $A(t)$ is independent of $t$ and $\text{source}(t) = 0$ then we have a linear, constant, autonomous ODE.
When $y$ is a scalar then $A = a$ is a scalar and the solution is
$$ y = y(0) e^{at} . $$
#### Question
What qualitative dynamics does this imply for
* $a > 0$?
* $a < 0$?
* $a$ imaginary?
#### Question
* What if $A$ is diagonal?
* What if $A = X \Lambda X^{-1}$?
#### Matrix exponential
The general solution can be written in terms of the matrix exponential.
$$ y(t) = e^{At} y(0) . $$
The matrix exponential is defined by its Taylor series
$$ e^A = 1 + A + \frac{A^2}{2} + \frac{A^3}{3!} + \dotsb $$
and there are many [practical ways to compute it](http://www.cs.cornell.edu/cv/ResearchPDF/19ways+.pdf).
#### Question
Suppose that the diagonalization $A = X \Lambda X^{-1}$ exists and derive a finite expression for the matrix exponential using the scalar `exp` function.
## Forward Euler Method
The simplest method for solving $y'(t) = f(t,y)$ is
to use numerical differentiation to write
$$ y' \approx \frac{y(h) - y(0)}{h} $$
which yields the solution estimate
$$ \tilde y(h) = y(0) + h f(0, y(0)) $$
where $h$ is the step size.
Let's try this on a scalar problem
$$ y' = -k (y - \cos t) $$
where $k$ is a parameter controlling the rate at which the solution $y(t)$ is pulled toward the curve $\cos t$.
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def ode_euler(f, y0, tfinal=1, h=0.1):
y = np.array(y0)
t = 0
thist = [t]
yhist = [y0]
while t < tfinal:
tnext = min(t+h, tfinal)
h = tnext - t
y += h * f(t, y)
t = tnext
thist.append(t)
yhist.append(y.copy())
return np.array(thist), np.array(yhist)
tests = []
class cosine:
def __init__(self, k=5):
self.k = k
def __repr__(self):
return 'cosine(k={:d})'.format(self.k)
def f(self, t, y):
return -self.k * (y - np.cos(t))
def y(self, t, y0):
k2p1 = self.k**2+1
return (y0 - self.k**2/k2p1) * np.exp(-self.k*t) + self.k*(np.sin(t) + self.k*np.cos(t))/k2p1
tests.append(cosine(k=2))
tests.append(cosine(k=1000))
y0 = np.array([.2])
for test in tests:
thist, yhist = ode_euler(test.f, y0, h=.2, tfinal=20)
plt.plot(thist, yhist, '.', label=repr(test)+' Forward Euler')
plt.plot(thist, test.y(thist, y0), label=repr(test)+' exact')
plt.plot(thist, np.cos(thist), label='cos')
plt.legend(loc='upper right');
```
#### Question
* What happens when $h$ is increased?
* What if $k$ is increased?
* What if the final time is increased?
#### Example: Linear system
Now we consider linear systems
$$ y' = A y $$
which have an exact solution $y(t) = e^{At} y(0)$ in terms of the matrix exponential.
```
def expm(A):
"""Compute the matrix exponential"""
L, X = np.linalg.eig(A)
return X @ np.diag(np.exp(L)) @ np.linalg.inv(X)
class linear:
def __init__(self, A):
self.A = A.copy()
def f(self, t, y):
return self.A @ y
def y(self, t, y0):
return [np.real_if_close(expm(self.A*s) @ y0) for s in t]
test = linear(np.array([[0, 1],
[-1, 0]]))
y0 = np.array([.5, 0])
thist, yhist = ode_euler(test.f, y0, h=.1, tfinal=10)
plt.figure()
plt.plot(thist, yhist, '.', label='Euler')
plt.plot(thist, test.y(thist, y0), label='exact')
plt.legend(loc='upper right');
```
#### Questions
* Does shrinking $h$ make this more accurate?
* What if `tfinal` is extended?
* What are the eigenvalues of $A$?
#### Runge-Kutta 4
Let's try a different method. (We'll look back at where this comes from later.)
```
def ode_rk4(f, y0, tfinal=1, h=0.1):
y = np.array(y0)
t = 0
thist = [t]
yhist = [y0]
while t < tfinal:
h = min(h, tfinal - t)
k1 = f(t, y)
k2 = f(t+h/2, y + k1*h/2)
k3 = f(t+h/2, y + k2*h/2)
k4 = f(t+h, y + k3*h)
y += h/6 * (k1 + 2*k2 + 2*k3 + k4)
t += h
thist.append(t)
yhist.append(y.copy())
return np.array(thist), np.array(yhist)
thist, yhist = ode_rk4(test.f, y0, h=.5, tfinal=50)
plt.figure()
plt.plot(thist, yhist, '.', label=repr(test)+' RK4')
plt.plot(thist, test.y(thist, y0), label=repr(test)+' exact');
```
### Linear Stability Analysis
Why did Euler diverge (even if slowly) while RK4 solved this problem accurately?
And why do both methods diverge if the step size is too large?
We can understand the convergence of methods by analyzing the test problem
$$ y' = \lambda y $$
for different values of $\lambda$ in the complex plane.
One step of the Euler method with step size $h$ maps
$$ y \to y + h \lambda y = \underbrace{(1 + h \lambda)}_{R(h \lambda)} y $$
where we have introduced the complex-valued function $R(z)$.
* When does this map cause solutions to "blow up" and when is it stable?
```
def plot_stability(x, y, Rz, label):
plt.figure()
levels = np.linspace(0, 2, 21)
C = plt.contourf(xx, yy, np.abs(Rz), levels, cmap=plt.cm.coolwarm)
cbar = plt.colorbar(C, ticks=np.linspace(0, 2, 5))
plt.axvline(x=0, linewidth=1, color='grey')
plt.axhline(y=0, linewidth=1, color='grey')
plt.contour(xx, yy, np.abs(Rz), [.5, 1, 1.5], colors='k')
plt.title(label)
x = np.linspace(-2,2)
xx, yy = np.meshgrid(x, x)
zz = xx + 1j*yy
R = 1 + zz
plot_stability(xx, yy, R, 'Forward Euler')
```
Evidently the forward Euler method is stable if $z = h\lambda$ is in the unit circle centered at $z=-1$, but not stable otherwise.
#### Implicit methods
Recall that forward Euler is the step
$$ \tilde y(h) = y(0) + h f(0, y(0)) . $$
This can be evaluated **explicitly**; all the terms on the right hand side are known so the approximation $\tilde y(h)$ is computed merely by evaluating the right hand side.
Let's consider an alternative, **backward Euler** (or "implicit Euler"),
$$ \tilde y(h) = y(0) + h f(h, \tilde y(h)) . $$
This is a (generally) nonlinear equation for $\tilde y(h)$.
For the test equation $y' = \lambda y$, the backward Euler method is
$$ \tilde y(h) = y(0) + h \lambda \tilde y(h) $$
or
$$ \tilde y(h) = \underbrace{\frac{1}{1 - h \lambda}}_{R(h\lambda)} y(0) . $$
```
plot_stability(xx, yy, 1/(1-zz), 'Backward Euler')
```
Evidently backward Euler is stable in the entire left half plane (where the exact solution is also stable) and also in some significant portions of the right half plane. Let's test it on the oscillator problem.
```
def solve_newtonfd(f, x0):
def fdjacobian(x):
J = np.eye(len(x),len(x))
base = f(x)
for col in J.T:
col[:] = (f(x + 1e-8*col) - base) / 1e-8
return J
x = x0.copy()
while True:
res = f(x)
if np.linalg.norm(res) < 1e-6:
return x
x -= np.linalg.solve(fdjacobian(x), res)
def ode_beuler(f, y0, tfinal=1, h=0.1):
y = np.array(y0)
t = 0
thist = [t]
yhist = [y0]
while t < tfinal:
h = min(h, tfinal - t)
# Solve x = y + h f(x)
def residual(ytilde):
return ytilde - (y + h * f(t+h, ytilde))
y = solve_newtonfd(residual, y)
t += h
thist.append(t)
yhist.append(y.copy())
return np.array(thist), np.array(yhist)
y0 = np.array([.5, 0])
thist, yhist = ode_beuler(test.f, y0, h=.1, tfinal=40)
plt.plot(thist, yhist, '.')
plt.plot(thist, test.y(thist, y0))
plt.title('Backward Euler');
y0 = np.array([.2])
for tst in tests:
thist, yhist = ode_beuler(tst.f, y0, h=.3, tfinal=10)
plt.plot(thist, yhist, '.', label=repr(tst)+' Backward Euler')
plt.plot(thist, tst.y(thist, y0), label=repr(tst)+' exact')
plt.plot(thist, np.cos(thist), label='cos')
plt.legend(loc='upper right');
```
#### Observations
* We need to solve a system of equations on each time step.
* The cost to solve the linear system is $O(n^3)$ for a system of $n$ equations.
* The Jacobian matrix is $n\times n$, so requires $n^2$ storage when everything else requires $O(n)$.
* I used finite differencing to compute the Jacobian, but many problems have efficient ways to compute an exact Jacobian (it just takes some programming).
* Backward Euler is stable (as predicted by theory), but not very accurate.
* What happens when you make the time step smaller or larger?
### Midpoint Method
What if, instead of evaluating the function at the end of the time step, we evaluated it in the middle of the time step using the average of the endpoint values? After all, something similar improved accuracy for our numerical integration...
$$ \tilde y(h) = y(0) + h f\left(\frac h 2, \frac{\tilde y(h) + y(0)}{2} \right) $$
```
def ode_midpoint(f, y0, tfinal=1, h=0.1):
y = y0.copy()
t = 0
thist = [t]
yhist = [y0]
while t < tfinal:
h = min(h, tfinal - t)
# Solve x = y + h f(x)
def residual(x):
return x - h * f(t+h/2, (x + y)/2) - y
y = solve_newtonfd(residual, y)
t += h
thist.append(t)
yhist.append(y.copy())
return np.array(thist), np.array(yhist)
test = linear(np.array([[0, 1],[-1, 0]]))
y0 = np.array([.5, 0])
thist, yhist = ode_midpoint(test.f, y0, h=1, tfinal=50)
plt.plot(thist, yhist, '.')
plt.plot(thist, test.y(thist, y0))
plt.title('Midpoint');
y0 = np.array([.2])
for test in [cosine(k=5), cosine(k=500)]:
thist, yhist = ode_midpoint(test.f, y0, h=.1, tfinal=5)
plt.plot(thist, yhist, '.', label=repr(test)+' Midpoint')
plt.plot(thist, test.y(thist, y0), label=repr(test)+' exact')
plt.legend(loc='upper right')
plt.title('Cosine');
```
#### Observations/Questions
* This appears to be quite accurate.
* We still have to solve a system of equations on each time step.
* What happens when the step size changes?
* What is the stability function $R(z)$ for the midpoint rule?
### Stability function
When applied to the test problem $y' = \lambda y$, the midpoint method is
\begin{align}
\tilde y(h) &= y(0) + h \lambda \frac{\tilde y(h) + y(0)}{2} \\
(1 - h \lambda/2) \tilde y(h) &= (1 + h\lambda/2) y(0) \\
\tilde y(h) &= \underbrace{\frac{1 + h\lambda/2}{1 - h \lambda/2}}_{R(h \lambda)} y(0)
\end{align}
so the stability function is
$$ R(z) = \frac{1 + z/2}{1 - z/2} .$$
```
plot_stability(xx, yy, (1+zz/2)/(1-zz/2), 'Midpoint')
```
### Runge-Kutta Methods
All of the methods we have seen thus far can be represented as Runge-Kutta methods, which can be arranged as a series of $s$ "stage" equations (possibly coupled) and a completion formula.
$$\begin{split}
Y_i = y(0) + h \sum_j a_{ij} f(t+c_j h, Y_j) \\
y(h) = y(0) + h \sum_j b_j f(t+c_j h, Y_j)
\end{split}$$
where $c$ is a vector of *abscissae*, $A$ is a table of coefficients, and $b$ is a vector of completion weights.
These coefficients are typically expressed in a Butcher Table
$$ \left[ \begin{array}{c|cc}
c_0 & a_{00} & a_{01} \\
c_1 & a_{10} & a_{11} \\
\hline
& b_0 & b_1
\end{array} \right] . $$
If the matrix $A$ is strictly lower triangular, then the method is **explicit** (does not require solving equations). We have seen forward Euler
$$ \left[ \begin{array}{c|c}
0 & 0 \\
\hline
& 1
\end{array} \right] ,$$
backward Euler
$$ \left[ \begin{array}{c|c}
1 & 1 \\
\hline
& 1
\end{array} \right] ,$$
and Midpoint
$$ \left[ \begin{array}{c|c}
\frac 1 2 & \frac 1 2 \\
\hline
& 1
\end{array} \right], $$
and that explicit method we called RK4
$$ \left[ \begin{array}{c|cccc}
0 & 0 & 0 & 0 & 0 \\
\frac 1 2 & \frac 1 2 & 0 & 0 & 0 \\
\frac 1 2 & 0 & \frac 1 2 & 0 & 0 \\
1 & 0 & 0 & 1 & 0 \\
\hline
& \frac 1 6 & \frac 1 3 & \frac 1 3 & \frac 1 6
\end{array} \right] . $$
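As an added sketch (not one of the original cells; the function name and the $c$ vector in the final comment are illustrative), the stage and completion formulas above can be implemented once for any explicit Butcher table:
```
import numpy as np

def rk_explicit_step(f, t, y, h, A, b, c):
    """One explicit Runge-Kutta step defined by a Butcher table (A, b, c).
    A must be strictly lower triangular so stage i uses only stages j < i."""
    s = len(b)
    k = []
    for i in range(s):
        Yi = y + h * sum(A[i][j] * k[j] for j in range(i))   # stage value Y_i
        k.append(f(t + c[i] * h, Yi))                        # stage derivative
    return y + h * sum(b[i] * k[i] for i in range(s))        # completion formula

# With the A_rk4, b_rk4 defined in the next cell and c = [0, 1/2, 1/2, 1],
# this reproduces the step taken by ode_rk4 above.
```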
```
def Rstability(A, b, z):
s = len(b)
def R(z):
return 1 + z * (b @ np.linalg.solve(np.eye(s) - z*A, np.ones(s)))
f = np.vectorize(R)
return f(z)
def plot_rkstability(A, b, name):
x = np.linspace(-4,4)
xx, yy = np.meshgrid(x, x)
zz = xx + 1j*yy
R = Rstability(A, b, zz)
plot_stability(xx, yy, R, name)
A_rk4 = np.array([[0,0,0,0],
[.5,0,0,0],
[0,.5,0,0],
[0,0,1,0]])
b_rk4 = np.array([1/6, 1/3, 1/3, 1/6])
plot_rkstability(A_rk4, b_rk4, 'RK4')
plot_rkstability(np.eye(1)/2, [1], 'RK Midpoint')
```
## Ballistics
The velocity of a particle in the $x$-$z$ plane is the time derivative of its position,
$$ \begin{bmatrix} v_x \\ v_z \end{bmatrix} = \begin{bmatrix} p_x \\ p_z \end{bmatrix}' . $$
The acceleration (derivative of velocity) depends on the force applied,
$$ \begin{bmatrix} v_x \\ v_z \end{bmatrix}' = \frac 1 m \begin{bmatrix} F_x \\ F_z \end{bmatrix} $$
where $m$ is the mass of the particle.
The gravitational force is
$$ \mathbf F^{\text{grav}} = m \begin{bmatrix} 0 \\ -g \end{bmatrix} $$
where $g = 9.8\ \text{meter}/\text{second}^2 $.
The drag force is often approximated as
$$ F^{\text{drag}} = \frac 1 2 \rho v^2 c_d A $$
where $\rho$ is the density of the fluid (assume $1\ \text{kilogram}/\text{meter}^3$),
$v =\sqrt{v_x^2 + v_z^2}$ is the velocity, $c_d$ is the drag coefficient (assume 0.2 for this projectile), and $A = \pi r^2$ is the cross-sectional area of the projectile (assume the radius is $r = .05\ \text{meter}$).
The direction of the drag force is opposite the velocity, thus the vector drag is
$$ \mathbf F^{\text{drag}}(v_x, v_z) = - \frac{\pi}{2} v c_d r^2 \begin{bmatrix} v_x \\ v_z \end{bmatrix} . $$
This produces the system of equations
$$ \begin{bmatrix} p_x \\ p_z \\ v_x \\ v_z \end{bmatrix}' =
\begin{bmatrix} v_x \\ v_z \\ \frac 1 m F_x^{\text{drag}} \\ \frac 1 m F_z^{\text{drag}} - 9.8 \end{bmatrix} . $$
For the ballistics computation, you should solve this differential equation with an $m=8$ kilogram projectile and initial condition of a velocity $v_0 = 300\ \text{meter}/\text{second}$ at angle $\theta$ above the horizontal, i.e.,
$$ \begin{bmatrix} 0 \\ 0 \\ v_0 \cos \theta \\ v_0 \sin \theta \end{bmatrix} . $$
The homework question is to compute the angle needed to hit a target on a distant slope of angle $\phi = 40^\circ = 40 \pi / 180$. For that, you should adapt one of the ODE solvers to determine where the projectile crosses the plane of the target slope and adjust the angle (either by hand or using a rootfinder) to hit the target. Don't aim too high or your explosive will detonate on the other side of the mountain! Make sure you use a time step that you are confident is accurate enough.

Your code should print the angle $\theta$ and the time at which your projectile hits the slope.
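As a starting point for your solver (an added sketch, not a supplied solution), the right-hand side of this system can be written as below with the constants stated above ($m=8$, $r=0.05$, $c_d=0.2$, $\rho=1$); choosing the launch angle is still up to you.
```
import numpy as np

def ballistics_rhs(t, u, m=8.0, r=0.05, cd=0.2, rho=1.0, g=9.8):
    """u = [p_x, p_z, v_x, v_z]; returns u' for the gravity-plus-drag model above."""
    px, pz, vx, vz = u
    speed = np.sqrt(vx**2 + vz**2)
    drag = -0.5 * rho * speed * cd * np.pi * r**2 * np.array([vx, vz])  # drag opposes velocity
    return np.array([vx, vz, drag[0] / m, drag[1] / m - g])
```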
### Chemical reactions
The [Oregonator](http://www.scholarpedia.org/article/Oregonator) mechanism in chemical kinetics describes an oscillatory chemical system. It consists of three species with concentrations $\mathbf x = [x_0,x_1,x_2]^T$ (scaled units) and the evolution equations
$$ \mathbf {x'} = \begin{bmatrix} 77.27 \big(x_1 + x_0 (1 - 8.375\cdot 10^{-6} x_0 - x_1) \big) \\
\frac{1}{77.27} \big(x_2 - (1 + x_0) x_1 \big) \\
0.161 (x_0 - x_2)
\end{bmatrix} . $$
Starting with the initial conditions $\mathbf x_0 = [1, 2, 3]^T$, this produces the time evolution below.

This calculation was performed using 287 steps of an adaptive implicit time integrator, where the time step size was adapted to preserve accuracy. The time step size is shown in the first panel and the other three are the evolution of concentrations over time.
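As an added sketch (the solver call in the final comment is only illustrative), the evolution equations can be transcribed directly into a right-hand-side function for use with any integrator in this notebook:
```
import numpy as np

def oregonator_rhs(t, x):
    """Right-hand side of the scaled Oregonator system given above."""
    x0, x1, x2 = x
    return np.array([
        77.27 * (x1 + x0 * (1 - 8.375e-6 * x0 - x1)),
        (x2 - (1 + x0) * x1) / 77.27,
        0.161 * (x0 - x2),
    ])

# e.g. thist, xhist = ode_rk4(oregonator_rhs, np.array([1., 2., 3.]), tfinal=360, h=1e-3)
```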
#### 1. How to measure accuracy?
A differential equation solver produces a time history of chemical concentrations
$$ x(t), \quad 0 \le t \le T_{\text{final}} .$$
We would like a measure of accuracy that does not explicitly depend on the solver or time steps that are used. If a different method produces the evolution $\tilde x(t)$, then we would like to be able to measure the difference as
$$ \lVert \phi(\tilde x) - \phi(x) \rVert . $$
The function $\phi(\cdot)$ could return the solution at a particular time, it could measure the time between key events, or could be something else.
Choose a metric and explain why you chose it.
#### 2. Implicit versus Explicit
Is an implicit method better than an explicit method for solving the Oregonator?
Solve the Oregonator problem using an explicit method and using an implicit method (you can adapt code from the notebook or implement a method that is not in the notebook).
The Oregonator system has no known analytic solution, so you'll have to numerically compute a reference solution.
Do this using a method you believe to be highly accurate and explain why you believe it is very accurate.
(A common justification would be to refine the time step and compare using the metric $\phi$ above.)
Different methods (implicit versus explicit, Euler versus RK4, etc.) have different costs per time step. Choose a way to measure cost and explain your choice and any shortcomings that it may have.
Compare the cost (using the metric you chose above) and accuracy (using $\phi$) of the implicit method to the cost and accuracy of the explicit method. (This is often plotted as *accuracy versus cost* with a log-log scale.) Explain what factors might go into your decision.
#### 3. Eigenvalues
Calculate the eigenvalues of the Jacobian matrix at different times. (You can compute it analytically or using the `fdjacobian` code in this notebook.) Do you notice any qualitative differences? For example, are there any times when there are eigenvalues with positive real part?
#### 4. Generalization
Realistic chemical mechanisms can have hundreds or thousands of species (the Oregonator has only three). Explain how the choice of methods might change when solving a large reaction mechanism.
|
github_jupyter
|
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def ode_euler(f, y0, tfinal=1, h=0.1):
y = np.array(y0)
t = 0
thist = [t]
yhist = [y0]
while t < tfinal:
tnext = min(t+h, tfinal)
h = tnext - t
y += h * f(t, y)
t = tnext
thist.append(t)
yhist.append(y.copy())
return np.array(thist), np.array(yhist)
tests = []
class cosine:
def __init__(self, k=5):
self.k = k
def __repr__(self):
return 'cosine(k={:d})'.format(self.k)
def f(self, t, y):
return -self.k * (y - np.cos(t))
def y(self, t, y0):
k2p1 = self.k**2+1
return (y0 - self.k**2/k2p1) * np.exp(-self.k*t) + self.k*(np.sin(t) + self.k*np.cos(t))/k2p1
tests.append(cosine(k=2))
tests.append(cosine(k=1000))
y0 = np.array([.2])
for test in tests:
thist, yhist = ode_euler(test.f, y0, h=.2, tfinal=20)
plt.plot(thist, yhist, '.', label=repr(test)+' Forward Euler')
plt.plot(thist, test.y(thist, y0), label=repr(test)+' exact')
plt.plot(thist, np.cos(thist), label='cos')
plt.legend(loc='upper right');
def expm(A):
"""Compute the matrix exponential"""
L, X = np.linalg.eig(A)
return X @ np.diag(np.exp(L)) @ np.linalg.inv(X)
class linear:
def __init__(self, A):
self.A = A.copy()
def f(self, t, y):
return self.A @ y
def y(self, t, y0):
return [np.real_if_close(expm(self.A*s) @ y0) for s in t]
test = linear(np.array([[0, 1],
[-1, 0]]))
y0 = np.array([.5, 0])
thist, yhist = ode_euler(test.f, y0, h=.1, tfinal=10)
plt.figure()
plt.plot(thist, yhist, '.', label='Euler')
plt.plot(thist, test.y(thist, y0), label='exact')
plt.legend(loc='upper right');
def ode_rk4(f, y0, tfinal=1, h=0.1):
y = np.array(y0)
t = 0
thist = [t]
yhist = [y0]
while t < tfinal:
h = min(h, tfinal - t)
k1 = f(t, y)
k2 = f(t+h/2, y + k1*h/2)
k3 = f(t+h/2, y + k2*h/2)
k4 = f(t+h, y + k3*h)
y += h/6 * (k1 + 2*k2 + 2*k3 + k4)
t += h
thist.append(t)
yhist.append(y.copy())
return np.array(thist), np.array(yhist)
thist, yhist = ode_rk4(test.f, y0, h=.5, tfinal=50)
plt.figure()
plt.plot(thist, yhist, '.', label=repr(test)+' RK4')
plt.plot(thist, test.y(thist, y0), label=repr(test)+' exact');
def plot_stability(x, y, Rz, label):
plt.figure()
levels = np.linspace(0, 2, 21)
C = plt.contourf(xx, yy, np.abs(Rz), levels, cmap=plt.cm.coolwarm)
cbar = plt.colorbar(C, ticks=np.linspace(0, 2, 5))
plt.axvline(x=0, linewidth=1, color='grey')
plt.axhline(y=0, linewidth=1, color='grey')
plt.contour(xx, yy, np.abs(Rz), [.5, 1, 1.5], colors='k')
plt.title(label)
x = np.linspace(-2,2)
xx, yy = np.meshgrid(x, x)
zz = xx + 1j*yy
R = 1 + zz
plot_stability(xx, yy, R, 'Forward Euler')
plot_stability(xx, yy, 1/(1-zz), 'Backward Euler')
def solve_newtonfd(f, x0):
def fdjacobian(x):
J = np.eye(len(x),len(x))
base = f(x)
for col in J.T:
col[:] = (f(x + 1e-8*col) - base) / 1e-8
return J
x = x0.copy()
while True:
res = f(x)
if np.linalg.norm(res) < 1e-6:
return x
x -= np.linalg.solve(fdjacobian(x), res)
def ode_beuler(f, y0, tfinal=1, h=0.1):
y = np.array(y0)
t = 0
thist = [t]
yhist = [y0]
while t < tfinal:
h = min(h, tfinal - t)
# Solve x = y + h f(x)
def residual(ytilde):
return ytilde - (y + h * f(t+h, ytilde))
y = solve_newtonfd(residual, y)
t += h
thist.append(t)
yhist.append(y.copy())
return np.array(thist), np.array(yhist)
y0 = np.array([.5, 0])
thist, yhist = ode_beuler(test.f, y0, h=.1, tfinal=40)
plt.plot(thist, yhist, '.')
plt.plot(thist, test.y(thist, y0))
plt.title('Backward Euler');
y0 = np.array([.2])
for tst in tests:
thist, yhist = ode_beuler(tst.f, y0, h=.3, tfinal=10)
plt.plot(thist, yhist, '.', label=repr(tst)+' Backward Euler')
plt.plot(thist, tst.y(thist, y0), label=repr(tst)+' exact')
plt.plot(thist, np.cos(thist), label='cos')
plt.legend(loc='upper right');
def ode_midpoint(f, y0, tfinal=1, h=0.1):
y = y0.copy()
t = 0
thist = [t]
yhist = [y0]
while t < tfinal:
h = min(h, tfinal - t)
# Solve x = y + h f(x)
def residual(x):
return x - h * f(t+h/2, (x + y)/2) - y
y = solve_newtonfd(residual, y)
t += h
thist.append(t)
yhist.append(y.copy())
return np.array(thist), np.array(yhist)
test = linear(np.array([[0, 1],[-1, 0]]))
y0 = np.array([.5, 0])
thist, yhist = ode_midpoint(test.f, y0, h=1, tfinal=50)
plt.plot(thist, yhist, '.')
plt.plot(thist, test.y(thist, y0))
plt.title('Midpoint');
y0 = np.array([.2])
for test in [cosine(k=5), cosine(k=500)]:
thist, yhist = ode_midpoint(test.f, y0, h=.1, tfinal=5)
plt.plot(thist, yhist, '.', label=repr(test)+' Midpoint')
plt.plot(thist, test.y(thist, y0), label=repr(test)+' exact')
plt.legend(loc='upper right')
plt.title('Cosine');
plot_stability(xx, yy, (1+zz/2)/(1-zz/2), 'Midpoint')
def Rstability(A, b, z):
s = len(b)
def R(z):
return 1 + z * (b @ np.linalg.solve(np.eye(s) - z*A, np.ones(s)))
f = np.vectorize(R)
return f(z)
def plot_rkstability(A, b, name):
x = np.linspace(-4,4)
xx, yy = np.meshgrid(x, x)
zz = xx + 1j*yy
R = Rstability(A, b, zz)
plot_stability(xx, yy, R, name)
A_rk4 = np.array([[0,0,0,0],
[.5,0,0,0],
[0,.5,0,0],
[0,0,1,0]])
b_rk4 = np.array([1/6, 1/3, 1/3, 1/6])
plot_rkstability(A_rk4, b_rk4, 'RK4')
plot_rkstability(np.eye(1)/2, [1], 'RK Midpoint')
| 0.609524 | 0.976333 |
```
import numpy as np
import matplotlib.pyplot as plt
settings = {}
settings['N'] = 2000 #Population size
settings['Nday'] = 240 # N Days of simulation
settings['N_meet_day'] = 2.3 #Average N of people met every day (poisson distribution)
settings['p_contagion'] = 0.3 #probability of contagion meeting a single sick person
settings['duration_days'] = 13 #Illness duration in days
settings['immunity_loss_days'] = 15 #Immunity duration after illness
def simulate(Nday = settings['Nday'], N = settings['N'], p_contagion = settings['p_contagion'],
N_meet_day=settings['N_meet_day'], duration_days = settings['duration_days'],
immunity_loss_days = settings['immunity_loss_days'], immunity_loss = False, intrusion = -1):
humans = np.zeros(N)
contagion_era = np.zeros(N)
immunity_era = np.zeros(N)
humans[0] = 1
sick = []
immunized = []
total_sick = 1
for epoca in range(0,Nday):
meeting_number = np.random.poisson(N_meet_day, N)
contagion_era = contagion_era + np.where(humans==1,1,0)
immunity_era = immunity_era + np.where(humans==-1,1,0)
new_humans = np.copy(humans)
# random meeting between people and contagion
for i, p, nm in np.nditer([np.arange(N), humans, meeting_number]):
met_people = np.random.choice(humans,nm,replace = False)
x = p_contagion*np.sum(np.where(met_people>0,met_people,0))
if (p == 0) and (np.random.random() < x):
new_humans[i] = 1
total_sick = total_sick + 1
else:
new_humans[i] = p
        #illness duration for every sick person
if immunity_loss and immunity_loss_days > 1:
for i,e in np.nditer([np.arange(N),contagion_era]):
if e > duration_days:
new_humans[i] = -1
# immunity loss
if immunity_loss:
for i,e in np.nditer([np.arange(N),immunity_era]):
if e > immunity_loss_days:
new_humans[i] = 0
        #re-introduce the illness on the intrusion day
if (intrusion > 0) and (epoca == intrusion):
new_humans[0] = 1
humans = np.copy(new_humans)
sick.append(np.sum(np.where(humans==1,1,0)))
immunized.append(np.sum(np.where(humans==-1,1,0)))
return sick, immunized, total_sick
```
## Two cases...
```
fig, axs = plt.subplots(2, 1, figsize=(12, 9), sharey=True)
plt.subplots_adjust(hspace = 0.8)
nmd = 20.0
sick, immunized, total_sick = simulate(p_contagion=0.1, N_meet_day= nmd, N=500, immunity_loss=True,
immunity_loss_days=10, intrusion = 100)
axs[0].plot(range(0,settings['Nday']), sick, label = 'Contagiati')
axs[0].plot(range(0,settings['Nday']), immunized, label = 'Immunizzati')
axs[0].set(xlabel="Giorni da inizio epidemia", ylabel='Numero', title='Se incontro in media: \n'+str(int(nmd))+ ' persone al giorno')
axs[0].annotate('Totale ammalati: '+str(total_sick),xy=(110, 250), fontsize=20)
axs[0].grid()
axs[0].legend()
nmd = 2.0
sick, immunized, total_sick = simulate(p_contagion=0.1, N_meet_day= nmd, N=500, immunity_loss=True,
immunity_loss_days=10, intrusion = 100)
axs[1].plot(range(0,settings['Nday']), sick, label = 'Contagiati')
axs[1].plot(range(0,settings['Nday']), immunized, label = 'Immunizzati')
axs[1].set(xlabel="Giorni da inizio epidemia", ylabel='Numero', title='Se incontro in media: \n'+str(int(nmd))+ ' persone al giorno')
axs[1].annotate('Totale ammalati: '+str(total_sick),xy=(110, 300), fontsize=20)
axs[1].grid()
axs[1].legend()
plt.show()
```
## N-scaling
```
pop_sizes = [100,200,500,1000,2000,5000,10000]
peak_day=[]
for pop_size in pop_sizes:
sick, immunized, total_sick = simulate(p_contagion=0.1, N_meet_day= 2, N = pop_size)
peak_day.append(np.argmax(sick))
peak_day
pop_sizes = [100,200,500,1000,2000,5000,10000]
peak_day_2=[]
for pop_size in pop_sizes:
sick, immunized, total_sick = simulate(p_contagion=0.1, N_meet_day= 4, N = pop_size, Nday=60)
peak_day_2.append(np.argmax(sick))
peak_day_2
fig, ax = plt.subplots(figsize=(12, 9))
ax.scatter(pop_sizes, peak_day)
plt.plot()
fig, ax = plt.subplots(figsize=(12, 9))
ax.scatter(pop_sizes, peak_day_2)
plt.plot()
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
settings = {}
settings['N'] = 2000 #Population size
settings['Nday'] = 240 # N Days of simulation
settings['N_meet_day'] = 2.3 #Average N of people met every day (poisson distribution)
settings['p_contagion'] = 0.3 #probability of contagion meeting a single sick person
settings['duration_days'] = 13 #Illness duration in days
settings['immunity_loss_days'] = 15 #Immunity duration after illness
def simulate(Nday = settings['Nday'], N = settings['N'], p_contagion = settings['p_contagion'],
N_meet_day=settings['N_meet_day'], duration_days = settings['duration_days'],
immunity_loss_days = settings['immunity_loss_days'], immunity_loss = False, intrusion = -1):
humans = np.zeros(N)
contagion_era = np.zeros(N)
immunity_era = np.zeros(N)
humans[0] = 1
sick = []
immunized = []
total_sick = 1
for epoca in range(0,Nday):
meeting_number = np.random.poisson(N_meet_day, N)
contagion_era = contagion_era + np.where(humans==1,1,0)
immunity_era = immunity_era + np.where(humans==-1,1,0)
new_humans = np.copy(humans)
# random meeting between people and contagion
for i, p, nm in np.nditer([np.arange(N), humans, meeting_number]):
met_people = np.random.choice(humans,nm,replace = False)
x = p_contagion*np.sum(np.where(met_people>0,met_people,0))
if (p == 0) and (np.random.random() < x):
new_humans[i] = 1
total_sick = total_sick + 1
else:
new_humans[i] = p
        #illness duration for every sick person
if immunity_loss and immunity_loss_days > 1:
for i,e in np.nditer([np.arange(N),contagion_era]):
if e > duration_days:
new_humans[i] = -1
# immunity loss
if immunity_loss:
for i,e in np.nditer([np.arange(N),immunity_era]):
if e > immunity_loss_days:
new_humans[i] = 0
        #re-introduce the illness on the intrusion day
if (intrusion > 0) and (epoca == intrusion):
new_humans[0] = 1
humans = np.copy(new_humans)
sick.append(np.sum(np.where(humans==1,1,0)))
immunized.append(np.sum(np.where(humans==-1,1,0)))
return sick, immunized, total_sick
fig, axs = plt.subplots(2, 1, figsize=(12, 9), sharey=True)
plt.subplots_adjust(hspace = 0.8)
nmd = 20.0
sick, immunized, total_sick = simulate(p_contagion=0.1, N_meet_day= nmd, N=500, immunity_loss=True,
immunity_loss_days=10, intrusion = 100)
axs[0].plot(range(0,settings['Nday']), sick, label = 'Contagiati')
axs[0].plot(range(0,settings['Nday']), immunized, label = 'Immunizzati')
axs[0].set(xlabel="Giorni da inizio epidemia", ylabel='Numero', title='Se incontro in media: \n'+str(int(nmd))+ ' persone al giorno')
axs[0].annotate('Totale ammalati: '+str(total_sick),xy=(110, 250), fontsize=20)
axs[0].grid()
axs[0].legend()
nmd = 2.0
sick, immunized, total_sick = simulate(p_contagion=0.1, N_meet_day= nmd, N=500, immunity_loss=True,
immunity_loss_days=10, intrusion = 100)
axs[1].plot(range(0,settings['Nday']), sick, label = 'Contagiati')
axs[1].plot(range(0,settings['Nday']), immunized, label = 'Immunizzati')
axs[1].set(xlabel="Giorni da inizio epidemia", ylabel='Numero', title='Se incontro in media: \n'+str(int(nmd))+ ' persone al giorno')
axs[1].annotate('Totale ammalati: '+str(total_sick),xy=(110, 300), fontsize=20)
axs[1].grid()
axs[1].legend()
plt.show()
pop_sizes = [100,200,500,1000,2000,5000,10000]
peak_day=[]
for pop_size in pop_sizes:
sick, immunized, total_sick = simulate(p_contagion=0.1, N_meet_day= 2, N = pop_size)
peak_day.append(np.argmax(sick))
peak_day
pop_sizes = [100,200,500,1000,2000,5000,10000]
peak_day_2=[]
for pop_size in pop_sizes:
sick, immunized, total_sick = simulate(p_contagion=0.1, N_meet_day= 4, N = pop_size, Nday=60)
peak_day_2.append(np.argmax(sick))
peak_day_2
fig, ax = plt.subplots(figsize=(12, 9))
ax.scatter(pop_sizes, peak_day)
plt.plot()
fig, ax = plt.subplots(figsize=(12, 9))
ax.scatter(pop_sizes, peak_day_2)
plt.plot()
| 0.352759 | 0.729062 |
## Setup
```
%pip install -r requirements.txt > /dev/null
!python -m pip install amazon-textract-response-parser
%matplotlib inline
import time
import json
import requests
import uuid
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import boto3
import smart_open
from time import sleep
from matplotlib import cm, colors
from spacy import displacy
from collections import Counter
from pyvis.network import Network
from trp import Document
# Amazon Textract client
textract = boto3.client('textract')
#amazon comprehend
comprehend_client = boto3.client('comprehend')
# Client and session information
session = boto3.Session()
s3_client = session.client(service_name="s3")
# Amazon S3 client
s3 = boto3.client('s3')
```
# Enter your Amazon S3 bucket name
```
# Constants for S3 bucket and input data file
bucket = "<enter your s3 bucket name>"
```
# Download sample financial documents to S3
We've included a set of Amazon press releases as example documents. Here we upload them as a single file `sample_financial_news_doc.pdf` to an S3 bucket for processing. The same bucket will be used to return service output.
```
filename = "sample_financial_news_doc.pdf"
# Upload the local file to S3
s3_client.upload_file(filename, bucket, filename)
# Document name in Amazon S3 Bucket
documentName = bucket + '/'+filename
```
# Convert pdf documents to text using Amazon Textract
```
def startJob(s3BucketName, objectName):
response = None
response = textract.start_document_text_detection(
DocumentLocation={
'S3Object': {
'Bucket': s3BucketName,
'Name': objectName
}
})
return response["JobId"]
def isJobComplete(jobId):
response = textract.get_document_text_detection(JobId=jobId)
status = response["JobStatus"]
print("Job status: {}".format(status))
while(status == "IN_PROGRESS"):
time.sleep(5)
response = textract.get_document_text_detection(JobId=jobId)
status = response["JobStatus"]
print("Job status: {}".format(status))
return status
def getJobResults(jobId):
pages = []
response = textract.get_document_text_detection(JobId=jobId)
pages.append(response)
print("Resultset page recieved: {}".format(len(pages)))
nextToken = None
if('NextToken' in response):
nextToken = response['NextToken']
while(nextToken):
response = textract.get_document_text_detection(JobId=jobId, NextToken=nextToken)
pages.append(response)
print("Resultset page recieved: {}".format(len(pages)))
nextToken = None
if('NextToken' in response):
nextToken = response['NextToken']
return pages
jobId = startJob(bucket, filename)
print("Started job with id: {}".format(jobId))
if(isJobComplete(jobId)):
response = getJobResults(jobId)
```
# Convert the extracted data from Amazon Textract into a UTF-8 text file
```
# Let's get the data into a text file
text_filename = 'sample_finance_data.txt'
doc = Document(response)
with open(text_filename, 'w', encoding='utf-8') as f:
for page in doc.pages:
        # Collect the text of every line on this page into one string
page_string = ''
for line in page.lines:
#print((line.text))
page_string += str(line.text)
#print(page_string)
f.writelines(page_string + "\n")
# Load the documents locally for later analysis
with open(text_filename, "r") as fi:
raw_texts = [line.strip() for line in fi.readlines()]
```
# Upload this text file to Amazon S3 for Comprehend events analysis jobs
```
# Upload the local file to S3
s3_client.upload_file(text_filename, bucket, text_filename)
```
# Metadata Extraction
We will extract metadata with Comprehend entity detection and with Comprehend Events.
# Let's extract some metadata using Amazon Comprehend Events
# Two choices here
1. Create a Comprehend events analysis job through the console, OR
2. Start an asynchronous job with the Python SDK by running the notebook cell below.
If you want to follow the steps using the AWS Console, click here https://console.aws.amazon.com/comprehend/v2/home?region=us-east-1#home and follow the instructions in Chapter 9.
Note: If you are creating the events job using the Amazon Comprehend console, skip the "Start an asynchronous job with the SDK" section and move to the "Collect the results from S3" section.
### Start an asynchronous job with the SDK
The first task is to kick off the inference job. We'll do this with the `start_events_detection_job` endpoint. Note that the API requires an IAM role with List, Read, and Write access to the bucket specified above.
```
input_data_s3_path = f's3://{bucket}/' + text_filename
output_data_s3_path = f's3://{bucket}/'
```
## Create an IAM role with access to Comprehend and the specified S3 bucket
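If you don't already have a suitable role, the cell below is a minimal sketch of how one could be created with boto3. The role and policy names are made up for illustration, `bucket` is the variable defined above, and creating the role through the IAM console (or as shown in the code-in-action video) works just as well; a newly created role may take a short while before Comprehend can assume it.
```
import json

iam = boto3.client('iam')
role_name = 'ComprehendEventsDataAccessRole'  # hypothetical name

# Allow Amazon Comprehend to assume the role
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "comprehend.amazonaws.com"},
        "Action": "sts:AssumeRole"
    }]
}

# List, Read and Write access to the bucket used above
s3_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Action": ["s3:ListBucket", "s3:GetObject", "s3:PutObject"],
        "Resource": ["arn:aws:s3:::" + bucket, "arn:aws:s3:::" + bucket + "/*"]
    }]
}

role = iam.create_role(RoleName=role_name,
                       AssumeRolePolicyDocument=json.dumps(trust_policy))
iam.put_role_policy(RoleName=role_name,
                    PolicyName='ComprehendEventsS3Access',
                    PolicyDocument=json.dumps(s3_policy))
print(role['Role']['Arn'])  # use this ARN as job_data_access_role below
```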
```
# IAM role with access to Comprehend and specified S3 buckets
job_data_access_role = '<enter iam role or refer to code in action video>'
# Other job parameters
input_data_format = 'ONE_DOC_PER_LINE'
job_uuid = uuid.uuid1()
job_name = f"events-job-{job_uuid}"
event_types = ["BANKRUPTCY", "EMPLOYMENT", "CORPORATE_ACQUISITION",
"INVESTMENT_GENERAL", "CORPORATE_MERGER", "IPO",
"RIGHTS_ISSUE", "SECONDARY_OFFERING", "SHELF_OFFERING",
"TENDER_OFFERING", "STOCK_SPLIT"]
# Begin the inference job
response = comprehend_client.start_events_detection_job(
InputDataConfig={'S3Uri': input_data_s3_path,
'InputFormat': input_data_format},
OutputDataConfig={'S3Uri': output_data_s3_path},
DataAccessRoleArn=job_data_access_role,
JobName=job_name,
LanguageCode='en',
TargetEventTypes=event_types
)
# Get the job ID
events_job_id = response['JobId']
```
# The above code submits an analysis job to Amazon Comprehend
Go to the Amazon Comprehend console to get the job ID once the job has completed.
https://console.aws.amazon.com/comprehend/v2/home?region=us-east-1#analysis
Note that, as an asynchronous inference job, the task will take several minutes to complete.
# If you created the events job using the Comprehend console, open the analysis job, copy the job ID, and paste it below; otherwise continue.
```
# If you created the job in the console, uncomment the next line and enter the completed analysis job ID;
# otherwise leave it commented so the job ID returned by the SDK call above is used.
# events_job_id = "<enter completed analysis job id>"
# Get current job status
job = comprehend_client.describe_events_detection_job(JobId=events_job_id)
# Loop until job is completed
waited = 0
timeout_minutes = 30
while job['EventsDetectionJobProperties']['JobStatus'] != 'COMPLETED':
sleep(60)
waited += 60
assert waited//60 < timeout_minutes, "Job timed out after %d seconds." % waited
job = comprehend_client.describe_events_detection_job(JobId=events_job_id)
print("Job Status {}".format(job['EventsDetectionJobProperties']['JobStatus']))
# The output filename is the input filename + ".out"
output_data_s3_file = job['EventsDetectionJobProperties']['OutputDataConfig']['S3Uri'] + text_filename + '.out'
print(output_data_s3_file)
# Download the output file and load each JSON line into the results list
results = []
with smart_open.open(output_data_s3_file) as fi:
results.extend([json.loads(line) for line in fi.readlines() if line])
```
## Analyzing Comprehend Events output
The remainder of this notebook provides examples of different ways to analyze a given document. For our example document, we'll use the kind of online posting that a financial analyst might consume when projecting market trends, a [2017 press release about Amazon's acquisition of Whole Foods Market, Inc.](https://press.aboutamazon.com/news-releases/news-release-details/amazoncom-announces-third-quarter-sales-34-437-billion). It's the first document in the data set we submitted to the Comprehend Events API.
### Understanding Comprehend Events system output
The system returns JSON output for each submitted document. The structure of a response is shown below. Note:
* Events system output contains separate objects for `Entities` and `Events`, each organized into groups of coreferential objects.
* Two additional fields, `File` and `Line`, are present as well to track document provenance.
```
# Use the first result document for analysis
result = results[0]
result
```
#### Events are groups of Triggers
* The API output includes the text, character offset, and type of each trigger.
* Confidence scores for classification tasks are given as `Score`. Confidence of event group membership is given with `GroupScore`.
```
result['Events'][1]['Triggers']
```
#### Arguments are linked to Entities by EntityIndex
* The API also returns the classification confidence of the role assignment, which describes how the entity is related to the event.
```
result['Events'][1]['Arguments']
```
#### Entities are groups of Mentions
* The API output includes the text, character offset, and type of each mention.
* Confidence scores for classification tasks are given as `Score`. Confidence of entity group membership is given with `GroupScore`.
```
result['Entities'][5]['Mentions']
```
### Visualizing the Events and Entities
In the remainder of the notebook, we'll give a number of tabulations and visualizations to help understand what the API is returning.
First we'll consider visualization of spans, both triggers and entity mentions. One of the most essential visualizations for sequence labeling tasks is highlighting tagged text in documents. For demo purposes, we'll do this with [displaCy](https://spacy.io/usage/visualizers).
```
# Convert Events output to displaCy format.
entities = [
{'start': m['BeginOffset'], 'end': m['EndOffset'], 'label': m['Type']}
for e in result['Entities']
for m in e['Mentions']
]
triggers = [
{'start': t['BeginOffset'], 'end': t['EndOffset'], 'label': t['Type']}
for e in result['Events']
for t in e['Triggers']
]
# Spans need to be sorted for displaCy to process them correctly
spans = sorted(entities + triggers, key=lambda x: x['start'])
tags = [s['label'] for s in spans]
output = [{"text": raw_texts[0], "ents": spans, "title": None, "settings": {}}]
# Misc. objects for presentation purposes
spectral = cm.get_cmap("Spectral", len(tags))
tag_colors = [colors.rgb2hex(spectral(i)) for i in range(len(tags))]
color_map = dict(zip(*(tags, tag_colors)))
# Note that only Entities participating in Events are shown.
displacy.render(output, style="ent", options={"colors": color_map}, manual=True)
```
### Rendering as tabular data
Many users will use Events to create structured data from unstructured text. Here we'll demonstrate how to do this with `pandas`. First, we flatten the hierarchical JSON into a pandas dataframe.
```
# Creation of the entity dataframe. Entity indices must be explicitly created.
entities_df = pd.DataFrame([
{"EntityIndex": i, **m}
for i, e in enumerate(result['Entities'])
for m in e['Mentions']
])
# Creation of the events dataframe. Event indices must be explicitly created.
events_df = pd.DataFrame([
{"EventIndex": i, **a, **t}
for i, e in enumerate(result['Events'])
for a in e['Arguments']
for t in e['Triggers']
])
# Join the two tables into one flat data structure.
events_df = events_df.merge(entities_df, on="EntityIndex", suffixes=('Event', 'Entity'))
events_df
```
### A more succinct representation
We're primarily interested in the *event structure*, so let's make that more transparent by creating a new table with Roles as column headers, grouped by Event.
```
def format_compact_events(x):
"""Collapse groups of mentions and triggers into a single set."""
# Take the most commonly occurring EventType and the set of triggers.
d = {"EventType": Counter(x['TypeEvent']).most_common()[0][0],
"Triggers": set(x['TextEvent'])}
# For each argument Role, collect the set of mentions in the group.
for role in x['Role']:
d.update({role: set((x[x['Role']==role]['TextEntity']))})
return d
# Group data by EventIndex and format.
event_analysis_df = pd.DataFrame(
events_df.groupby("EventIndex").apply(format_compact_events).tolist()
).fillna('')
event_analysis_df
```
### Graphing event semantics
The most striking representation of Comprehend Events output is found in a semantic graph, a network of the entities and events referenced in a document or documents. The code below uses two open source libraries, `networkx` and `pyvis`, to render events system output. In the resulting graph, nodes are entity mentions and triggers, while edges are the argument roles held by the entities in relation to the triggers.
#### Formatting the data
System output must first be conformed to the node (i.e., vertex) and edge list format required by `networkx`. This requires iterating over triggers, entities, and argument structural relations. Note that we can use the `GroupScore` and `Score` keys on various objects to prune nodes and edges in which the model has less confidence. We can also use various strategies to pick a 'canonical' mention from each mention group to appear in the graph; here we chose the mention with the string-wise longest extent.
```
# Entities are associated with events by group, not individual mention; for simplicity,
# assume the canonical mention is the longest one.
def get_canonical_mention(mentions):
extents = enumerate([m['Text'] for m in mentions])
longest_name = sorted(extents, key=lambda x: len(x[1]))
return [mentions[longest_name[-1][0]]]
# Set a global confidence threshold
thr = 0.5
# Nodes are (id, type, tag, score, mention_type) tuples.
trigger_nodes = [
("tr%d" % i, t['Type'], t['Text'], t['Score'], "trigger")
for i, e in enumerate(result['Events'])
for t in e['Triggers'][:1]
if t['GroupScore'] > thr
]
entity_nodes = [
("en%d" % i, m['Type'], m['Text'], m['Score'], "entity")
for i, e in enumerate(result['Entities'])
for m in get_canonical_mention(e['Mentions'])
if m['GroupScore'] > thr
]
# Edges are (trigger_id, node_id, role, score) tuples.
argument_edges = [
("tr%d" % i, "en%d" % a['EntityIndex'], a['Role'], a['Score'])
for i, e in enumerate(result['Events'])
for a in e['Arguments']
if a['Score'] > thr
]
```
#### Create a compact graph
Once the nodes and edges are defined, we can create and visualize the graph.
```
G = nx.Graph()
# Iterate over triggers and entity mentions.
for mention_id, tag, extent, score, mtype in trigger_nodes + entity_nodes:
label = extent if mtype.startswith("entity") else tag
G.add_node(mention_id, label=label, size=score*10, color=color_map[tag], tag=tag, group=mtype)
# Iterate over argument role assignments
for event_id, entity_id, role, score in argument_edges:
G.add_edges_from(
[(event_id, entity_id)],
label=role,
weight=score*100,
color="grey"
)
# Drop mentions that don't participate in events
G.remove_nodes_from(list(nx.isolates(G)))
nt = Network("600px", "800px", notebook=True, heading="")
nt.from_nx(G)
nt.show("compact_nx.html")
```
#### A more complete graph
The graph above is compact, only relaying essential event type and argument role information. We can use a slightly more complicated set of functions to graph all of the information returned by the API.
```
# This convenience function in `events_graph.py` plots a complete graph of the document,
# showing all events, triggers, entities, and their groups.
import events_graph as evg
evg.plot(result, node_types=['event', 'trigger', 'entity_group', 'entity'], thr=0.5)
```
# Clean up
Delete the Amazon S3 bucket and the objects in it once you are finished.
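The notebook doesn't ship with cleanup code; a minimal sketch using the `s3_client` created earlier could look like the cell below. Only run it against a bucket you own and no longer need, since it deletes every object and then the bucket itself.
```
# Delete every object in the bucket, then delete the bucket.
paginator = s3_client.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket=bucket):
    for obj in page.get('Contents', []):
        s3_client.delete_object(Bucket=bucket, Key=obj['Key'])
s3_client.delete_bucket(Bucket=bucket)
```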
[View in Colaboratory](https://colab.research.google.com/github/SakshiPriya/guidedbackprop/blob/master/guidedbackprop.ipynb)
```
!apt-get install -y -qq software-properties-common python-software-properties module-init-tools
!add-apt-repository -y ppa:alessandro-strada/ppa 2>&1 > /dev/null
!apt-get update -qq 2>&1 > /dev/null
!apt-get -y install -qq google-drive-ocamlfuse fuse
from google.colab import auth
auth.authenticate_user()
from oauth2client.client import GoogleCredentials
creds = GoogleCredentials.get_application_default()
import getpass
!google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret} < /dev/null 2>&1 | grep URL
vcode = getpass.getpass()
!echo {vcode} | google-drive-ocamlfuse -headless -id={creds.client_id} -secret={creds.client_secret}
!mkdir -p drive
!google-drive-ocamlfuse drive
!pip install torch
!pip install torchvision
!pip install pillow==4.0.0
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from torchvision import transforms,models
from PIL import Image
from torch import nn
mean=torch.Tensor([0.485, 0.456, 0.406])
std=torch.Tensor([0.229, 0.224, 0.225])
def transformimage(image):
transform=transforms.Compose([transforms.Resize((224,224)),transforms.ToTensor(),transforms.Normalize(mean,std)])
transformedimage=transform(image)
transformedimage=transformedimage.unsqueeze(0)
return transformedimage
untransform=transforms.ToPILImage()
grayscale=transforms.Grayscale()
def showimage(transformedimage,show,gradient):
untrans_image=transformedimage.squeeze(0)
if show==True:
for i in range(untrans_image.shape[0]):
untrans_image[i]=(untrans_image[i]*std[i])+mean[i]
elif gradient==True:
untrans_image=(untrans_image-untrans_image.min())/untrans_image.max()
untrans_image=untransform(untrans_image)
plt.figure()
plt.imshow(untrans_image)
plt.show()
return untrans_image
def grayscale(image):
image=image.squeeze(0)
image_mean=torch.mean(image,dim=0)
plt.figure()
plt.imshow(image_mean)
plt.figure()
def saliency_map(image):
image=image.squeeze(0)
image_max=torch.max(image,dim=0)
plt.figure()
plt.imshow(image_max[0])
plt.figure()
def saliencypos_map(image):
image=image.squeeze(0)
image_pos=np.maximum(0,image)/image.max()
plt.figure()
plt.imshow(untransform(image_pos))
plt.figure()
def saliencyneg_map(image):
image=image.squeeze(0)
image_pos=np.maximum(0,-image)/-image.min()
plt.figure()
plt.imshow(untransform(image_pos))
plt.figure()
listofimages=[['drive/app/goldfish.jpg',1],
['drive/app/hamster.jpg',333],
['drive/app/jellyfish.jpg',107]]
index=1
imgpath=listofimages[index][0]
classid=listofimages[index][1]
model=models.vgg19(pretrained=True)
image=Image.open(imgpath)
imagetensor=transformimage(image)
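# Guided backpropagation: hook every ReLU so that negative gradients are clamped
# to zero on the backward pass, keeping only the signal that contributes
# positively to the chosen class score.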
class guidedbackprop():
def __init__(self,model):
self.model=model
self.relus()
self.model.eval()
def relus(self):
def reluhook(layer,grad_in,grad_out):
return (torch.clamp(grad_in[0],min=0),)
for layer in self.model.features:
if isinstance(layer,nn.ReLU):
layer.register_backward_hook(reluhook)
guided=guidedbackprop(model)
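# Backpropagate a one-hot gradient for the target class to obtain the guided
# gradients with respect to the input image.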
def imagevaluation(imageten):
model.zero_grad()
output=model(imageten)
gradient=torch.zeros_like(output)
gradient[0][classid]=1
output.backward(gradient)
return imageten.grad.data
imagetensor.requires_grad=True
image1=imagevaluation(imagetensor)
image=showimage(image1,False,True)
grayscale(image1)
saliency_map(image1)
saliencypos_map(image1)
saliencyneg_map(image1)
```
# Advanced: Using brackets or other strange characters to make complex patterns on command line or with Python
If you have used PatMatch on the web, you'll know that you can create complex patterns for searching and that they can involve brackets and other odd characters as part of the query. For example you can see some of these listed in the 'Examples' column under 'Supported Pattern Syntax and Examples' [here](https://www.yeastgenome.org/nph-patmatch#examples).
These odd characters can be an issue when constructing and submitting them via the command line or a mix of shell and Python. And so this page presents some options I have found that work for brackets and presumably other odd characters.
Luckily, I had worked out some of this when working on [this demo notebook](https://nbviewer.jupyter.org/github/fomightez/sequencework/blob/master/Extract_from_FASTA/demo%20get_seq_following_seq_from_FASTA.ipynb), specifically the regular expressions section [here](https://nbviewer.jupyter.org/github/fomightez/sequencework/blob/master/Extract_from_FASTA/demo%20get_seq_following_seq_from_FASTA.ipynb#More-advanced-use-examples-#1:-Use-with-regular-expressions). [This](https://github.com/ipython/ipython/issues/10072) was also helpful.
## Preparing
In order to ensure everything is all set, act as if this is a new session in this Jupyter environment, and run the next cell so that you can step through the preparation steps to get a sequence file, prepare it, and scan it for matches to ensure a data file is present. Plus, you'll get the file for the script that converts the results to a dataframe. Repeating these steps if you had already done so this session will cause no harm, and so go ahead and run this cell.
```
!curl -O http://sgd-archive.yeastgenome.org/sequence/S288C_reference/chromosomes/fasta/chrmt.fsa
!perl ../patmatch_1.2/unjustify_fasta.pl chrmt.fsa
!perl ../patmatch_1.2/patmatch.pl -c "DDWDWTAWAAGTN{{1,55}}ARTADDDD" chrmt.fsa.prepared > test.out
!curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/patmatch-utilities/patmatch_results_to_df.py
from patmatch_results_to_df import patmatch_results_to_df
```
## On the command line directly
When submitting a pattern on the command line directly, I found I needed to double-up the brackets inside the quoted pattern in order to get it recognized as brackets.
The next cell has an example where the pattern I want to search for can be expressed as `DDWDWTAWAAGTN{1,55}ARTADDDD`.
Note that I submit `DDWDWTAWAAGTN{{1,55}}ARTADDDD` in the command.
```
!perl ../patmatch_1.2/unjustify_fasta.pl chrmt.fsa
!perl ../patmatch_1.2/patmatch.pl -c "DDWDWTAWAAGTN{{1,55}}ARTADDDD" chrmt.fsa.prepared > testing.out
```
Verify it ran:
```
!head testing.out
```
## When using Python
In the last of the introductory notebooks, [PatMatch with more Python](PatMatch%20with%20more%20Python.ipynb), we used the following to take a PatMatch query result and then get it into Python.
```python
my_pattern= "DDWDWTAWAAGTARTADDDD"
df = patmatch_results_to_df("test.out", pattern=my_pattern, name="promoter")
```
Fortunately, in that case no modification is really necessary.
```
my_pattern= "DDWDWTAWAAGTN{1,55}ARTADDDD"
df = patmatch_results_to_df("test.out", pattern=my_pattern, name="promoter_region")
```
As you see, the 'query pattern' text ends up in the output just as it looked in the input `pattern`.
```
df.head()
```
The issue arises when you start mixing running the command line-based PatMatch with Python variables.
The basis for this example actually gets fully explained in the previous notebook [Advanced: Sending PatMatch output directly to Python](Sending%20PatMatch%20output%20directly%20to%20Python.ipynb), and so see that notebook if you aren't following what is going on.
In this example we use Python to define the pattern for the PatMatch query. This way we only have to define pattern once. And we can easily change it in one place to change the pattern being searched. The advantages of this ability become clear when you want to search a lot of sequences with the same pattern.
To use the defined pattern in the call that triggers PatMatch execution, brackets are used to signal to Jupyter/IPython that we are referring to the Python variable `my_pattern`. The next cell runs a basic pattern match with that approach:
```
my_pattern= "DDWDWTAWAAGTARTADDDD"
output = !perl ../patmatch_1.2/patmatch.pl -c {my_pattern} chrmt.fsa.prepared
df2 = patmatch_results_to_df(output.n, pattern=my_pattern, name="promoterAGAIN")
df2.head()
```
Note that we didn't define the sequence file as a variable, because there is just one, but if you were searching different sequences you could use a Python variable in place of `chrmt.fsa.prepared` in the above code. It too would need to be flanked by brackets in that case, as the sketch below shows. This is also illustrated in the next advanced notebook in this series, [Iterating over genomes with PatMatch](Iterating%20over%20genomes%20with%20PatMatch.ipynb).
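As a quick sketch of that idea, reusing the prepared file we already have as the stand-in value (so the result matches the run above):
```
my_pattern = "DDWDWTAWAAGTARTADDDD"
sequence_file = "chrmt.fsa.prepared"  # could be any prepared sequence file
output = !perl ../patmatch_1.2/patmatch.pl -c {my_pattern} {sequence_file}
df3 = patmatch_results_to_df(output.n, pattern=my_pattern, name="promoter")
df3.head()
```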
So now you may see the impending issue.
What if our search pattern had brackets inside it, such that in its basic form it would be expressed as `DDWDWTAWAAGTN{1,55}ARTADDDD`?
How do we use that pattern as a Python variable and get it recognized as we want when the Python variable is provided to the call that executes PatMatch?
I have found that simply wrapping the pattern in quotes in the call to execute PatMatch suffices to allow using brackets within the pattern definition.
```
my_pattern = "DDWDWTAWAAGTN{1,55}ARTADDDD"
output = !perl ../patmatch_1.2/patmatch.pl -c "{my_pattern}" chrmt.fsa.prepared
complexq_df = patmatch_results_to_df(output.n, pattern=my_pattern, name="promoter_region")
complexq_df.head()
```
------
The next advanced notebook, [Iterating over genomes with PatMatch](Iterating%20over%20genomes%20with%20PatMatch.ipynb) builds on one of the options presented in the previous notebook along with topics covered in the [PatMatch with Python basics notebook](PatMatch%20with%20Python%20basics.ipynb) and subsequent intro-level notebooks to show how to cycle through searching multiple genomes easily.
# RadiusNeighborsClassifier with RobustScaler
This code template is for a classification task using a simple **Radius Neighbors Classifier**, with the data scaled by **RobustScaler**. It implements learning based on the number of neighbors within a fixed radius r of each training point, where r is a floating-point value specified by the user.
### Required Packages
```
!pip install imblearn
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from imblearn.over_sampling import RandomOverSampler
from sklearn.preprocessing import LabelEncoder, RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features which are required for model training.
```
#x_values
features=[]
```
Target feature for prediction.
```
#y_value
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selection
Feature selection is the process of reducing the number of input variables when developing a predictive model. It is used both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
X = df[features]
Y = df[target]
```
### Data Preprocessing
Since the majority of the machine learning models in the sklearn library don't handle string category data and null values, we have to explicitly remove or replace null values. The snippet below defines functions that remove null values, if any exist, and convert string class data in the dataset by encoding it to integer classes.
```
def NullClearner(df):
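    # Numeric columns are imputed with the mean; all other columns with the mode.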
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
#### Distribution Of Target Variable
```
plt.figure(figsize = (10,6))
se.countplot(Y)
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```
#### Handling Target Imbalance
The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.
One approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class. We will perform oversampling using the imblearn library.
```
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
```
### Model
RadiusNeighborsClassifier implements learning based on the number of neighbors within a fixed radius r of each training point, where r is a floating-point value specified by the user.
In cases where the data is not uniformly sampled, radius-based neighbors classification can be a better choice.
#### Tuning parameters
> **radius**: Range of parameter space to use by default for radius_neighbors queries.
> **algorithm**: Algorithm used to compute the nearest neighbors.
> **leaf_size**: Leaf size passed to BallTree or KDTree.
> **p**: Power parameter for the Minkowski metric.
> **metric**: The distance metric to use for the tree.
> **outlier_label**: Label for outlier samples.
> **weights**: Weight function used in prediction.
For more information refer: [API](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsClassifier.html)
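As a hedged illustration of these parameters (the values below are arbitrary examples for this template, not tuned recommendations), an explicitly configured classifier might look like this:
```
# Example only: radius, weights and p are illustrative values.
example_clf = RadiusNeighborsClassifier(
    radius=1.5,                    # neighborhood radius in feature space
    weights='distance',            # closer neighbors get larger votes
    p=2,                           # Euclidean distance (Minkowski with p=2)
    outlier_label='most_frequent'  # label used when no neighbor falls within the radius
)
```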
#### Data Rescaling
RobustScaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th percentile) and the 3rd quartile (75th percentile).
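As a small illustration (separate from the pipeline built below), RobustScaler subtracts the median and divides by the IQR, so a single outlier barely affects how the remaining values are scaled:
```
toy = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])  # 100.0 is an outlier
print(RobustScaler().fit_transform(toy).ravel())
# median = 3, IQR = 4 - 2 = 2, so the inliers map to [-1, 0.5] and the outlier to 48.5
```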
```
# Build Model here
# Change outlier_label as per specific use-case
model = make_pipeline(RobustScaler(),RadiusNeighborsClassifier(n_jobs=-1, outlier_label='most_frequent'))
model.fit(x_train, y_train)
```
#### Model Accuracy
The score() method returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
#### Confusion Matrix
A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
```
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
```
#### Classification Report
A classification report is used to measure the quality of predictions from a classification algorithm: how many predictions were correct and how many were not, broken down per class.
* **where**:
- Precision:- Accuracy of positive predictions.
- Recall:- Fraction of positives that were correctly identified.
    - f1-score:- Harmonic mean of precision and recall.
- support:- Support is the number of actual occurrences of the class in the specified dataset.
```
print(classification_report(y_test,model.predict(x_test)))
```
#### Creator: Viraj Jayant, Github: [Profile](https://github.com/Viraj-Jayant/)
<a href="https://colab.research.google.com/github/willianrocha/bootcamp-datascience-alura/blob/main/module_2/ds_mod2_lecture6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!wget https://raw.githubusercontent.com/willianrocha/bootcamp-datascience-alura/main/files/dsbc.py
from dsbc import *
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import seaborn as sns
import numpy as np
import pandas as pd
from datetime import date
pd.options.display.float_format = "{:,.2f}".format
dados = pd.read_csv("https://github.com/willianrocha/bootcamp-datascience-alura/raw/main/files/A160324189_28_143_208.csv", encoding="ISO-8859-1",
skiprows=3, skipfooter=12, sep=";", thousands=".", decimal=",",
engine='python')
colunas_usaveis = dados.mean().index.tolist()
colunas_usaveis.insert(0, "Unidade da Federação")
usaveis = dados[colunas_usaveis]
usaveis = usaveis.set_index("Unidade da Federação")
ordenado_por_total = usaveis.sort_values("Total", ascending=False)
ordenado_por_total = ordenado_por_total.drop("Total", axis=1)
colunas_interessadas = ordenado_por_total.columns[6:]
ordenado_por_total = ordenado_por_total[colunas_interessadas]
ordenado_por_total = ordenado_por_total / 10**6
ordenado_por_total.index = ordenado_por_total.index.str[3:]
mes_mais_recente = ordenado_por_total.columns[-1]
gastos_do_mais_recente = ordenado_por_total[mes_mais_recente]
url = 'https://pt.wikipedia.org/wiki/Lista_de_unidades_federativas_do_Brasil_por_popula%C3%A7%C3%A3o'
df_pop_por_uf = pd.read_html(url)
df_pop_por_uf = df_pop_por_uf[0]
gastos_e_populacao_recente = clean_n_join(df_pop_por_uf, gastos_do_mais_recente)
gastos_e_populacao_recente["gastos"] = gastos_e_populacao_recente["2020/Jul"] * 10**6
gastos_e_populacao_recente["gastos_por_habitante"] = gastos_e_populacao_recente["gastos"] / gastos_e_populacao_recente["populacao"]
mensal = ordenado_por_total.T
mensal.index = mensal.index.map(para_dia)
mensal_aberto = mensal.reset_index().melt(id_vars=['index'], value_vars=mensal.columns)
mensal_aberto.columns = ["dia_mes_ano", "uf", "gasto"]
from calendar import monthrange
mensal_aberto["dia_mes_ano"] = mensal_aberto["dia_mes_ano"].astype('datetime64')
mensal_aberto["mes"] = mensal_aberto["dia_mes_ano"].dt.month
mensal_aberto["ano"] = mensal_aberto["dia_mes_ano"].dt.year
mensal_aberto['gasto_diario'] = mensal_aberto.apply(lambda x: monthrange(x['ano'], x['mes'])[1], axis=1)
mensal_aberto['gasto_diario'] = mensal_aberto['gasto'] / mensal_aberto['gasto_diario']
mensal_aberto.head()
estado_a_analisar = mensal_aberto.query("uf == 'São Paulo'")
estado_a_analisar.head()
gastos_por_ano = estado_a_analisar.groupby("ano").sum()
gastos_por_ano.head()
sns.scatterplot(data=gastos_por_ano, x=gastos_por_ano.index, y="gasto")
sns.lineplot(data=gastos_por_ano, x=gastos_por_ano.index, y="gasto")
sns.barplot(data=gastos_por_ano, x=gastos_por_ano.index, y="gasto", palette=sns.color_palette("rocket_r",13))#palette='rocket')
estados = ["São Paulo", "Minas Gerais", "Pernambuco"]
por_ano_dos_estados = mensal_aberto.query("uf in @estados").groupby(["uf", "ano"]).sum().reset_index()
por_ano_dos_estados.head()
sns.barplot(data=por_ano_dos_estados, x="ano", y="gasto", hue="uf")
sns.catplot(data=por_ano_dos_estados, x="ano", y="gasto", kind="bar", col="uf")
```
# Challenges
## Challenge 01: Reorder the colors by the value on the y-axis.
```
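# Mapping hue to 'gasto' itself with dodge=False keeps full-width bars while the
# palette follows the spending value, so the bar colors end up ordered by the y-axis value.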
sns.barplot(data=gastos_por_ano, x=gastos_por_ano.index, y="gasto", hue="gasto", dodge=False, palette=sns.color_palette("rocket_r",13))#palette='rocket')
plt.legend('')
```
## Challenge 02: Explore the Seaborn documentation.
## Challenge 03: After studying Seaborn, think up and build new charts, and share them on Discord with a brief description so that other classmates can learn about the chart you used.
```
ax = sns.catplot(data=por_ano_dos_estados, x="ano", y="gasto", kind="bar", col="uf", hue="gasto", dodge=False, palette=sns.color_palette("rocket_r",13))
ax._legend.remove()
```
## Challenge 04: Download another dataset from tabnet and carry out an analysis similar to the one in this lesson.
# Statistics & Data Analysis
## Requirements
#### Import Requirements
##### HTML formatting
```
import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
from pandas.api.types import CategoricalDtype
from plotnine import *
from scipy.stats import *
import scikit_posthocs as sp
data = pd.read_csv("./NewCols.csv")
```
## Calculating the differences between the normalized values.
```
data_control = data[data["treatment"] == "baseline"]
data_control.to_csv("./control.csv")
data_treatment = data[data["treatment"] == "intravenous LPS"]
data_control.to_csv("./lps.csv")
procData = data_treatment
# Treatment-minus-baseline difference for each normalized column.
diff_cols = ["AVAR2", "CVAR2", "AWT2", "CWT2", "total2",
             "totalA", "totalC", "totalWT", "totalVar"]
for col in diff_cols:
    procData["diff_" + col] = (
        np.array(data_treatment[col]) - np.array(data_control[col])).tolist()
procData.to_csv("./procData.csv")
newDF= data_control[["testGroup","tg2"]]
newDF
newDF.rename(columns = {'testGroup':'c_tg','tg2':'c_tg2'}, inplace=True)
newDF
newDF.index = procData.index
procData= pd.concat([procData,newDF], axis=1)
```
#### Difference Table
```
pd.set_option('display.max_rows', procData.shape[0]+1)
diff_data = procData.loc[ :,"diff_AVAR2":"diff_totalVar" ]
diff_data.to_csv("./diffData.csv")
diff_data.describe()
diff_data.var()
diff_data.std()
diff_data.skew()
diff_data.kurtosis().tolist()
diff_data.kurtosis()
```
## QQ Data for LPS
### Summary Statistics
#### Baseline - summary stats
```
baseline_summary = data_control.loc[:,'AWT2':'total2']
baseline_summary.describe()
```
#### Variance & Std Dev
```
baseline_summary.var()
baseline_summary.std()
```
#### Skew
```
scipy.stats.skew(baseline_summary).tolist()
```
#### Kurtosis
```
scipy.stats.kurtosis(baseline_summary).tolist()
```
#### Intravenous LPS - summary stats
```
LPS_summary = data_treatment.loc[:,'AWT2':'total2']
LPS_summary.describe()
```
#### Variance & Std Dev
```
LPS_summary.var()
LPS_summary.std()
```
#### Skew
```
scipy.stats.skew(LPS_summary)
```
#### Kurtosis
```
scipy.stats.kurtosis(LPS_summary)
```
## Graph Data
```
from plotnine import *
ggplot(data, aes(x='treatment', y='AWT2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(data_control["AWT2"],data_treatment["AWT2"])
ggplot(data, aes(x='treatment', y='CWT2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(data_control["CWT2"],data_treatment["CWT2"])
ggplot(data, aes(x='treatment', y='AVAR2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(data_control["AVAR2"],data_treatment["AVAR2"])
ggplot(data, aes(x='treatment', y='CVAR2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(data_control["CVAR2"],data_treatment["CVAR2"])
removed_outliers = data.total2.between(data.total2.quantile(.05), data.total2.quantile(.95))
data_total= data[removed_outliers]
ggplot(data_total, aes(x='treatment',y="total2" ), ) + geom_boxplot(outlier_shape = "") + geom_jitter(data_total,aes(y="total2",colour='treatment',shape='treatment') ) + ggtitle("QQ Plot of IRAK-1 expression per GbP") + xlab("Treatment") + ylab("Total IRAK-1 Levels per Gigabase pair") + ylim(data_total.total2.quantile(.05), data_total.total2.quantile(.95))
a = 0.05
wilcoxon(diff_data["diff_total2"])
removed_outliers_diffData = diff_data.diff_total2.between(diff_data.diff_total2.quantile(.05), diff_data.diff_total2.quantile(.95))
difftotalData=diff_data[removed_outliers_diffData]
ggplot(difftotalData, aes( x='0',y='diff_total2') ) + geom_boxplot() + geom_point(color="red") + ylim(difftotalData.diff_total2.quantile(.05), difftotalData.diff_total2.quantile(.95)) + ggtitle("QQ Plot of changes in IRAK-1 levels per Gbp") + xlab("Treatment") + ylab("Changes in IRAK-1 Levels per Gigabase pair")
data_plot = data_treatment
controlData = data_control['total2']
controlData
data_plot["ctrl_total2"]=controlData.to_list()
data_plot
from sklearn.linear_model import LinearRegression
model = LinearRegression().fit(data_plot.total2.to_numpy().reshape((-1, 1)), data_plot.ctrl_total2)
r_sq= model.score(data_plot.total2.to_numpy().reshape((-1, 1)), data_plot.ctrl_total2)
print('coefficient of determination:', r_sq)
print('intercept:', model.intercept_)
print('slope:', model.coef_)
ggplot(data_plot,aes(x='total2',y='ctrl_total2') ) + geom_point() + geom_smooth(method='lm')
from sklearn import linear_model
lm = linear_model.LinearRegression()
shapiro_test = shapiro(data_control['total2'])
shapiro_test
shapiro_test = shapiro(data_treatment['total2'])
shapiro_test
shapiro_test = shapiro(diff_data['diff_total2'])
shapiro_test
ggplot(data, aes(x='treatment', y='totalVar') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(diff_data["diff_totalVar"])
ggplot(data, aes(x='treatment', y='totalWT') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(diff_data["diff_totalWT"])
ggplot(data, aes(x='treatment', y='totalA') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(diff_data["diff_totalA"])
ggplot(data, aes(x='treatment', y='totalC') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(diff_data["diff_totalC"])
```
## Statistics
### Total 2 Comparison
#### Wilcoxon non-parametric
```
a = 0.05
w, p = wilcoxon(data_control["total2"],data_treatment["total2"])
print(w, p)
if (p < a):
    print("As p = "+str(p)+" is less than a = "+str(a))
    print("we reject the Null Hypothesis.")
    print("There is a significant difference between the groups.")
else:
    print("As p = "+str(p)+" is larger than a = "+str(a))
    print("we FAIL TO reject the Null Hypothesis.")
    print("There is NOT a significant difference between the groups.")
```
#### Friedman's ANOVA
```
sp.posthoc_nemenyi_friedman(diff_data)
```
Friedman test: the Nemenyi procedure above is the post-hoc comparison that follows an omnibus Friedman test.
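A minimal sketch of how the omnibus Friedman test itself could be run on the same difference columns with SciPy (an assumption on my part - whether the `diff_*` columns are really repeated measures of one quantity is a modelling judgement):
```
from scipy.stats import friedmanchisquare

# Treat each diff_* column as one condition measured on the same subjects (rows)
stat, p = friedmanchisquare(*[diff_data[col] for col in diff_data.columns])
print(stat, p)
```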
### other
```
a = 0.05
w, p = wilcoxon((data_control["totalA"]/data_control["totalC"] ),(data_treatment["totalA"]/data_treatment["totalC"]))
print(w, p)
a = 0.05
w, p = wilcoxon((data_control["AVAR2"]/data_control["CVAR2"] ),(data_treatment["AVAR2"]/data_treatment["CVAR2"]))
print(w, p)
a = 0.05
w, p = wilcoxon((data_control["AWT2"]/data_control["CWT2"] ),(data_treatment["AWT2"]/data_treatment["CWT2"]))
print(w, p)
ggplot()+geom_histogram(procData,aes(x="tg2"))
ggplot()+geom_histogram(procData,aes(x="mutant"))
ggplot()+geom_bar(procData,aes(x="spliceVariant",fill="mutant"))
ggplot()+geom_col(procData,aes(x="spliceVariant",y="diff_totalA/diff_totalC",fill="mutant"))
a = 0.05
diff_data = procData[(data["totalC"] > 0 ) & (data["totalA"] > 0 )]
ggplot()+geom_histogram(diff_data,aes(x="tg2"))
wilcoxon((data_control["totalC"] )/(data_control["totalA"]))
a = 0.05
w, p = wilcoxon(data_control["total2"],data_treatment["total2"])
print(w, p)
```
To do:
1. Do the table.
2. Two graphs: control, LPS & difference.
3. Black and white.
4. Make sure it's not sloppy.
- Correlation plot for each patient - total2 & diff_total2.
- Look for A/C ratios.
- e.g. `ggplot(data_plot, aes(x='total2', y='ctrl_total2')) + geom_point() + geom_smooth(method='lm')`
# Linear regression and stochastic gradient descent
This assignment is based on the lecture materials on linear regression and gradient descent. You will predict a company's sales revenue as a function of its advertising spend on TV, in newspapers and on radio.
## You will learn how to:
- solve the linear regression problem
- implement stochastic gradient descent to fit it
- solve the linear regression problem analytically
## Introduction
Linear regression is one of the most thoroughly studied machine learning methods. It predicts the value of a quantitative target as a linear combination of the other features, with the model weights as parameters. The optimal parameters of a linear regression (in the sense of minimizing some error functional) can be found analytically via the normal equation or numerically with optimization methods.
Linear regression uses a simple quality functional - the mean squared error. We will work with a sample containing 3 features. Fitting the model parameters (weights) amounts to solving the following problem:
$$\Large \frac{1}{\ell}\sum_{i=1}^\ell{{((w_0 + w_1x_{i1} + w_2x_{i2} + w_3x_{i3}) - y_i)}^2} \rightarrow \min_{w_0, w_1, w_2, w_3},$$
where $x_{i1}, x_{i2}, x_{i3}$ are the feature values of the $i$-th object, $y_i$ is the target value of the $i$-th object, and $\ell$ is the number of objects in the training sample.
## Gradient descent
The parameters $w_0, w_1, w_2, w_3$ that minimize the mean squared error can be found numerically with gradient descent.
The gradient step for the weights looks as follows:
$$\Large w_0 \leftarrow w_0 - \frac{2\eta}{\ell} \sum_{i=1}^\ell{{((w_0 + w_1x_{i1} + w_2x_{i2} + w_3x_{i3}) - y_i)}}$$
$$\Large w_j \leftarrow w_j - \frac{2\eta}{\ell} \sum_{i=1}^\ell{{x_{ij}((w_0 + w_1x_{i1} + w_2x_{i2} + w_3x_{i3}) - y_i)}},\ j \in \{1,2,3\}$$
Here $\eta$ is the gradient descent step size (learning rate).
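For intuition, the same update can be written in vectorized form. This is only an illustrative sketch (it assumes the matrix $X$ already contains the column of ones, as constructed later in this assignment):
```
import numpy as np

def batch_gradient_step(X, y, w, eta=0.01):
    """One full-batch gradient descent step for the MSE objective above."""
    residuals = X.dot(w) - y                      # (w0 + w1*x_i1 + w2*x_i2 + w3*x_i3) - y_i for every object
    grad = 2.0 / X.shape[0] * X.T.dot(residuals)  # gradient of the mean squared error
    return w - eta * grad
```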
## Stochastic gradient descent
The problem with the gradient descent described above is that, on large samples, computing the gradient over all available data at every step can be computationally very expensive.
In the stochastic variant of gradient descent, the weight updates are computed from a single randomly chosen object of the training sample:
$$\Large w_0 \leftarrow w_0 - \frac{2\eta}{\ell} {((w_0 + w_1x_{k1} + w_2x_{k2} + w_3x_{k3}) - y_k)}$$
$$\Large w_j \leftarrow w_j - \frac{2\eta}{\ell} {x_{kj}((w_0 + w_1x_{k1} + w_2x_{k2} + w_3x_{k3}) - y_k)},\ j \in \{1,2,3\},$$
where $k$ is a random index, $k \in \{1, \ldots, \ell\}$.
## Normal equation
The vector of optimal weights $w$ can also be found analytically.
We want to find a weight vector $w$ such that the vector $y$, approximating the target, is obtained by multiplying the matrix $X$ (consisting of all features of the training objects except the target) by the weight vector $w$. That is, the following matrix equation should hold:
$$\Large y = Xw$$
Multiplying on the left by $X^T$, we get:
$$\Large X^Ty = X^TXw$$
This is convenient because the matrix $X^TX$ is square, so the solution (the vector $w$) can be written as:
$$\Large w = {(X^TX)}^{-1}X^Ty$$
The matrix ${(X^TX)}^{-1}X^T$ is the [*pseudoinverse*](https://ru.wikipedia.org/wiki/Псевдообратная_матрица) of the matrix $X$. In NumPy it can be computed with the function [numpy.linalg.pinv](http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.linalg.pinv.html).
However, computing the pseudoinverse is computationally expensive and unstable when the determinant of the matrix $X$ is small (the multicollinearity problem).
In practice it is better to find the weight vector $w$ by solving the matrix equation
$$\Large X^TXw = X^Ty$$ This can be done with the function [numpy.linalg.solve](http://docs.scipy.org/doc/numpy-1.10.1/reference/generated/numpy.linalg.solve.html).
Still, in practice gradient descent, especially its stochastic version, works faster for large matrices $X$.
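As a quick illustrative comparison (assuming `X` and `y` have already been built with the column of ones, as done below), both routes give the same weights up to numerical precision:
```
import numpy as np

w_pinv = np.linalg.pinv(X).dot(y)                    # via the pseudoinverse
w_solve = np.linalg.solve(X.T.dot(X), X.T.dot(y))    # via the normal equation
print(np.allclose(w_pinv, w_solve))                  # expected: True
```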
## Instructions
First, let's write a simple function for saving answers to a text file. The answers are the numbers obtained while solving this assignment, rounded to 3 decimal places. After completing the assignment, the resulting files have to be submitted via the form on the assignment page on Coursera.org.
```
def write_answer_to_file(answer, filename):
with open(filename, 'w') as f_out:
f_out.write(str(round(answer, 3)))
```
**1. Load the data from the file *advertising.csv* into a pandas DataFrame object. [Data source](http://www-bcf.usc.edu/~gareth/ISL/data.html).**
```
import pandas as pd
adver_data = pd.read_csv('advertising.csv')
```
**Look at the first 5 records and at the feature statistics of this dataset.**
```
adver_data.head()
adver_data.describe()
```
**Create NumPy arrays *X* from the TV, Radio and Newspaper columns and *y* from the Sales column. Use the *values* attribute of the pandas DataFrame object.**
```
import numpy as np
X = np.array(adver_data.values[:,0:3])
y = np.array(adver_data.values[:,3])
```
**Scale the columns of the matrix *X* by subtracting the mean of the corresponding column from every value and dividing the result by the standard deviation. For definiteness, use the mean and std methods of NumPy arrays (the std implementation in pandas may differ). Note that in NumPy calling .mean() without arguments returns the mean over all elements of the array, not over the columns as in pandas. To compute it per column, specify the axis parameter.**
```
means, stds = np.mean(X,axis=0), np.std(X, axis=0)
X = (X - means) / stds
```
**Append a column of ones to the matrix *X*, using the NumPy *hstack*, *ones* and *reshape* methods. The vector of ones is needed so that the intercept $w_0$ of the linear regression does not have to be handled separately.**
```
X = np.hstack((X, np.ones(X.shape[0]).reshape(X.shape[0],1)))
```
**2. Implement the function *mserror* - the mean squared error of a prediction. It takes two arguments - the Series objects *y* (target values) and *y\_pred* (predicted values). Do not use loops in this function - otherwise it will be computationally inefficient.**
```
def mserror(y, y_pred):
return np.mean((y-y_pred)**2)
```
**What is the mean squared error of predicting Sales if we always predict the median value of Sales over the original sample? Write the answer to the file '1.txt'.**
```
y_pred = np.median(y)
answer1 = mserror(y, y_pred)
print(answer1)
write_answer_to_file(answer1, '1.txt')
```
**3. Implement the function *normal_equation*, which, given the matrices (NumPy arrays) *X* and *y*, computes the weight vector $w$ according to the normal equation of linear regression.**
```
def normal_equation(X, y):
return np.linalg.solve(np.dot(X.transpose(),X),np.dot(X.transpose(),y))
norm_eq_weights = normal_equation(X, y)
print(norm_eq_weights)
```
**What sales does the linear model with the weights found via the normal equation predict for average advertising investments in TV, radio and newspapers (i.e. when the scaled TV, Radio and Newspaper features are all zero)? Write the answer to the file '2.txt'.**
```
answer2 = np.sum([0., 0., 0., 1.]*norm_eq_weights)
print(answer2)
write_answer_to_file(answer2, '2.txt')
```
**4. Write the function *linear_prediction*, which takes a matrix *X* and a weight vector *w* of the linear model and returns the vector of predictions as a linear combination of the columns of *X* with weights *w*.**
```
def linear_prediction(X, w):
return np.dot(X, w)
```
**What is the mean squared error of predicting Sales with the linear model whose weights were found via the normal equation? Write the answer to the file '3.txt'.**
```
answer3 = mserror(y, linear_prediction(X, norm_eq_weights))
print(answer3)
write_answer_to_file(answer3, '3.txt')
```
**5. Write the function *stochastic_gradient_step*, which implements one step of stochastic gradient descent for linear regression. The function takes the matrix *X*, the vectors *y* and *w*, the number *train_ind* - the index of the training object (row of the matrix *X*) used to update the weights - and the number *$\eta$* (eta) - the gradient descent step size (*eta*=0.01 by default). The result is the vector of updated weights. Our implementation is written explicitly for data with 3 features, but it is easy to modify it for any number of features - feel free to do so.**
```
def stochastic_gradient_step(X, y, w, train_ind, eta=0.01):
return w + 2 * eta/X.shape[0] * X[train_ind] * (y[train_ind] - linear_prediction(X[train_ind], w))
print (stochastic_gradient_step(X, y, normal_equation(X, y), 0))
```
**6. Write the function *stochastic_gradient_descent*, which implements stochastic gradient descent for linear regression. The function takes the following arguments:**
- X - the matrix corresponding to the training sample
- y - the vector of target values
- w_init - the vector of initial model weights
- eta - the gradient descent step size (0.01 by default)
- max_iter - the maximum number of gradient descent iterations (10000 by default)
- min_weight_dist - the Euclidean distance between the weight vectors on consecutive gradient descent iterations
below which the algorithm stops (1e-8 by default)
- seed - the number used to make the generated pseudo-random numbers reproducible (42 by default)
- verbose - a flag for printing information (e.g. for debugging, False by default)
**At every iteration, the current value of the mean squared error should be appended to a vector (list). The function should return the weight vector $w$ as well as the vector (list) of errors.**
```
def stochastic_gradient_descent(X, y, w_init, eta=1e-2, max_iter=1e4,
min_weight_dist=1e-8, seed=42, verbose=False):
    # Initialize the distance between the weight vectors on
    # consecutive iterations with a large number.
    weight_dist = np.inf
    # Initialize the weight vector
    w = w_init
    # Errors at each iteration will be collected here
    errors = []
    # Iteration counter
    iter_num = 0
    # We will draw pseudo-random numbers
    # (the index of the object used to update the weights); the seed makes
    # this sequence of pseudo-random numbers reproducible.
    np.random.seed(seed)
    # Main loop
    while weight_dist > min_weight_dist and iter_num < max_iter:
        # draw a pseudo-random index
        # of a training-set object
random_ind = np.random.randint(X.shape[0])
w_new = stochastic_gradient_step(X, y, w, random_ind, eta)
weight_dist = np.linalg.norm(w-w_new)
w = w_new
errors.append(mserror(y, linear_prediction(X, w)))
iter_num += 1
return w, errors
```
**Run $10^5$ iterations of stochastic gradient descent. Use an initial weight vector *w_init* consisting of zeros. Leave the parameters *eta* and *seed* at their default values (*eta*=0.01, *seed*=42 - this is important for checking the answers).**
```
%%time
stoch_grad_desc_weights, stoch_errors_by_iter = stochastic_gradient_descent(X, y, np.zeros(X.shape[1]),max_iter=1e5)
```
**Let's see what the error looks like over the first 50 iterations of stochastic gradient descent. Note that the error does not necessarily decrease at every iteration.**
```
%pylab inline
plot(range(50), stoch_errors_by_iter[:50])
xlabel('Iteration number')
ylabel('MSE')
```
**Now let's look at how the error depends on the iteration number over $10^5$ iterations of stochastic gradient descent. We can see that the algorithm converges.**
```
%pylab inline
#print(stoch_errors_by_iter)
plot(range(len(stoch_errors_by_iter)), stoch_errors_by_iter)
xlabel('Iteration number')
ylabel('MSE')
```
**Let's look at the weight vector the method converged to.**
```
stoch_grad_desc_weights
```
**Let's look at the mean squared error at the last iteration.**
```
stoch_errors_by_iter[-1]
```
**What is the mean squared error of predicting Sales with the linear model whose weights were found with stochastic gradient descent? Write the answer to the file '4.txt'.**
```
answer4 = mserror(y, linear_prediction(X, stoch_grad_desc_weights))
print(answer4)
write_answer_to_file(answer4, '4.txt')
```
**The answers to this assignment are the text files produced in the course of this solution. Note that the submitted files must not contain a trailing empty line. This is a limitation of the Coursera platform; we are working on removing this limitation.**
# Link prediction example: GraphSAGE on the Cora citation dataset
In this example, we use our implementation of the [GraphSAGE](http://snap.stanford.edu/graphsage/) algorithm to build a model that predicts citation links in the Cora dataset (see below). The problem is treated as a supervised link prediction problem on a homogeneous citation network with nodes representing papers (with attributes such as binary keyword indicators and categorical subject) and links corresponding to paper-paper citations.
To address this problem, we build a model with the following architecture. First, we build a two-layer GraphSAGE model that takes labeled `(paper1, paper2)` node pairs corresponding to possible citation links and outputs a pair of node embeddings for the `paper1` and `paper2` nodes of the pair. These embeddings are then fed into a link classification layer, which first applies a binary operator to the two node embeddings (e.g., concatenating them) to construct the embedding of the potential link. The link embeddings thus obtained are passed through a dense link classification layer to obtain link predictions - the probability that each candidate link actually exists in the network. The entire model is trained end-to-end by minimizing a loss function of choice (e.g., binary cross-entropy between predicted link probabilities and true link labels, with true/false citation links labeled 1/0) using stochastic gradient descent (SGD) updates of the model parameters, with minibatches of 'training' links fed into the model.
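As a rough illustration of what such binary operators do to a pair of node embeddings (a sketch with made-up toy vectors, not StellarGraph's internal code):
```
import numpy as np

z1 = np.array([0.1, 0.4, 0.3])  # embedding of paper1 (toy values)
z2 = np.array([0.2, 0.1, 0.5])  # embedding of paper2 (toy values)

concat = np.concatenate([z1, z2])  # concatenation, as mentioned above
product = z1 * z2                  # element-wise product
inner = float(np.dot(z1, z2))      # inner product ('ip', used later in this notebook)
sq_diff = (z1 - z2) ** 2           # squared difference
```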
```
import networkx as nx
import pandas as pd
import os
import stellargraph as sg
from stellargraph.data import EdgeSplitter
from stellargraph.mapper import GraphSAGELinkGenerator
from stellargraph.layer import GraphSAGE, link_classification
import keras
from sklearn import preprocessing, feature_extraction, model_selection
from stellargraph import globalvar
```
### Loading the CORA network data
**Downloading the CORA dataset:**
The dataset used in this demo can be downloaded from https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz
The following is the description of the dataset:
> The Cora dataset consists of 2708 scientific publications classified into one of seven classes.
> The citation network consists of 5429 links. Each publication in the dataset is described by a
> 0/1-valued word vector indicating the absence/presence of the corresponding word from the dictionary.
> The dictionary consists of 1433 unique words. The README file in the dataset provides more details.
Download and unzip the cora.tgz file to a location on your computer and set the `data_dir` variable to
point to the location of the dataset (the directory containing "cora.cites" and "cora.content").
```
data_dir = "~/data/cora"
```
Load the graph from edgelist
```
edgelist = pd.read_table(os.path.join(data_dir, "cora.cites"), header=None, names=["source", "target"])
edgelist["label"] = "cites" # set the edge type
G = nx.from_pandas_edgelist(edgelist, edge_attr="label")
```
Load the features and subject for the nodes
```
feature_names = ["w_{}".format(ii) for ii in range(1433)]
column_names = feature_names + ["subject"]
node_data = pd.read_table(os.path.join(data_dir, "cora.content"), header=None, names=column_names)
```
Define a set of node features that will be used by the model as the difference between the set of all node features and a list of user-defined node attributes to ignore:
```
ignore_attr = []
feature_names = sorted(set(column_names) - set(ignore_attr))
```
We need to convert node features that will be used by the model to numeric values that are required for GraphSAGE input. Note that all node features in the Cora dataset, except the categorical "subject" feature, are already numeric, and don't require the conversion.
```
if "subject" in feature_names:
# Convert node features to numeric vectors
feature_encoding = feature_extraction.DictVectorizer(sparse=False)
node_features = feature_encoding.fit_transform(
node_data[feature_names].to_dict("records")
)
else: # node features are already numeric, no further conversion is needed
node_features = node_data[feature_names].values
```
Add node data to G:
```
for nid, f in zip(node_data.index, node_features):
G.node[nid][globalvar.TYPE_ATTR_NAME] = "paper" # specify node type
G.node[nid]["feature"] = f
```
We aim to train a link prediction model, hence we need to prepare the train and test sets of links and the corresponding graphs with those links removed.
We are going to split our input graph into a train and test graphs using the EdgeSplitter class in `stellargraph.data`. We will use the train graph for training the model (a binary classifier that, given two nodes, predicts whether a link between these two nodes should exist or not) and the test graph for evaluating the model's performance on hold out data.
Each of these graphs will have the same number of nodes as the input graph, but the number of links will differ (be reduced) as some of the links will be removed during each split and used as the positive samples for training/testing the link prediction classifier.
From the original graph G, extract a randomly sampled subset of test edges (true and false citation links) and the reduced graph G_test with the positive test edges removed:
```
# Define an edge splitter on the original graph G:
edge_splitter_test = EdgeSplitter(G)
# Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G, and obtain the
# reduced graph G_test with the sampled links removed:
G_test, edge_ids_test, edge_labels_test = edge_splitter_test.train_test_split(
p=0.1, method="global"
)
```
The reduced graph G_test, together with the test ground truth set of links (edge_ids_test, edge_labels_test), will be used for testing the model.
Now repeat this procedure to obtain the training data for the model. From the reduced graph G_test, extract a randomly sampled subset of train edges (true and false citation links) and the reduced graph G_train with the positive train edges removed:
```
# Define an edge splitter on the reduced graph G_test:
edge_splitter_train = EdgeSplitter(G_test)
# Randomly sample a fraction p=0.1 of all positive links, and same number of negative links, from G_test, and obtain the
# reduced graph G_train with the sampled links removed:
G_train, edge_ids_train, edge_labels_train = edge_splitter_train.train_test_split(
p=0.1, method="global"
)
```
G_train, together with the train ground truth set of links (edge_ids_train, edge_labels_train), will be used for training the model.
Convert G_train and G_test to StellarGraph objects (undirected, as required by GraphSAGE) for ML:
```
G_train = sg.StellarGraph(G_train, node_features="feature")
G_test = sg.StellarGraph(G_test, node_features="feature")
```
Summary of G_train and G_test - note that they have the same set of nodes, only differing in their edge sets:
```
print(G_train.info())
print(G_test.info())
```
Next, we create the link mappers for sampling and streaming training and testing data to the model. The link mappers essentially "map" pairs of nodes `(paper1, paper2)` to the input of GraphSAGE: they take minibatches of node pairs, sample 2-hop subgraphs with `(paper1, paper2)` head nodes extracted from those pairs, and feed them, together with the corresponding binary labels indicating whether those pairs represent true or false citation links, to the input layer of the GraphSAGE model, for SGD updates of the model parameters.
Specify the minibatch size (number of node pairs per minibatch) and the number of epochs for training the model:
```
batch_size = 50
epochs = 10
```
Specify the sizes of 1- and 2-hop neighbour samples for GraphSAGE:
Note that the length of `num_samples` list defines the number of layers/iterations in the GraphSAGE model. In this example, we are defining a 2-layer GraphSAGE model.
```
num_samples = [20, 10]
train_gen = GraphSAGELinkGenerator(G_train, batch_size, num_samples).flow(edge_ids_train,edge_labels_train)
test_gen = GraphSAGELinkGenerator(G_test, batch_size, num_samples).flow(edge_ids_test, edge_labels_test)
```
Build the model: a 2-layer GraphSAGE model acting as node representation learner, with a link classification layer on concatenated `(paper1, paper2)` node embeddings.
GraphSAGE part of the model, with hidden layer sizes of 50 for both GraphSAGE layers, a bias term, and no dropout. (Dropout can be switched on by specifying a positive dropout rate, 0 < dropout < 1)
Note that the length of layer_sizes list must be equal to the length of num_samples, as len(num_samples) defines the number of hops (layers) in the GraphSAGE model.
```
layer_sizes = [50, 50]
assert len(layer_sizes) == len(num_samples)
graphsage = GraphSAGE(
layer_sizes=layer_sizes, generator=train_gen, bias=True, dropout=0.0
)
# Expose input and output sockets of graphsage, for source and destination nodes:
x_inp_src, x_out_src = graphsage.default_model(flatten_output=False)
x_inp_dst, x_out_dst = graphsage.default_model(flatten_output=False)
# re-pack into a list where (source, destination) inputs alternate, for link inputs:
x_inp = [x for ab in zip(x_inp_src, x_inp_dst) for x in ab]
# same for outputs:
x_out = [x_out_src, x_out_dst]
```
Final link classification layer that takes a pair of node embeddings produced by graphsage, applies a binary operator to them to produce the corresponding link embedding ('ip' for inner product; other options for the binary operator can be seen by running a cell with `?link_classification` in it), and passes it through a dense layer:
```
prediction = link_classification(
output_dim=1, output_act="sigmoid", edge_feature_method='ip'
)(x_out)
```
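Intuitively, with the 'ip' operator the link probability behaves roughly like a sigmoid applied to the inner product of the two node embeddings. The toy sketch below only conveys that idea and is not the layer's actual implementation:
```
import numpy as np

def ip_link_probability(z_src, z_dst):
    """Toy illustration: inner product of two node embeddings squashed to a probability."""
    score = np.dot(z_src, z_dst)
    return 1.0 / (1.0 + np.exp(-score))  # sigmoid

print(ip_link_probability(np.array([0.1, 0.4, 0.3]), np.array([0.2, 0.1, 0.5])))
```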
Stack the GraphSAGE and prediction layers into a Keras model, and specify the loss
```
model = keras.Model(inputs=x_inp, outputs=prediction)
model.compile(
optimizer=keras.optimizers.Adam(lr=1e-3),
loss=keras.losses.binary_crossentropy,
metrics=[keras.metrics.binary_accuracy],
)
```
Evaluate the initial (untrained) model on the train and test set:
```
init_train_metrics = model.evaluate_generator(train_gen)
init_test_metrics = model.evaluate_generator(test_gen)
print("\nTrain Set Metrics of the initial (untrained) model:")
for name, val in zip(model.metrics_names, init_train_metrics):
print("\t{}: {:0.4f}".format(name, val))
print("\nTest Set Metrics of the initial (untrained) model:")
for name, val in zip(model.metrics_names, init_test_metrics):
print("\t{}: {:0.4f}".format(name, val))
```
Train the model:
```
history = model.fit_generator(
train_gen,
epochs=epochs,
validation_data=test_gen,
verbose=1,
shuffle=True,
)
```
Plot the training history:
```
import matplotlib.pyplot as plt
%matplotlib inline
def plot_history(history):
metrics = sorted(history.history.keys())
metrics = metrics[:len(metrics)//2]
for m in metrics:
# summarize history for metric m
plt.plot(history.history[m])
plt.plot(history.history['val_' + m])
plt.title(m)
plt.ylabel(m)
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper right')
plt.show()
plot_history(history)
```
Evaluate the trained model on test citation links:
```
train_metrics = model.evaluate_generator(train_gen)
test_metrics = model.evaluate_generator(test_gen)
print("\nTrain Set Metrics of the trained model:")
for name, val in zip(model.metrics_names, train_metrics):
print("\t{}: {:0.4f}".format(name, val))
print("\nTest Set Metrics of the trained model:")
for name, val in zip(model.metrics_names, test_metrics):
print("\t{}: {:0.4f}".format(name, val))
```
```
!pip install eli5
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import median_absolute_error
from sklearn.model_selection import cross_val_score
import eli5
from eli5.sklearn import PermutationImportance
from ast import literal_eval
from tqdm import tqdm_notebook
cd '/content/drive/My Drive/Colab Notebooks/matrix'
ls data
df = pd.read_csv('data/men_shoes.csv', low_memory=False)
df.columns
def run_model(feats, model = DecisionTreeRegressor(max_depth=5)):
x = df[ feats ].values
y = df[ 'prices_amountmin'].values
scores = cross_val_score(model, x, y, scoring ='neg_mean_absolute_error')
return np.mean(scores), np.std(scores)
df['brand_cat'] = df['brand'].map(lambda x: str(x).lower()).factorize()[0]
run_model(['brand_cat'])
model = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
run_model(['brand_cat'], model)
df.features.head().values
test = {'key': 'value'}
test['key']
str(test)
str_dict = '[{"key":"Gender","value":["Men"]},{"key":"Shoe Size","value":["M"]},{"key":"Shoe Category","value":["Men\'s Shoes"]},{"key":"Color","value":["Multicolor"]},{"key":"Manufacturer Part Number","value":["8190-W-NAVY-7.5"]},{"key":"Brand","value":["Josmo"]}]'
literal_eval(str_dict)[0]['value']
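# parse_features: convert the JSON-like string in the 'features' column into a flat
# {key: value} dict (keys and values lower-cased and stripped); NaN rows yield an empty dict.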
def parse_features(x):
output_dict = {}
if str(x) == 'nan' : return output_dict
features = literal_eval(x.replace('\\"', '"'))
for item in features:
key = item['key'].lower().strip()
value = item['value'][0].lower().strip()
output_dict[key] = value
return output_dict
df['features_parsed'] = df['features'].map(parse_features)
keys = set()
df['features_parsed'].map(lambda x: keys.update(x.keys()))
len(keys)
def get_name_feat(key):
return 'feat_' + key
for key in tqdm_notebook (keys):
df[get_name_feat(key)] = df.features_parsed.map(lambda feats: feats[key] if key in feats else np.nan)
# Example input to parse_features (a list of {'key': ..., 'value': [...]} records):
#   [{'key': 'Gender', 'value': ['Men']},
#    {'key': 'Shoe Size', 'value': ['M']},
#    {'key': 'Shoe Category', 'value': ["Men's Shoes"]},
#    {'key': 'Color', 'value': ['Multicolor']},
#    {'key': 'Manufacturer Part Number', 'value': ['8190-W-NAVY-7.5']},
#    {'key': 'Brand', 'value': ['Josmo']}]
# and the flattened (lower-cased) output it produces, e.g.:
#   {'gender': 'men', 'shoe size': 'm', ...}
df.columns
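# Share (%) of rows in which each parsed feature is present; below we then look only
# at the features available for more than 30% of the products.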
keys_stat = {}
for key in keys:
    keys_stat[key] = df[~df[get_name_feat(key)].isnull()].shape[0] / df.shape[0] * 100
{k:v for k,v in keys_stat.items() if v > 30}
df['feat_brand_cat'] = df['feat_brand'].factorize()[0]
df['feat_color_cat'] = df['feat_color'].factorize()[0]
df['feat_gender_cat'] = df['feat_gender'].factorize()[0]
df['manufacturer part number_cat'] = df['feat_manufacturer part number'].factorize()[0]
df['feat_material_cat'] = df['feat_material'].factorize()[0]
df['feat_sport_cat'] = df['feat_sport'].factorize()[0]
df['feat_style_cat'] = df['feat_style'].factorize()[0]
for key in keys:
df[get_name_feat(key) + '_cat'] = df[get_name_feat(key)].factorize()[0]
df['brand'] = df['brand'].map(lambda x: str(x).lower())
df[ df.brand != df.feat_brand] [ ['brand', 'feat_brand']].head()
feats_cat = [x for x in df.columns if 'cat' in x]
feats_cat
feats = ['brand_cat', 'feat_metal type_cat', 'feat_shape_cat','feat_brand_cat', 'feat_gender_cat','feat_material_cat', 'feat_sport_cat','feat_style_cat']
#feats += feats_cat
#feats = list(set(feats))
model = RandomForestRegressor(max_depth=5, n_estimators=100)
result = run_model(feats, model)
x = df[feats].values
y = df['prices_amountmin'].values
m = RandomForestRegressor(max_depth=5, n_estimators=100, random_state=0)
m.fit(x, y)
print(result)
perm = PermutationImportance(m, random_state=1).fit(x, y);
eli5.show_weights(perm, feature_names=feats)
ls
!git add day5.ipynb
df [df['brand'] == 'nike'].features_parsed.sample(5).values
df['feat_age group'].value_counts()
```
# Counterfactual instances on MNIST
Given a test instance $X$, this method generates counterfactual instances $X^\prime$ for a desired counterfactual class $t$, which can either be a class specified upfront or any class other than the predicted class of $X$.
The loss function for finding counterfactuals is the following:
$$L(X^\prime\vert X) = (f_t(X^\prime) - p_t)^2 + \lambda L_1(X^\prime, X).$$
The first loss term guides the search towards instances $X^\prime$ for which the predicted class probability $f_t(X^\prime)$ is close to a pre-specified target class probability $p_t$ (typically $p_t=1$). The second loss term ensures that the counterfactuals stay close in feature space to the original test instance.
In this notebook we illustrate the usage of the basic counterfactual algorithm on the MNIST dataset.
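To make the loss concrete, here is a small NumPy sketch of how $L(X^\prime\vert X)$ could be evaluated for a candidate counterfactual. This is illustrative only; the helper name and the `lam` value are assumptions, not part of the alibi API:
```
import numpy as np

def cf_loss(proba_target, X_cf, X_orig, p_t=1.0, lam=0.1):
    """Counterfactual loss: squared gap to the target probability plus lambda times the L1 distance."""
    prediction_term = (proba_target - p_t) ** 2
    l1_term = np.abs(X_cf - X_orig).sum()
    return prediction_term + lam * l1_term
```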
```
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR) # suppress deprecation messages
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D, Input
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import to_categorical
import matplotlib
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import os
from time import time
from alibi.explainers import CounterFactual
```
## Load and prepare MNIST data
```
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
print('x_train shape:', x_train.shape, 'y_train shape:', y_train.shape)
plt.gray()
plt.imshow(x_test[1]);
```
Prepare data: scale, reshape and categorize
```
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
x_train = np.reshape(x_train, x_train.shape + (1,))
x_test = np.reshape(x_test, x_test.shape + (1,))
print('x_train shape:', x_train.shape, 'x_test shape:', x_test.shape)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('y_train shape:', y_train.shape, 'y_test shape:', y_test.shape)
xmin, xmax = -.5, .5
x_train = ((x_train - x_train.min()) / (x_train.max() - x_train.min())) * (xmax - xmin) + xmin
x_test = ((x_test - x_test.min()) / (x_test.max() - x_test.min())) * (xmax - xmin) + xmin
```
## Define and train CNN model
```
def cnn_model():
x_in = Input(shape=(28, 28, 1))
x = Conv2D(filters=64, kernel_size=2, padding='same', activation='relu')(x_in)
x = MaxPooling2D(pool_size=2)(x)
x = Dropout(0.3)(x)
x = Conv2D(filters=32, kernel_size=2, padding='same', activation='relu')(x)
x = MaxPooling2D(pool_size=2)(x)
x = Dropout(0.3)(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
x_out = Dense(10, activation='softmax')(x)
cnn = Model(inputs=x_in, outputs=x_out)
cnn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return cnn
cnn = cnn_model()
cnn.summary()
cnn.fit(x_train, y_train, batch_size=64, epochs=3, verbose=0)
cnn.save('mnist_cnn.h5')
```
Evaluate the model on test set
```
cnn = load_model('mnist_cnn.h5')
score = cnn.evaluate(x_test, y_test, verbose=0)
print('Test accuracy: ', score[1])
```
## Generate counterfactuals
Original instance:
```
X = x_test[0].reshape((1,) + x_test[0].shape)
plt.imshow(X.reshape(28, 28));
```
Counterfactual parameters:
```
shape = (1,) + x_train.shape[1:]
target_proba = 1.0
tol = 0.01 # want counterfactuals with p(class)>0.99
target_class = 'other' # any class other than 7 will do
max_iter = 1000
lam_init = 1e-1
max_lam_steps = 10
learning_rate_init = 0.1
feature_range = (x_train.min(),x_train.max())
```
Run counterfactual:
```
# initialize explainer
cf = CounterFactual(cnn, shape=shape, target_proba=target_proba, tol=tol,
target_class=target_class, max_iter=max_iter, lam_init=lam_init,
max_lam_steps=max_lam_steps, learning_rate_init=learning_rate_init,
feature_range=feature_range)
start_time = time()
explanation = cf.explain(X)
print('Explanation took {:.3f} sec'.format(time() - start_time))
```
Results:
```
pred_class = explanation['cf']['class']
proba = explanation['cf']['proba'][0][pred_class]
print(f'Counterfactual prediction: {pred_class} with probability {proba}')
plt.imshow(explanation['cf']['X'].reshape(28, 28));
```
The counterfactual starting from a 7 moves towards the closest class as determined by the model and the data - in this case a 9. The evolution of the counterfactual during the iterations over $\lambda$ can be seen below (note that all of the following examples satisfy the counterfactual condition):
```
n_cfs = np.array([len(explanation['all'][iter_cf]) for iter_cf in range(max_lam_steps)])
examples = {}
for ix, n in enumerate(n_cfs):
if n>0:
examples[ix] = {'ix': ix, 'lambda': explanation['all'][ix][0]['lambda'],
'X': explanation['all'][ix][0]['X']}
columns = len(examples) + 1
rows = 1
fig = plt.figure(figsize=(16,6))
for i, key in enumerate(examples.keys()):
ax = plt.subplot(rows, columns, i+1)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.imshow(examples[key]['X'].reshape(28,28))
plt.title(f'Iteration: {key}')
```
Typically, the first few iterations find counterfactuals that are out of distribution, while the later iterations make the counterfactual more sparse and interpretable.
Let's now try to steer the counterfactual to a specific class:
```
target_class = 1
cf = CounterFactual(cnn, shape=shape, target_proba=target_proba, tol=tol,
target_class=target_class, max_iter=max_iter, lam_init=lam_init,
max_lam_steps=max_lam_steps, learning_rate_init=learning_rate_init,
feature_range=feature_range)
start_time = time()
explanation = cf.explain(X)
print('Explanation took {:.3f} sec'.format(time() - start_time))
```
Results:
```
pred_class = explanation['cf']['class']
proba = explanation['cf']['proba'][0][pred_class]
print(f'Counterfactual prediction: {pred_class} with probability {proba}')
plt.imshow(explanation['cf']['X'].reshape(28, 28));
```
As you can see, by specifying a class, the search process can't go towards the closest class to the test instance (in this case a 9 as we saw previously), so the resulting counterfactual might be less interpretable. We can gain more insight by looking at the difference between the counterfactual and the original instance:
```
plt.imshow((explanation['cf']['X'] - X).reshape(28, 28));
```
This shows that the counterfactual is stripping out the top part of the 7 so that the model predicts a 1 - not very surprising, as the dataset has a lot of examples of diagonally slanted 1’s.
Clean up:
```
os.remove('mnist_cnn.h5')
```
### Newton - Levenberg-Marquardt Method
Install the libraries needed for symbolic computation.
```
!pip install sympy
import numpy as np
import numpy.linalg as npl
from sympy import symbols, diff, pi, cos, exp, sqrt, Abs
import matplotlib.pyplot as plt
```
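All of the cells below implement the same damped-Newton (Levenberg-Marquardt) update, where $\lambda$ is the damping value (`lamb` in the code), $H_f$ the Hessian and $\nabla f$ the gradient of the objective:

$$x_{k+1} = x_k - \left(\lambda I + H_f(x_k)\right)^{-1}\,\nabla f(x_k)$$

The iteration stops after 100 steps or once $\lvert f(x_{k+1}) - f(x_k)\rvert$ drops below the `error` threshold.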
## Ackley Function
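For reference, the objective in this section is the Ackley function with $a=20$, $b=0.2$ and $c=2\pi$, whose global minimum is $f(0,\dots,0)=0$:

$$f(x) = -a\,\exp\!\left(-b\sqrt{\frac{1}{d}\sum_{i=1}^{d} x_i^2}\right) - \exp\!\left(\frac{1}{d}\sum_{i=1}^{d}\cos(c\,x_i)\right) + a + e$$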
### 2 Dimensions
```
x1, x2= symbols('x1 x2') #variables simbolicas
x = np.array([[-0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 100 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Error minimo, criterio de parada
a = 20
b = 0.2
c = 2*pi
d=2
f=-a*exp(-b*sqrt(1/d*(x1**2+x2**2)))-exp(1/d*(cos(c*x1)+cos(c*x2)))+a+exp(1)#función objetivo
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx1dx2=diff(df_dx1,x2)
x_temp = [(x1,x[0,0]),(x2,x[0,1])]
grad = np.array([df_dx1.subs(x_temp).evalf(),df_dx2.subs(x_temp).evalf()])#Vector gradiente
H = np.array([[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf()],[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
ide = np.identity(d) #matriz de identidad
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
x_temp = [(x1,x[i,0]),(x2,x[i,1])]
grad = np.array([df_dx1.subs(x_temp).evalf(),df_dx2.subs(x_temp).evalf()])#Vector gradiente
H = np.array([[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf()],[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf()]])#Hessiano
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
vals = np.arange(-1,1,0.1)
z_c = np.ones(len(vals)**2).reshape(len(vals),len(vals))
for i in range(0,len(vals)):
for j in range(0,len(vals)):
x_test = [(x1,vals[i]),(x2,vals[j])]
z_c[i,j] = float(f.subs(x_test).evalf())
plt.contourf(vals,vals,z_c)
plt.plot(x[:,0],x[:,1],c='red')
plt.colorbar();
```
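As an optional check (not in the original notebook), the `z` vector collected above can be plotted to see how the objective value decreases; its entries are sympy floats, so they are cast to Python floats first:
```
plt.figure()
plt.plot([float(v) for v in z], marker='o')
plt.xlabel('iteration')
plt.ylabel('f(x)')
plt.title('Ackley 2D - Levenberg-Marquardt convergence');
```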
### 5 Dimensions
```
x1, x2, x3, x4, x5= symbols('x1 x2 x3 x4 x5') #variables simbolicas
x = np.array([[-0.5,0.5,0.5,0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 100 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Minimum error, stopping criterion
a = 20
b = 0.2
c = 2*pi
d = 5
ide = np.identity(d) #identity matrix (must be built after d is set)
f=-a*exp(-b*sqrt(1/d*(x1**2+x2**2+x3**2+x4**2+x5**2)))-exp(1/d*(cos(c*x1)+cos(c*x2)+cos(c*x3)+cos(c*x4)+cos(c*x5)))+a+exp(1)#objective function
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
df_dx3=diff(f,x3)
df_dx4=diff(f,x4)
df_dx5=diff(f,x5)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx32=diff(df_dx3,x3)
d2f_dx42=diff(df_dx4,x4)
d2f_dx52=diff(df_dx5,x5)
d2f_dx1dx2=diff(df_dx1,x2)
d2f_dx1dx3=diff(df_dx1,x3)
d2f_dx1dx4=diff(df_dx1,x4)
d2f_dx1dx5=diff(df_dx1,x5)
d2f_dx2dx3=diff(df_dx2,x3)
d2f_dx2dx4=diff(df_dx2,x4)
d2f_dx2dx5=diff(df_dx2,x5)
d2f_dx3dx4=diff(df_dx3,x4)
d2f_dx3dx5=diff(df_dx3,x5)
d2f_dx4dx5=diff(df_dx4,x5)
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
x_temp = [(x1,x[i,0]),
(x2,x[i,1]),
(x3,x[i,2]),
(x4,x[i,3]),
(x5,x[i,4])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf()]])#Hessiano
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
```
### 10 Dimensions
```
x1,x2,x3,x4,x5,x6,x7,x8,x9,x10= symbols('x1 x2 x3 x4 x5 x6 x7 x8 x9 x10') #variables simbolicas
x = np.array([[-0.5,0.5,0.5,0.5,0.5,-0.5,0.5,0.5,0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 1000 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Minimum error, stopping criterion
a = 20
b = 0.2
c = 2*pi
d = 10
ide = np.identity(d) #identity matrix (must be built after d is set)
f=-a*exp(-b*sqrt(1/d*(x1**2+x2**2+x3**2+x4**2+x5**2+x6**2+x7**2+x8**2+x9**2+x10**2)))-exp(1/d*(cos(c*x1)+cos(c*x2)+cos(c*x3)+cos(c*x4)+cos(c*x5)+cos(c*x6)+cos(c*x7)+cos(c*x8)+cos(c*x9)+cos(c*x10)))+a+exp(1)#objective function
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
df_dx3=diff(f,x3)
df_dx4=diff(f,x4)
df_dx5=diff(f,x5)
df_dx6=diff(f,x6)
df_dx7=diff(f,x7)
df_dx8=diff(f,x8)
df_dx9=diff(f,x9)
df_dx10=diff(f,x10)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx32=diff(df_dx3,x3)
d2f_dx42=diff(df_dx4,x4)
d2f_dx52=diff(df_dx5,x5)
d2f_dx62=diff(df_dx6,x6)
d2f_dx72=diff(df_dx7,x7)
d2f_dx82=diff(df_dx8,x8)
d2f_dx92=diff(df_dx9,x9)
d2f_dx102=diff(df_dx10,x10)
d2f_dx1dx2=diff(df_dx1,x2)
d2f_dx1dx3=diff(df_dx1,x3)
d2f_dx1dx4=diff(df_dx1,x4)
d2f_dx1dx5=diff(df_dx1,x5)
d2f_dx1dx6=diff(df_dx1,x6)
d2f_dx1dx7=diff(df_dx1,x7)
d2f_dx1dx8=diff(df_dx1,x8)
d2f_dx1dx9=diff(df_dx1,x9)
d2f_dx1dx10=diff(df_dx1,x10)
d2f_dx2dx3=diff(df_dx2,x3)
d2f_dx2dx4=diff(df_dx2,x4)
d2f_dx2dx5=diff(df_dx2,x5)
d2f_dx2dx6=diff(df_dx2,x6)
d2f_dx2dx7=diff(df_dx2,x7)
d2f_dx2dx8=diff(df_dx2,x8)
d2f_dx2dx9=diff(df_dx2,x9)
d2f_dx2dx10=diff(df_dx2,x10)
d2f_dx3dx4=diff(df_dx3,x4)
d2f_dx3dx5=diff(df_dx3,x5)
d2f_dx3dx6=diff(df_dx3,x6)
d2f_dx3dx7=diff(df_dx3,x7)
d2f_dx3dx8=diff(df_dx3,x8)
d2f_dx3dx9=diff(df_dx3,x9)
d2f_dx3dx10=diff(df_dx3,x10)
d2f_dx4dx5=diff(df_dx4,x5)
d2f_dx4dx6=diff(df_dx4,x6)
d2f_dx4dx7=diff(df_dx4,x7)
d2f_dx4dx8=diff(df_dx4,x8)
d2f_dx4dx9=diff(df_dx4,x9)
d2f_dx4dx10=diff(df_dx4,x10)
d2f_dx5dx6=diff(df_dx5,x6)
d2f_dx5dx7=diff(df_dx5,x7)
d2f_dx5dx8=diff(df_dx5,x8)
d2f_dx5dx9=diff(df_dx5,x9)
d2f_dx5dx10=diff(df_dx5,x10)
d2f_dx6dx7=diff(df_dx6,x7)
d2f_dx6dx8=diff(df_dx6,x8)
d2f_dx6dx9=diff(df_dx6,x9)
d2f_dx6dx10=diff(df_dx6,x10)
d2f_dx7dx8=diff(df_dx7,x8)
d2f_dx7dx9=diff(df_dx7,x9)
d2f_dx7dx10=diff(df_dx7,x10)
d2f_dx8dx9=diff(df_dx8,x9)
d2f_dx8dx10=diff(df_dx8,x10)
d2f_dx9dx10=diff(df_dx9,x10)
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4]),
(x6,x[0,5]),
(x7,x[0,6]),
(x8,x[0,7]),
(x9,x[0,8]),
(x10,x[0,9])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf(),
df_dx6.subs(x_temp).evalf(),
df_dx7.subs(x_temp).evalf(),
df_dx8.subs(x_temp).evalf(),
df_dx9.subs(x_temp).evalf(),
df_dx10.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf(),
d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx1dx10.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),
d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),
d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),
d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf(),
d2f_dx5dx6.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf()],
[d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx5dx6.subs(x_temp).evalf(),
d2f_dx62.subs(x_temp).evalf(),d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx6dx10.subs(x_temp).evalf()],
[d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),
d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx72.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf()],
[d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),
d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx82.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf()],
[d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),
d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx92.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf()],
[d2f_dx1dx10.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf(),
d2f_dx6dx10.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf(),d2f_dx102.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
    x_temp = [(x1,x[i,0]),
              (x2,x[i,1]),
              (x3,x[i,2]),
              (x4,x[i,3]),
              (x5,x[i,4]),
              (x6,x[i,5]),
              (x7,x[i,6]),
              (x8,x[i,7]),
              (x9,x[i,8]),
              (x10,x[i,9])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf(),
df_dx6.subs(x_temp).evalf(),
df_dx7.subs(x_temp).evalf(),
df_dx8.subs(x_temp).evalf(),
df_dx9.subs(x_temp).evalf(),
df_dx10.subs(x_temp).evalf()])#Vector gradiente
H = np.array([ #Hessiano
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf(),
d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx1dx10.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),
d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),
d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),
d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf(),
d2f_dx5dx6.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf()],
[d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx5dx6.subs(x_temp).evalf(),
d2f_dx62.subs(x_temp).evalf(),d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx6dx10.subs(x_temp).evalf()],
[d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),
d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx72.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf()],
[d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),
d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx82.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf()],
[d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),
d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx92.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf()],
[d2f_dx1dx10.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf(),
d2f_dx6dx10.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf(),d2f_dx102.subs(x_temp).evalf()]])
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
```
## Rastrigin Function
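For reference, the objective in this section is the Rastrigin function, which also has its global minimum at $f(0,\dots,0)=0$:

$$f(x) = 10\,d + \sum_{i=1}^{d}\left(x_i^2 - 10\cos(2\pi x_i)\right)$$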
### 2 Dimensions
```
x1, x2= symbols('x1 x2') #variables simbolicas
x = np.array([[-0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 200 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Error minimo, criterio de parada
d=2
f=10*d+((x1**2-10*cos(2*pi*x1))+(x2**2-10*cos(2*pi*x2))) #función objetivo
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx1dx2=diff(df_dx1,x2)
x_temp = [(x1,x[0,0]),(x2,x[0,1])]
grad = np.array([df_dx1.subs(x_temp).evalf(),df_dx2.subs(x_temp).evalf()])#Vector gradiente
H = np.array([[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf()],[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
ide = np.identity(d) #matriz de identidad
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
x_temp = [(x1,x[i,0]),(x2,x[i,1])]
grad = np.array([df_dx1.subs(x_temp).evalf(),df_dx2.subs(x_temp).evalf()])#Vector gradiente
H = np.array([[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf()],[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf()]])#Hessiano
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
vals = np.arange(-1,1,0.1)
z_c = np.ones(len(vals)**2).reshape(len(vals),len(vals))
for i in range(0,len(vals)):
for j in range(0,len(vals)):
x_test = [(x1,vals[i]),(x2,vals[j])]
z_c[i,j] = float(f.subs(x_test).evalf())
plt.contourf(vals,vals,z_c)
plt.plot(x[:,0],x[:,1],c='red')
plt.colorbar();
```
### 5 Dimensions
```
x1, x2, x3, x4, x5= symbols('x1 x2 x3 x4 x5') #variables simbolicas
x = np.array([[-0.5,0.5,0.5,0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 200 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Error minimo, criterio de parada
d = 5
f=10*d+((x1**2-10*cos(2*pi*x1))+(x2**2-10*cos(2*pi*x2))+(x3**2-10*cos(2*pi*x3))+(x4**2-10*cos(2*pi*x4))+(x5**2-10*cos(2*pi*x5))) #función objetivo
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
df_dx3=diff(f,x3)
df_dx4=diff(f,x4)
df_dx5=diff(f,x5)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx32=diff(df_dx3,x3)
d2f_dx42=diff(df_dx4,x4)
d2f_dx52=diff(df_dx5,x5)
d2f_dx1dx2=diff(df_dx1,x2)
d2f_dx1dx3=diff(df_dx1,x3)
d2f_dx1dx4=diff(df_dx1,x4)
d2f_dx1dx5=diff(df_dx1,x5)
d2f_dx2dx3=diff(df_dx2,x3)
d2f_dx2dx4=diff(df_dx2,x4)
d2f_dx2dx5=diff(df_dx2,x5)
d2f_dx3dx4=diff(df_dx3,x4)
d2f_dx3dx5=diff(df_dx3,x5)
d2f_dx4dx5=diff(df_dx4,x5)
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
ide = np.identity(d) #matriz de identidad
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
x_temp = [(x1,x[i,0]),
(x2,x[i,1]),
(x3,x[i,2]),
(x4,x[i,3]),
(x5,x[i,4])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf()]])#Hessiano
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
```
### 10 Dimensions
```
x1,x2,x3,x4,x5,x6,x7,x8,x9,x10= symbols('x1 x2 x3 x4 x5 x6 x7 x8 x9 x10') #variables simbolicas
x = np.array([[-0.5,0.5,0.5,0.5,0.5,-0.5,0.5,0.5,0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 1000 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Error minimo, criterio de parada
d = 10
f=10*d+((x1**2-10*cos(2*pi*x1))+(x2**2-10*cos(2*pi*x2))+(x3**2-10*cos(2*pi*x3))+(x4**2-10*cos(2*pi*x4))+(x5**2-10*cos(2*pi*x5))+(x6**2-10*cos(2*pi*x6))+(x7**2-10*cos(2*pi*x7))+(x8**2-10*cos(2*pi*x8))+(x9**2-10*cos(2*pi*x9))+(x10**2-10*cos(2*pi*x10))) #objective function
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
df_dx3=diff(f,x3)
df_dx4=diff(f,x4)
df_dx5=diff(f,x5)
df_dx6=diff(f,x6)
df_dx7=diff(f,x7)
df_dx8=diff(f,x8)
df_dx9=diff(f,x9)
df_dx10=diff(f,x10)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx32=diff(df_dx3,x3)
d2f_dx42=diff(df_dx4,x4)
d2f_dx52=diff(df_dx5,x5)
d2f_dx62=diff(df_dx6,x6)
d2f_dx72=diff(df_dx7,x7)
d2f_dx82=diff(df_dx8,x8)
d2f_dx92=diff(df_dx9,x9)
d2f_dx102=diff(df_dx10,x10)
d2f_dx1dx2=diff(df_dx1,x2)
d2f_dx1dx3=diff(df_dx1,x3)
d2f_dx1dx4=diff(df_dx1,x4)
d2f_dx1dx5=diff(df_dx1,x5)
d2f_dx1dx6=diff(df_dx1,x6)
d2f_dx1dx7=diff(df_dx1,x7)
d2f_dx1dx8=diff(df_dx1,x8)
d2f_dx1dx9=diff(df_dx1,x9)
d2f_dx1dx10=diff(df_dx1,x10)
d2f_dx2dx3=diff(df_dx2,x3)
d2f_dx2dx4=diff(df_dx2,x4)
d2f_dx2dx5=diff(df_dx2,x5)
d2f_dx2dx6=diff(df_dx2,x6)
d2f_dx2dx7=diff(df_dx2,x7)
d2f_dx2dx8=diff(df_dx2,x8)
d2f_dx2dx9=diff(df_dx2,x9)
d2f_dx2dx10=diff(df_dx2,x10)
d2f_dx3dx4=diff(df_dx3,x4)
d2f_dx3dx5=diff(df_dx3,x5)
d2f_dx3dx6=diff(df_dx3,x6)
d2f_dx3dx7=diff(df_dx3,x7)
d2f_dx3dx8=diff(df_dx3,x8)
d2f_dx3dx9=diff(df_dx3,x9)
d2f_dx3dx10=diff(df_dx3,x10)
d2f_dx4dx5=diff(df_dx4,x5)
d2f_dx4dx6=diff(df_dx4,x6)
d2f_dx4dx7=diff(df_dx4,x7)
d2f_dx4dx8=diff(df_dx4,x8)
d2f_dx4dx9=diff(df_dx4,x9)
d2f_dx4dx10=diff(df_dx4,x10)
d2f_dx5dx6=diff(df_dx5,x6)
d2f_dx5dx7=diff(df_dx5,x7)
d2f_dx5dx8=diff(df_dx5,x8)
d2f_dx5dx9=diff(df_dx5,x9)
d2f_dx5dx10=diff(df_dx5,x10)
d2f_dx6dx7=diff(df_dx6,x7)
d2f_dx6dx8=diff(df_dx6,x8)
d2f_dx6dx9=diff(df_dx6,x9)
d2f_dx6dx10=diff(df_dx6,x10)
d2f_dx7dx8=diff(df_dx7,x8)
d2f_dx7dx9=diff(df_dx7,x9)
d2f_dx7dx10=diff(df_dx7,x10)
d2f_dx8dx9=diff(df_dx8,x9)
d2f_dx8dx10=diff(df_dx8,x10)
d2f_dx9dx10=diff(df_dx9,x10)
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4]),
(x6,x[0,5]),
(x7,x[0,6]),
(x8,x[0,7]),
(x9,x[0,8]),
(x10,x[0,9])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf(),
df_dx6.subs(x_temp).evalf(),
df_dx7.subs(x_temp).evalf(),
df_dx8.subs(x_temp).evalf(),
df_dx9.subs(x_temp).evalf(),
df_dx10.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf(),
d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx1dx10.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),
d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),
d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),
d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf(),
d2f_dx5dx6.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf()],
[d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx5dx6.subs(x_temp).evalf(),
d2f_dx62.subs(x_temp).evalf(),d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx6dx10.subs(x_temp).evalf()],
[d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),
d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx72.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf()],
[d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),
d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx82.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf()],
[d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),
d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx92.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf()],
[d2f_dx1dx10.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf(),
d2f_dx6dx10.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf(),d2f_dx102.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
ide = np.identity(d) #matriz de identidad
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
    x_temp = [(x1,x[i,0]),
              (x2,x[i,1]),
              (x3,x[i,2]),
              (x4,x[i,3]),
              (x5,x[i,4]),
              (x6,x[i,5]),
              (x7,x[i,6]),
              (x8,x[i,7]),
              (x9,x[i,8]),
              (x10,x[i,9])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf(),
df_dx6.subs(x_temp).evalf(),
df_dx7.subs(x_temp).evalf(),
df_dx8.subs(x_temp).evalf(),
df_dx9.subs(x_temp).evalf(),
df_dx10.subs(x_temp).evalf()])#Vector gradiente
H = np.array([ #Hessiano
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf(),
d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx1dx10.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),
d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),
d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),
d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf(),
d2f_dx5dx6.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf()],
[d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx5dx6.subs(x_temp).evalf(),
d2f_dx62.subs(x_temp).evalf(),d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx6dx10.subs(x_temp).evalf()],
[d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),
d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx72.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf()],
[d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),
d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx82.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf()],
[d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),
d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx92.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf()],
[d2f_dx1dx10.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf(),
d2f_dx6dx10.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf(),d2f_dx102.subs(x_temp).evalf()]])
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
```
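The per-dimension cells above write out every partial derivative by hand. As a minimal sketch (an addition, not part of the original notebook), the same iteration can be generated automatically for any dimension with sympy's `jacobian`/`hessian` and `lambdify`; the helper name `levenberg_marquardt` is illustrative, and the same helper could be reused for the Levy, Griewank and extended Rosenbrock sections below:
```
import numpy as np
import numpy.linalg as npl
from sympy import symbols, cos, pi, hessian, lambdify, Matrix

def levenberg_marquardt(f_expr, sym_vars, x0, lamb=100.0, tol=1e-12, max_iter=100):
    # Build symbolic gradient and Hessian once, then compile them to numpy functions.
    grad_expr = Matrix([f_expr]).jacobian(list(sym_vars)).T  # column vector of partials
    hess_expr = hessian(f_expr, sym_vars)
    f_num = lambdify(sym_vars, f_expr, 'numpy')
    grad_num = lambdify(sym_vars, grad_expr, 'numpy')
    hess_num = lambdify(sym_vars, hess_expr, 'numpy')
    x = np.asarray(x0, dtype=float)
    ide = np.identity(len(x))
    last_z = f_num(*x)
    for i in range(max_iter):
        g = np.array(grad_num(*x), dtype=float).ravel()
        H = np.array(hess_num(*x), dtype=float)
        x = x - npl.inv(lamb*ide + H) @ g      # damped Newton step
        z = f_num(*x)
        if abs(z - last_z) <= tol:             # same stopping rule as the cells above
            break
        last_z = z
    return x, z, i

# Example: 5-dimensional Rastrigin with the same starting point as above
vars5 = symbols('x1:6')
f5 = 10*5 + sum(v**2 - 10*cos(2*pi*v) for v in vars5)
x_opt, z_opt, iters = levenberg_marquardt(f5, vars5, [-0.5, 0.5, 0.5, 0.5, 0.5], lamb=200)
print(iters, z_opt, x_opt)
```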
## Levy Function
## Griewank Function
## Extended Rosenbrock Function
|
github_jupyter
|
!pip install sympy
import numpy as np
import numpy.linalg as npl
from sympy import symbols, diff, pi, cos, exp, sqrt, Abs
import matplotlib.pyplot as plt
x1, x2= symbols('x1 x2') #variables simbolicas
x = np.array([[-0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 100 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Error minimo, criterio de parada
a = 20
b = 0.2
c = 2*pi
d=2
f=-a*exp(-b*sqrt(1/d*(x1**2+x2**2)))-exp(1/d*(cos(c*x1)+cos(c*x2)))+a+exp(1)#función objetivo
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx1dx2=diff(df_dx1,x2)
x_temp = [(x1,x[0,0]),(x2,x[0,1])]
grad = np.array([df_dx1.subs(x_temp).evalf(),df_dx2.subs(x_temp).evalf()])#Vector gradiente
H = np.array([[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf()],[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
ide = np.identity(d) #matriz de identidad
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
x_temp = [(x1,x[i,0]),(x2,x[i,1])]
grad = np.array([df_dx1.subs(x_temp).evalf(),df_dx2.subs(x_temp).evalf()])#Vector gradiente
H = np.array([[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf()],[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf()]])#Hessiano
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
vals = np.arange(-1,1,0.1)
z_c = np.ones(len(vals)**2).reshape(len(vals),len(vals))
for i in range(0,len(vals)):
for j in range(0,len(vals)):
x_test = [(x1,vals[i]),(x2,vals[j])]
z_c[i,j] = float(f.subs(x_test).evalf())
plt.contourf(vals,vals,z_c)
plt.plot(x[:,0],x[:,1],c='red')
plt.colorbar();
x1, x2, x3, x4, x5= symbols('x1 x2 x3 x4 x5') #variables simbolicas
x = np.array([[-0.5,0.5,0.5,0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 100 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
ide = np.identity(d) #matriz de identidad
error = 10**-60 #Error minimo, criterio de parada
a = 20
b = 0.2
c = 2*pi
d = 5
f=-a*exp(-b*sqrt(1/d*(x1**2+x2**2+x3**2+x4**3+x5**2)))-exp(1/d*(cos(c*x1)+cos(c*x2)+cos(c*x3)+cos(c*x4)+cos(c*x5)))+a+exp(1)#función objetivo
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
df_dx3=diff(f,x3)
df_dx4=diff(f,x4)
df_dx5=diff(f,x5)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx32=diff(df_dx3,x3)
d2f_dx42=diff(df_dx4,x4)
d2f_dx52=diff(df_dx5,x5)
d2f_dx1dx2=diff(df_dx1,x2)
d2f_dx1dx3=diff(df_dx1,x3)
d2f_dx1dx4=diff(df_dx1,x4)
d2f_dx1dx5=diff(df_dx1,x5)
d2f_dx2dx3=diff(df_dx2,x3)
d2f_dx2dx4=diff(df_dx2,x4)
d2f_dx2dx5=diff(df_dx2,x5)
d2f_dx3dx4=diff(df_dx3,x4)
d2f_dx3dx5=diff(df_dx3,x5)
d2f_dx4dx5=diff(df_dx4,x5)
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
x_temp = [(x1,x[i,0]),
(x2,x[i,1]),
(x3,x[i,2]),
(x4,x[i,3]),
(x5,x[i,4])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf()]])#Hessiano
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
x1,x2,x3,x4,x5,x6,x7,x8,x9,x10= symbols('x1 x2 x3 x4 x5 x6 x7 x8 x9 x10') #variables simbolicas
x = np.array([[-0.5,0.5,0.5,0.5,0.5,-0.5,0.5,0.5,0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 1000 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
ide = np.identity(d) #matriz de identidad
error = 10**-60 #Error minimo, criterio de parada
a = 20
b = 0.2
c = 2*pi
d = 10
f=-a*exp(-b*sqrt(1/d*(x1**2+x2**2+x3**2+x4**3+x5**2+x6**2+x7**2+x8**2+x9**3+x10**2)))-exp(1/d*(cos(c*x1)+cos(c*x2)+cos(c*x3)+cos(c*x4)+cos(c*x5)+cos(c*x6)+cos(c*x7)+cos(c*x8)+cos(c*x9)+cos(c*x10)))+a+exp(1)#función objetivo
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
df_dx3=diff(f,x3)
df_dx4=diff(f,x4)
df_dx5=diff(f,x5)
df_dx6=diff(f,x6)
df_dx7=diff(f,x7)
df_dx8=diff(f,x8)
df_dx9=diff(f,x9)
df_dx10=diff(f,x10)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx32=diff(df_dx3,x3)
d2f_dx42=diff(df_dx4,x4)
d2f_dx52=diff(df_dx5,x5)
d2f_dx62=diff(df_dx6,x6)
d2f_dx72=diff(df_dx7,x7)
d2f_dx82=diff(df_dx8,x8)
d2f_dx92=diff(df_dx9,x9)
d2f_dx102=diff(df_dx10,x10)
d2f_dx1dx2=diff(df_dx1,x2)
d2f_dx1dx3=diff(df_dx1,x3)
d2f_dx1dx4=diff(df_dx1,x4)
d2f_dx1dx5=diff(df_dx1,x5)
d2f_dx1dx6=diff(df_dx1,x6)
d2f_dx1dx7=diff(df_dx1,x7)
d2f_dx1dx8=diff(df_dx1,x8)
d2f_dx1dx9=diff(df_dx1,x9)
d2f_dx1dx10=diff(df_dx1,x10)
d2f_dx2dx3=diff(df_dx2,x3)
d2f_dx2dx4=diff(df_dx2,x4)
d2f_dx2dx5=diff(df_dx2,x5)
d2f_dx2dx6=diff(df_dx2,x6)
d2f_dx2dx7=diff(df_dx2,x7)
d2f_dx2dx8=diff(df_dx2,x8)
d2f_dx2dx9=diff(df_dx2,x9)
d2f_dx2dx10=diff(df_dx2,x10)
d2f_dx3dx4=diff(df_dx3,x4)
d2f_dx3dx5=diff(df_dx3,x5)
d2f_dx3dx6=diff(df_dx3,x6)
d2f_dx3dx7=diff(df_dx3,x7)
d2f_dx3dx8=diff(df_dx3,x8)
d2f_dx3dx9=diff(df_dx3,x9)
d2f_dx3dx10=diff(df_dx3,x10)
d2f_dx4dx5=diff(df_dx4,x5)
d2f_dx4dx6=diff(df_dx4,x6)
d2f_dx4dx7=diff(df_dx4,x7)
d2f_dx4dx8=diff(df_dx4,x8)
d2f_dx4dx9=diff(df_dx4,x9)
d2f_dx4dx10=diff(df_dx4,x10)
d2f_dx5dx6=diff(df_dx5,x6)
d2f_dx5dx7=diff(df_dx5,x7)
d2f_dx5dx8=diff(df_dx5,x8)
d2f_dx5dx9=diff(df_dx5,x9)
d2f_dx5dx10=diff(df_dx5,x10)
d2f_dx6dx7=diff(df_dx6,x7)
d2f_dx6dx8=diff(df_dx6,x8)
d2f_dx6dx9=diff(df_dx6,x9)
d2f_dx6dx10=diff(df_dx6,x10)
d2f_dx7dx8=diff(df_dx7,x8)
d2f_dx7dx9=diff(df_dx7,x9)
d2f_dx7dx10=diff(df_dx7,x10)
d2f_dx8dx9=diff(df_dx8,x9)
d2f_dx8dx10=diff(df_dx8,x10)
d2f_dx9dx10=diff(df_dx9,x10)
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4]),
(x6,x[0,5]),
(x7,x[0,6]),
(x8,x[0,7]),
(x9,x[0,8]),
(x10,x[0,9])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf(),
df_dx6.subs(x_temp).evalf(),
df_dx7.subs(x_temp).evalf(),
df_dx8.subs(x_temp).evalf(),
df_dx9.subs(x_temp).evalf(),
df_dx10.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf(),
d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx1dx10.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),
d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),
d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),
d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf(),
d2f_dx5dx6.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf()],
[d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx5dx6.subs(x_temp).evalf(),
d2f_dx62.subs(x_temp).evalf(),d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx6dx10.subs(x_temp).evalf()],
[d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),
d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx72.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf()],
[d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),
d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx82.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf()],
[d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),
d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx92.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf()],
[d2f_dx1dx10.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf(),
d2f_dx6dx10.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf(),d2f_dx102.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4]),
(x6,x[0,5]),
(x7,x[0,6]),
(x8,x[0,7]),
(x9,x[0,8]),
(x10,x[0,9])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf(),
df_dx6.subs(x_temp).evalf(),
df_dx7.subs(x_temp).evalf(),
df_dx8.subs(x_temp).evalf(),
df_dx9.subs(x_temp).evalf(),
df_dx10.subs(x_temp).evalf()])#Vector gradiente
H = np.array([ #Hessiano
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf(),
d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx1dx10.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),
d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),
d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),
d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf(),
d2f_dx5dx6.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf()],
[d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx5dx6.subs(x_temp).evalf(),
d2f_dx62.subs(x_temp).evalf(),d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx6dx10.subs(x_temp).evalf()],
[d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),
d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx72.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf()],
[d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),
d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx82.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf()],
[d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),
d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx92.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf()],
[d2f_dx1dx10.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf(),
d2f_dx6dx10.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf(),d2f_dx102.subs(x_temp).evalf()]])
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
x1, x2= symbols('x1 x2') #variables simbolicas
x = np.array([[-0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 200 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Error minimo, criterio de parada
d=2
f=10*d+((x1**2-10*cos(2*pi*x1))+(x2**2-10*cos(2*pi*x2))) #función objetivo
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx1dx2=diff(df_dx1,x2)
x_temp = [(x1,x[0,0]),(x2,x[0,1])]
grad = np.array([df_dx1.subs(x_temp).evalf(),df_dx2.subs(x_temp).evalf()])#Vector gradiente
H = np.array([[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf()],[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp)#evaluar Z con el punto inicial
z = np.append(z,[last_z.evalf()]) # Evaluación del punto inicial
ide = np.identity(d) #matriz de identidad
i = 0
while i <= 100:
h_new = npl.inv(((lamb*ide)+H.astype(float))) #Inverso del Hessiano Aproximado.
new_x = (x[i].T-np.matmul(h_new,grad.T)).T #Siguiente paso
x = np.append(x,new_x).reshape((i+2),d) #Agregar el paso al vector de pasos
i += 1 #Aumentar el contador de iteraciones
x_temp = [(x1,x[i,0]),(x2,x[i,1])]
grad = np.array([df_dx1.subs(x_temp).evalf(),df_dx2.subs(x_temp).evalf()])#Vector gradiente
H = np.array([[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf()],[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf()]])#Hessiano
actual_z = f.subs(x_temp)
z = np.append(z,[actual_z.evalf()]) # Evaluación del punto actual
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
vals = np.arange(-1,1,0.1)
z_c = np.ones(len(vals)**2).reshape(len(vals),len(vals))
for i in range(0,len(vals)):
for j in range(0,len(vals)):
x_test = [(x1,vals[i]),(x2,vals[j])]
z_c[i,j] = float(f.subs(x_test).evalf())
plt.contourf(vals,vals,z_c)
plt.plot(x[:,0],x[:,1],c='red')
plt.colorbar();
x1, x2, x3, x4, x5= symbols('x1 x2 x3 x4 x5') #variables simbolicas
x = np.array([[-0.5,0.5,0.5,0.5,0.5]])#Vector de pasos con el punto inicial
lamb = 200 #Valor lambda para el meta-hessiano
z = np.array([]) #Vector de resultados
error = 10**-60 #Error minimo, criterio de parada
d = 5
f=10*d+((x1**2-10*cos(2*pi*x1))+(x2**2-10*cos(2*pi*x2))+(x3**2-10*cos(2*pi*x3))+(x4**2-10*cos(2*pi*x4))+(x5**2-10*cos(2*pi*x5))) #función objetivo
#Derivadas
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
df_dx3=diff(f,x3)
df_dx4=diff(f,x4)
df_dx5=diff(f,x5)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx32=diff(df_dx3,x3)
d2f_dx42=diff(df_dx4,x4)
d2f_dx52=diff(df_dx5,x5)
d2f_dx1dx2=diff(df_dx1,x2)
d2f_dx1dx3=diff(df_dx1,x3)
d2f_dx1dx4=diff(df_dx1,x4)
d2f_dx1dx5=diff(df_dx1,x5)
d2f_dx2dx3=diff(df_dx2,x3)
d2f_dx2dx4=diff(df_dx2,x4)
d2f_dx2dx5=diff(df_dx2,x5)
d2f_dx3dx4=diff(df_dx3,x4)
d2f_dx3dx5=diff(df_dx3,x5)
d2f_dx4dx5=diff(df_dx4,x5)
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp) # evaluate Z at the initial point
z = np.append(z,[last_z.evalf()]) # record the value at the initial point
ide = np.identity(d) # identity matrix
i = 0
while i <= 100:
    h_new = npl.inv(((lamb*ide)+H.astype(float))) # Inverse of the damped (approximate) Hessian
    new_x = (x[i].T-np.matmul(h_new,grad.T)).T # Next step
    x = np.append(x,new_x).reshape((i+2),d) # Append the step to the list of iterates
    i += 1 # Increment the iteration counter
x_temp = [(x1,x[i,0]),
(x2,x[i,1]),
(x3,x[i,2]),
(x4,x[i,3]),
(x5,x[i,4])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf()]])#Hessiano
actual_z = f.subs(x_temp)
    z = np.append(z,[actual_z.evalf()]) # Evaluate the current point
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 = symbols('x1 x2 x3 x4 x5 x6 x7 x8 x9 x10') # symbolic variables
x = np.array([[-0.5,0.5,0.5,0.5,0.5,-0.5,0.5,0.5,0.5,0.5]]) # list of iterates, starting at the initial point
lamb = 1000 # lambda value for the damped Hessian
z = np.array([]) # vector of objective values
error = 10**-60 # minimum change, stopping criterion
d = 10
f=10*d+((x1**2-10*cos(2*pi*x1))+(x2**2-10*cos(2*pi*x2))+(x3**2-10*cos(2*pi*x3))+(x4**2-10*cos(2*pi*x4))+(x5**2-10*cos(2*pi*x5))+(x6**2-10*cos(2*pi*x6))+(x7**2-10*cos(2*pi*x7))+(x8**2-10*cos(2*pi*x8))+(x9**2-10*cos(2*pi*x9))+(x10**2-10*cos(2*pi*x10))) # objective function
# Derivatives
df_dx1=diff(f,x1)
df_dx2=diff(f,x2)
df_dx3=diff(f,x3)
df_dx4=diff(f,x4)
df_dx5=diff(f,x5)
df_dx6=diff(f,x6)
df_dx7=diff(f,x7)
df_dx8=diff(f,x8)
df_dx9=diff(f,x9)
df_dx10=diff(f,x10)
d2f_dx12=diff(df_dx1,x1)
d2f_dx22=diff(df_dx2,x2)
d2f_dx32=diff(df_dx3,x3)
d2f_dx42=diff(df_dx4,x4)
d2f_dx52=diff(df_dx5,x5)
d2f_dx62=diff(df_dx6,x6)
d2f_dx72=diff(df_dx7,x7)
d2f_dx82=diff(df_dx8,x8)
d2f_dx92=diff(df_dx9,x9)
d2f_dx102=diff(df_dx10,x10)
d2f_dx1dx2=diff(df_dx1,x2)
d2f_dx1dx3=diff(df_dx1,x3)
d2f_dx1dx4=diff(df_dx1,x4)
d2f_dx1dx5=diff(df_dx1,x5)
d2f_dx1dx6=diff(df_dx1,x6)
d2f_dx1dx7=diff(df_dx1,x7)
d2f_dx1dx8=diff(df_dx1,x8)
d2f_dx1dx9=diff(df_dx1,x9)
d2f_dx1dx10=diff(df_dx1,x10)
d2f_dx2dx3=diff(df_dx2,x3)
d2f_dx2dx4=diff(df_dx2,x4)
d2f_dx2dx5=diff(df_dx2,x5)
d2f_dx2dx6=diff(df_dx2,x6)
d2f_dx2dx7=diff(df_dx2,x7)
d2f_dx2dx8=diff(df_dx2,x8)
d2f_dx2dx9=diff(df_dx2,x9)
d2f_dx2dx10=diff(df_dx2,x10)
d2f_dx3dx4=diff(df_dx3,x4)
d2f_dx3dx5=diff(df_dx3,x5)
d2f_dx3dx6=diff(df_dx3,x6)
d2f_dx3dx7=diff(df_dx3,x7)
d2f_dx3dx8=diff(df_dx3,x8)
d2f_dx3dx9=diff(df_dx3,x9)
d2f_dx3dx10=diff(df_dx3,x10)
d2f_dx4dx5=diff(df_dx4,x5)
d2f_dx4dx6=diff(df_dx4,x6)
d2f_dx4dx7=diff(df_dx4,x7)
d2f_dx4dx8=diff(df_dx4,x8)
d2f_dx4dx9=diff(df_dx4,x9)
d2f_dx4dx10=diff(df_dx4,x10)
d2f_dx5dx6=diff(df_dx5,x6)
d2f_dx5dx7=diff(df_dx5,x7)
d2f_dx5dx8=diff(df_dx5,x8)
d2f_dx5dx9=diff(df_dx5,x9)
d2f_dx5dx10=diff(df_dx5,x10)
d2f_dx6dx7=diff(df_dx6,x7)
d2f_dx6dx8=diff(df_dx6,x8)
d2f_dx6dx9=diff(df_dx6,x9)
d2f_dx6dx10=diff(df_dx6,x10)
d2f_dx7dx8=diff(df_dx7,x8)
d2f_dx7dx9=diff(df_dx7,x9)
d2f_dx7dx10=diff(df_dx7,x10)
d2f_dx8dx9=diff(df_dx8,x9)
d2f_dx8dx10=diff(df_dx8,x10)
d2f_dx9dx10=diff(df_dx9,x10)
x_temp = [(x1,x[0,0]),
(x2,x[0,1]),
(x3,x[0,2]),
(x4,x[0,3]),
(x5,x[0,4]),
(x6,x[0,5]),
(x7,x[0,6]),
(x8,x[0,7]),
(x9,x[0,8]),
(x10,x[0,9])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf(),
df_dx6.subs(x_temp).evalf(),
df_dx7.subs(x_temp).evalf(),
df_dx8.subs(x_temp).evalf(),
df_dx9.subs(x_temp).evalf(),
df_dx10.subs(x_temp).evalf()])#Vector gradiente
H = np.array([
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf(),
d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx1dx10.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),
d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),
d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),
d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf(),
d2f_dx5dx6.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf()],
[d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx5dx6.subs(x_temp).evalf(),
d2f_dx62.subs(x_temp).evalf(),d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx6dx10.subs(x_temp).evalf()],
[d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),
d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx72.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf()],
[d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),
d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx82.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf()],
[d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),
d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx92.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf()],
[d2f_dx1dx10.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf(),
d2f_dx6dx10.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf(),d2f_dx102.subs(x_temp).evalf()]])#Hessiano
last_z = f.subs(x_temp) # evaluate Z at the initial point
z = np.append(z,[last_z.evalf()]) # record the value at the initial point
ide = np.identity(d) # identity matrix
i = 0
while i <= 100:
    h_new = npl.inv(((lamb*ide)+H.astype(float))) # Inverse of the damped (approximate) Hessian
    new_x = (x[i].T-np.matmul(h_new,grad.T)).T # Next step
    x = np.append(x,new_x).reshape((i+2),d) # Append the step to the list of iterates
    i += 1 # Increment the iteration counter
    x_temp = [(x1,x[i,0]),  # substitute the current iterate x[i]
              (x2,x[i,1]),
              (x3,x[i,2]),
              (x4,x[i,3]),
              (x5,x[i,4]),
              (x6,x[i,5]),
              (x7,x[i,6]),
              (x8,x[i,7]),
              (x9,x[i,8]),
              (x10,x[i,9])]
grad = np.array([df_dx1.subs(x_temp).evalf(),
df_dx2.subs(x_temp).evalf(),
df_dx3.subs(x_temp).evalf(),
df_dx4.subs(x_temp).evalf(),
df_dx5.subs(x_temp).evalf(),
df_dx6.subs(x_temp).evalf(),
df_dx7.subs(x_temp).evalf(),
df_dx8.subs(x_temp).evalf(),
df_dx9.subs(x_temp).evalf(),
df_dx10.subs(x_temp).evalf()])#Vector gradiente
H = np.array([ #Hessiano
[d2f_dx12.subs(x_temp).evalf(),d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx1dx5.subs(x_temp).evalf(),
d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx1dx10.subs(x_temp).evalf()],
[d2f_dx1dx2.subs(x_temp).evalf(),d2f_dx22.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),
d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf()],
[d2f_dx1dx3.subs(x_temp).evalf(),d2f_dx2dx3.subs(x_temp).evalf(),d2f_dx32.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),
d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf()],
[d2f_dx1dx4.subs(x_temp).evalf(),d2f_dx2dx4.subs(x_temp).evalf(),d2f_dx3dx4.subs(x_temp).evalf(),d2f_dx42.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),
d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf()],
[d2f_dx1dx5.subs(x_temp).evalf(),d2f_dx2dx5.subs(x_temp).evalf(),d2f_dx3dx5.subs(x_temp).evalf(),d2f_dx4dx5.subs(x_temp).evalf(),d2f_dx52.subs(x_temp).evalf(),
d2f_dx5dx6.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf()],
[d2f_dx1dx6.subs(x_temp).evalf(),d2f_dx2dx6.subs(x_temp).evalf(),d2f_dx3dx6.subs(x_temp).evalf(),d2f_dx4dx6.subs(x_temp).evalf(),d2f_dx5dx6.subs(x_temp).evalf(),
d2f_dx62.subs(x_temp).evalf(),d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx6dx10.subs(x_temp).evalf()],
[d2f_dx1dx7.subs(x_temp).evalf(),d2f_dx2dx7.subs(x_temp).evalf(),d2f_dx3dx7.subs(x_temp).evalf(),d2f_dx4dx7.subs(x_temp).evalf(),d2f_dx5dx7.subs(x_temp).evalf(),
d2f_dx6dx7.subs(x_temp).evalf(),d2f_dx72.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf()],
[d2f_dx1dx8.subs(x_temp).evalf(),d2f_dx2dx8.subs(x_temp).evalf(),d2f_dx3dx8.subs(x_temp).evalf(),d2f_dx4dx8.subs(x_temp).evalf(),d2f_dx5dx8.subs(x_temp).evalf(),
d2f_dx6dx8.subs(x_temp).evalf(),d2f_dx7dx8.subs(x_temp).evalf(),d2f_dx82.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf()],
[d2f_dx1dx9.subs(x_temp).evalf(),d2f_dx2dx9.subs(x_temp).evalf(),d2f_dx3dx9.subs(x_temp).evalf(),d2f_dx4dx9.subs(x_temp).evalf(),d2f_dx5dx9.subs(x_temp).evalf(),
d2f_dx6dx9.subs(x_temp).evalf(),d2f_dx7dx9.subs(x_temp).evalf(),d2f_dx8dx9.subs(x_temp).evalf(),d2f_dx92.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf()],
[d2f_dx1dx10.subs(x_temp).evalf(),d2f_dx2dx10.subs(x_temp).evalf(),d2f_dx3dx10.subs(x_temp).evalf(),d2f_dx4dx10.subs(x_temp).evalf(),d2f_dx5dx10.subs(x_temp).evalf(),
d2f_dx6dx10.subs(x_temp).evalf(),d2f_dx7dx10.subs(x_temp).evalf(),d2f_dx8dx10.subs(x_temp).evalf(),d2f_dx9dx10.subs(x_temp).evalf(),d2f_dx102.subs(x_temp).evalf()]])
actual_z = f.subs(x_temp)
    z = np.append(z,[actual_z.evalf()]) # Evaluate the current point
if Abs(actual_z-last_z).evalf()<=error:
break
else:
last_z = actual_z
print("last iterarion: " + str(i))
print("final Z value: " + str(actual_z.evalf()))
print("last x values: "+ str(new_x))
| 0.189671 | 0.863103 |
# Coin change variation
Find the smallest number of coins for a given amount of change.
The idea is to find the most efficient answer for every possible change amount $n^{\ast} \in \{0, 1, 2, \dots, n \}$, starting from a base case (all of the change given in 1-cent coins), and from that recursively build the optimal sequence that adds up to a total change of $n$.
Starting from a list of k valid coins $M = \{m_1, m_2, \dots, m_k \}$, the algorithm must find which coin should be chosen to obtain the best path leading to a change of $n^{\ast}$, and repeat this for every $n^{\ast}$ (in increasing order) until reaching the case $n^{\ast} = n$, which is the one we want.
* For each $n^{\ast}$, since a known feasible solution is "$n^{\ast}$ 1-cent coins", only feasible coins are tried (that is, coins that are valid in $M$ and not larger than $n^{\ast}$, since otherwise the amount would be exceeded immediately).
* After testing each feasible coin, store the coin that induces the sequence with the smallest number of coins needed to complete $n^{\ast}$, and so on, until $n$ is reached; the recurrence behind this choice is written out right below.
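In symbols (this notation is mine; the code below stores these values in `quantidade`), the dynamic-programming recurrence implemented by `acha_solucao` is essentially

$$ q(0) = 0, \qquad q(n^{\ast}) = 1 + \min_{m \in M,\; m \le n^{\ast}} q(n^{\ast} - m), $$

where $q(n^{\ast})$ is the minimum number of coins for a change of $n^{\ast}$. As an aside (not stated in the original), a greedy "largest coin first" rule would not be optimal here: for the first example below ($M = \{10, 7, 1\}$, change of 15) greedy gives $10 + 1 + 1 + 1 + 1 + 1$ (6 coins), while the recurrence finds $7 + 7 + 1$ (3 coins).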
### shows which coins to actually take
```
def caminho(melhor_moeda, troco):
    moedas = [] # empty list that will hold the coins for the given change ("troco")
    # repeat until the remaining change is zero
    while troco > 0:
        # which coin should be chosen for the current amount?
        moeda_escolhida = melhor_moeda[troco]
        # store that coin in the list
        moedas.append(moeda_escolhida)
        # update the change by subtracting the value of the chosen coin
        troco = troco - moeda_escolhida
    return moedas # the coins selected for the change
```
### computes the optimal value of every n* from 0 to n
```
def acha_solucao(moedas_validas, troco):
    """ iterative version
        returns, in melhor_moeda, the best coin for every change value from 0 to troco
    """
    quantidade = [0]*(troco + 1) # aux: counter holding n*[i], the optimal coin count behind melhor_moeda[i]
    melhor_moeda = [0]*(troco + 1) # stores the best coin for each change value (from 0 to n)
    moedas_validas = sorted(moedas_validas) # make sure the valid coins are sorted
    # i: pointer over the range of n*'s tested so far; runs from 1 to troco
    for i in range(1, troco + 1):
        _min = i
        moeda_a_utilizar = 1 # base case: all change in 1-cent coins
        for m in moedas_validas:
            # skip the coin if its value is larger than n*
            if (m <= i) and (quantidade[i - m] + 1 < _min):
                # if the optimal count for change n* is smaller than the provisional minimum,
                # update the optimal count
                _min = quantidade[i - m] + 1
                # and record which coin minimizes the count required for change n*
                moeda_a_utilizar = m
        # the best coin was found in the loop above, so store the values in the auxiliary arrays
        quantidade[i] = _min # table of optimal counts for every n*
        # for each n*, which coin should be chosen 'marginally'?
        melhor_moeda[i] = moeda_a_utilizar
    # return the minimum number of coins and which coins they are
    return quantidade[troco], caminho(melhor_moeda, troco)
```
## Main program
```
# define the amount of change to be given
troco = 15
# define the set of valid coins
moedas_validas = [10, 7, 1]
qtd, lista = acha_solucao(moedas_validas, troco)
print("São necessárias {} moedas: {}".format(qtd, lista))
```
# Another example
```
# define the amount of change to be given
troco = 17
# define the set of valid coins
moedas_validas = [1, 2, 5]
qtd, lista = acha_solucao(moedas_validas, troco)
print("São necessárias {} moedas: {}".format(qtd, lista))
```
|
github_jupyter
|
def caminho(melhor_moeda, troco):
    moedas = [] # empty list that will hold the coins for the given change ("troco")
    # repeat until the remaining change is zero
    while troco > 0:
        # which coin should be chosen for the current amount?
        moeda_escolhida = melhor_moeda[troco]
        # store that coin in the list
        moedas.append(moeda_escolhida)
        # update the change by subtracting the value of the chosen coin
        troco = troco - moeda_escolhida
    return moedas # the coins selected for the change
def acha_solucao(moedas_validas, troco):
    """ iterative version
        returns, in melhor_moeda, the best coin for every change value from 0 to troco
    """
    quantidade = [0]*(troco + 1) # aux: counter holding n*[i], the optimal coin count behind melhor_moeda[i]
    melhor_moeda = [0]*(troco + 1) # stores the best coin for each change value (from 0 to n)
    moedas_validas = sorted(moedas_validas) # make sure the valid coins are sorted
    # i: pointer over the range of n*'s tested so far; runs from 1 to troco
    for i in range(1, troco + 1):
        _min = i
        moeda_a_utilizar = 1 # base case: all change in 1-cent coins
        for m in moedas_validas:
            # skip the coin if its value is larger than n*
            if (m <= i) and (quantidade[i - m] + 1 < _min):
                # if the optimal count for change n* is smaller than the provisional minimum,
                # update the optimal count
                _min = quantidade[i - m] + 1
                # and record which coin minimizes the count required for change n*
                moeda_a_utilizar = m
        # the best coin was found in the loop above, so store the values in the auxiliary arrays
        quantidade[i] = _min # table of optimal counts for every n*
        # for each n*, which coin should be chosen 'marginally'?
        melhor_moeda[i] = moeda_a_utilizar
    # return the minimum number of coins and which coins they are
    return quantidade[troco], caminho(melhor_moeda, troco)
# define the amount of change to be given
troco = 15
# define the set of valid coins
moedas_validas = [10, 7, 1]
qtd, lista = acha_solucao(moedas_validas, troco)
print("São necessárias {} moedas: {}".format(qtd, lista))
# define the amount of change to be given
troco = 17
# define the set of valid coins
moedas_validas = [1, 2, 5]
qtd, lista = acha_solucao(moedas_validas, troco)
print("São necessárias {} moedas: {}".format(qtd, lista))
| 0.212069 | 0.946745 |
# Modelling Poisson using a PINN
Solving the Poisson equation $-\Delta u = f$ with a physics-informed neural network (PINN) in multiple dimensions.
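The general idea (my one-line summary of the standard PINN recipe, matching the two loss terms implemented further below) is to parametrize the solution by a network $u_\theta$ and minimize a residual-plus-boundary objective of the form

$$ \mathcal{L}(\theta) = \frac{1}{N_r}\sum_{\text{collocation pts}} \big(\Delta u_\theta + f\big)^2 \;+\; \frac{1}{N_b}\sum_{\text{boundary pts}} \big(u_\theta - g\big)^2 , $$

where $g$ is the prescribed boundary value of the manufactured solution.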
## 2D problem
### Manufactured solution
We use $u = \sin (x+y)$ on $(x,y)\in [-1,1]^2$ to manufacture the right-hand side $f = -\Delta u = 2\sin(x+y)$.
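As a quick check of the right-hand side hard-coded in the residual layer below (`findPdeLayer` adds `2*tf.sin(x+y)` to the Laplacian), differentiating the manufactured solution twice in each variable gives

$$ \Delta u = \partial_{xx}\sin(x+y) + \partial_{yy}\sin(x+y) = -2\sin(x+y), \qquad f = -\Delta u = 2\sin(x+y). $$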
### Importing packages
```
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
```
### Manufacturing data for training
```
np.random.seed(123)
pde_data_size = 10000
dim_d = 2
X_tr_pde = np.random.uniform(-1,1,pde_data_size*dim_d).reshape(pde_data_size,dim_d)
```
### Looking at the scatter plot of generated 2D points
```
plt.scatter(X_tr_pde[:,0],X_tr_pde[:,1])
plt.title("Randomly generated collocation points")
```
### Defining the labels(true values) for the training data
```
Y_tr_pde = np.zeros((X_tr_pde.shape[0],1))
# Y_tr_pde = X_tr_pde[:,0:1]
Y_tr_pde = np.concatenate([Y_tr_pde,np.zeros((Y_tr_pde.shape[0],1))],axis=1)
Y_tr_pde.shape
```
### Experiment with surface plotting (might help later when plotting the computed solution)
```
from matplotlib import cm
from matplotlib.ticker import LinearLocator
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
# Make data.
X = np.arange(-1, 1, 0.25)
Y = np.arange(-1, 1, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sin(X+Y)
# Z = np.sin(R)
# Plot the surface.
surf = ax.plot_surface(X, Y, R, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
```
## BC data
```
bc_data_size = 500
X_bc_left = np.random.uniform(-1.,1.,bc_data_size).reshape(bc_data_size,1)
X_bc_left = np.concatenate([-1*np.ones((bc_data_size,1)), X_bc_left], axis=1)
X_bc_bottom = np.random.uniform(-1.,1.,bc_data_size).reshape(bc_data_size,1)
X_bc_bottom = np.concatenate([X_bc_bottom, -1*np.ones((bc_data_size,1))], axis=1)
X_bc_right = np.random.uniform(-1.,1.,bc_data_size).reshape(bc_data_size,1)
X_bc_right = np.concatenate([1*np.ones((bc_data_size,1)), X_bc_right], axis=1)
X_bc_top = np.random.uniform(-1.,1.,bc_data_size).reshape(bc_data_size,1)
X_bc_top = np.concatenate([X_bc_top, 1*np.ones((bc_data_size,1))], axis=1)
X_bc = np.concatenate([X_bc_left, X_bc_right, X_bc_bottom, X_bc_top],axis=0)
Y_bc = np.sin(X_bc[:,0:1] + X_bc[:,1:])
Y_bc = np.concatenate([Y_bc, np.ones((Y_bc.shape[0],1))], axis=1 )
# dom_data_size = 1000
# X_domain = np.random.uniform(-1,1,dom_data_size*dim_d).reshape(dom_data_size,dim_d)
# Y_domain = X_domain[:,0:1]**2 + X_domain[:,1:]**2
# Y_domain = np.concatenate([Y_domain, np.ones((Y_domain.shape[0],1))], axis=1)
# X_tr = np.concatenate((X_tr_pde, X_bc, X_domain), axis=0)
# Y_tr = np.concatenate((Y_tr_pde, Y_bc, Y_domain), axis=0)
X_tr = np.concatenate((X_tr_pde, X_bc), axis=0)
Y_tr = np.concatenate((Y_tr_pde, Y_bc), axis=0)
```
## Training the model
```
from tensorflow.keras import backend as K
class CustomModel(tf.keras.Model):
def __init__(self):
super(CustomModel, self).__init__()
self.dense_1 = keras.layers.Dense(20, activation="tanh",
name="dense_1")
self.dense_2 = keras.layers.Dense(20, activation="tanh",
name="dense_2")
self.dense_3 = keras.layers.Dense(20, activation="tanh",
name="dense_3")
self.dense_4 = keras.layers.Dense(1,
name="dense_4")
def findGrad(self,func,argm):
try:
return keras.layers.Lambda(lambda z: [tf.gradients(z[0],x_i,
unconnected_gradients='zero')
for x_i in z[1] ]) ([func,argm])
except Exception as e:
print("error occured in find gradient lambda layer of type {} as follows: ".format(type(e)),e)
def findSecGrad(self,func,argm):
try:
            # list containing the diagonal entries of the Hessian matrix. Note that tf.gradients
            # returns a list of tensors, which is why there is a [0] at the end of
            # the tf.gradients call, i.e. tf.gradients(func, argm)[0]
del_sq_layer = keras.layers.Lambda( lambda z: [ tf.gradients(z[0][i], z[1][i],
unconnected_gradients='zero') [0]
for i in range(len(z[1])) ] ) ([func,argm])
return sum(del_sq_layer)
except Exception as e:
print("Error occured in find laplacian lambda layer of type {} as follows: ".format(type(e)),e)
def findPdeLayer(self,pde_lhs,input_arg):
return keras.layers.Lambda(lambda z: z[0] + 2*tf.sin(z[1][0] + z[1][1])) ([pde_lhs, input_arg])
def call(self, inputs):
inputs_conc = keras.layers.concatenate(inputs)
layer_1 = self.dense_1(inputs_conc)
layer_2 = self.dense_2(layer_1)
layer_3 = self.dense_3(layer_2)
layer_4 = self.dense_4(layer_3)
grad_layer = self.findGrad(layer_4, inputs)
laplace_layer = self.findSecGrad(grad_layer, inputs)
pde_layer = self.findPdeLayer(laplace_layer, inputs)
return layer_4, pde_layer
custom_model = CustomModel()
#Loss coming from the boundary terms
def u_loss(y_true, y_pred):
# print("\n\nreached here 1 \n\n\n")
y_true_act = y_true[:,:-1]
at_boundary = tf.cast(y_true[:,-1:,],bool)
u_sq_error = (1/2)*tf.square(y_true_act-y_pred)
# print("\n\nreached here 2 \n\n\n")
# print("\nu_loss: ",tf.where(at_boundary, u_sq_error, 0.))
return tf.where(at_boundary, u_sq_error, 0.)
#Loss coming from the PDE constraint
def pde_loss(y_true, y_pred):
y_true_act = y_true[:,:-1]
at_boundary = tf.cast(y_true[:,-1:,],bool)
    # need to change this to just tf.square(y_pred) after the pde constraint is added to grad_layer
pde_sq_error = (1/2)*tf.square(y_pred)
# print("\npde_loss: ",tf.where(at_boundary,0.,pde_sq_error))
return tf.where(at_boundary,0.,pde_sq_error)
custom_model.compile(loss=[u_loss,pde_loss], optimizer="adam")
# custom_model.compile(loss=u_loss, optimizer=keras.optimizers.SGD(lr=1e-3))
custom_model.fit(x=[X_tr[:,0:1], X_tr[:,1:2]], y=[Y_tr, Y_tr], epochs=10)
# custom_model.compile(loss=[u_loss,pde_loss], optimizer=keras.optimizers.SGD(lr=1e-4))
# custom_model.fit(x=[X_tr[:,0:1], X_tr[:,1:2]], y=[Y_tr, Y_tr], epochs=10)
```
### Testing the model
```
# X_test_st = np.random.uniform(-0.5,0.5,20*dim_d).reshape(20,dim_d)
X_test_st = np.random.uniform(-1,1,100*dim_d).reshape(100,dim_d)
#custom_model.evaluate(x=[X_test_st[:,0:1], X_test_st[:,1:2]], y=[Y_tr[-100:,:], Y_tr[-100:,:]])
Y_test = custom_model.predict(x=[X_test_st[:,0:1], X_test_st[:,1:2]])[0]
Y_test_true = np.sin(X_test_st[:,0:1] + X_test_st[:,1:2])
Y_eval = np.concatenate([Y_test_true,np.ones((Y_test_true.shape[0],1))], axis=1)
custom_model.evaluate(x=[X_test_st[:,0:1], X_test_st[:,1:2]], y= Y_eval)
np.concatenate([Y_test_true, Y_test], axis=1)
plt.scatter(Y_test_true,Y_test)
plt.title("true vs predicted solution")
plt.xlabel("True solution")
plt.ylabel("Predicted solution")
plt.show()
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
# Make data.
X = np.arange(-1, 1, 0.25)
Y = np.arange(-1, 1, 0.25)
X, Y = np.meshgrid(X, Y)
R = X**2 + Y**2
# Z = np.sin(R)
# Plot the surface.
surf = ax.plot_surface(X, Y, R, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
```
|
github_jupyter
|
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(123)
pde_data_size = 10000
dim_d = 2
X_tr_pde = np.random.uniform(-1,1,pde_data_size*dim_d).reshape(pde_data_size,dim_d)
plt.scatter(X_tr_pde[:,0],X_tr_pde[:,1])
plt.title("Randomly generated collocation points")
Y_tr_pde = np.zeros((X_tr_pde.shape[0],1))
# Y_tr_pde = X_tr_pde[:,0:1]
Y_tr_pde = np.concatenate([Y_tr_pde,np.zeros((Y_tr_pde.shape[0],1))],axis=1)
Y_tr_pde.shape
from matplotlib import cm
from matplotlib.ticker import LinearLocator
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
# Make data.
X = np.arange(-1, 1, 0.25)
Y = np.arange(-1, 1, 0.25)
X, Y = np.meshgrid(X, Y)
R = np.sin(X+Y)
# Z = np.sin(R)
# Plot the surface.
surf = ax.plot_surface(X, Y, R, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
bc_data_size = 500
X_bc_left = np.random.uniform(-1.,1.,bc_data_size).reshape(bc_data_size,1)
X_bc_left = np.concatenate([-1*np.ones((bc_data_size,1)), X_bc_left], axis=1)
X_bc_bottom = np.random.uniform(-1.,1.,bc_data_size).reshape(bc_data_size,1)
X_bc_bottom = np.concatenate([X_bc_bottom, -1*np.ones((bc_data_size,1))], axis=1)
X_bc_right = np.random.uniform(-1.,1.,bc_data_size).reshape(bc_data_size,1)
X_bc_right = np.concatenate([1*np.ones((bc_data_size,1)), X_bc_right], axis=1)
X_bc_top = np.random.uniform(-1.,1.,bc_data_size).reshape(bc_data_size,1)
X_bc_top = np.concatenate([X_bc_top, 1*np.ones((bc_data_size,1))], axis=1)
X_bc = np.concatenate([X_bc_left, X_bc_right, X_bc_bottom, X_bc_top],axis=0)
Y_bc = np.sin(X_bc[:,0:1] + X_bc[:,1:])
Y_bc = np.concatenate([Y_bc, np.ones((Y_bc.shape[0],1))], axis=1 )
# dom_data_size = 1000
# X_domain = np.random.uniform(-1,1,dom_data_size*dim_d).reshape(dom_data_size,dim_d)
# Y_domain = X_domain[:,0:1]**2 + X_domain[:,1:]**2
# Y_domain = np.concatenate([Y_domain, np.ones((Y_domain.shape[0],1))], axis=1)
# X_tr = np.concatenate((X_tr_pde, X_bc, X_domain), axis=0)
# Y_tr = np.concatenate((Y_tr_pde, Y_bc, Y_domain), axis=0)
X_tr = np.concatenate((X_tr_pde, X_bc), axis=0)
Y_tr = np.concatenate((Y_tr_pde, Y_bc), axis=0)
from tensorflow.keras import backend as K
class CustomModel(tf.keras.Model):
def __init__(self):
super(CustomModel, self).__init__()
self.dense_1 = keras.layers.Dense(20, activation="tanh",
name="dense_1")
self.dense_2 = keras.layers.Dense(20, activation="tanh",
name="dense_2")
self.dense_3 = keras.layers.Dense(20, activation="tanh",
name="dense_3")
self.dense_4 = keras.layers.Dense(1,
name="dense_4")
def findGrad(self,func,argm):
try:
return keras.layers.Lambda(lambda z: [tf.gradients(z[0],x_i,
unconnected_gradients='zero')
for x_i in z[1] ]) ([func,argm])
except Exception as e:
print("error occured in find gradient lambda layer of type {} as follows: ".format(type(e)),e)
def findSecGrad(self,func,argm):
try:
            # list containing the diagonal entries of the Hessian matrix. Note that tf.gradients
            # returns a list of tensors, which is why there is a [0] at the end of
            # the tf.gradients call, i.e. tf.gradients(func, argm)[0]
del_sq_layer = keras.layers.Lambda( lambda z: [ tf.gradients(z[0][i], z[1][i],
unconnected_gradients='zero') [0]
for i in range(len(z[1])) ] ) ([func,argm])
return sum(del_sq_layer)
except Exception as e:
print("Error occured in find laplacian lambda layer of type {} as follows: ".format(type(e)),e)
def findPdeLayer(self,pde_lhs,input_arg):
return keras.layers.Lambda(lambda z: z[0] + 2*tf.sin(z[1][0] + z[1][1])) ([pde_lhs, input_arg])
def call(self, inputs):
inputs_conc = keras.layers.concatenate(inputs)
layer_1 = self.dense_1(inputs_conc)
layer_2 = self.dense_2(layer_1)
layer_3 = self.dense_3(layer_2)
layer_4 = self.dense_4(layer_3)
grad_layer = self.findGrad(layer_4, inputs)
laplace_layer = self.findSecGrad(grad_layer, inputs)
pde_layer = self.findPdeLayer(laplace_layer, inputs)
return layer_4, pde_layer
custom_model = CustomModel()
#Loss coming from the boundary terms
def u_loss(y_true, y_pred):
# print("\n\nreached here 1 \n\n\n")
y_true_act = y_true[:,:-1]
at_boundary = tf.cast(y_true[:,-1:,],bool)
u_sq_error = (1/2)*tf.square(y_true_act-y_pred)
# print("\n\nreached here 2 \n\n\n")
# print("\nu_loss: ",tf.where(at_boundary, u_sq_error, 0.))
return tf.where(at_boundary, u_sq_error, 0.)
#Loss coming from the PDE constrain
def pde_loss(y_true, y_pred):
y_true_act = y_true[:,:-1]
at_boundary = tf.cast(y_true[:,-1:,],bool)
#need to change this to just tf.square(y_pred) after pde constrain is added to grad_layer
pde_sq_error = (1/2)*tf.square(y_pred)
# print("\npde_loss: ",tf.where(at_boundary,0.,pde_sq_error))
return tf.where(at_boundary,0.,pde_sq_error)
custom_model.compile(loss=[u_loss,pde_loss], optimizer="adam")
# custom_model.compile(loss=u_loss, optimizer=keras.optimizers.SGD(lr=1e-3))
custom_model.fit(x=[X_tr[:,0:1], X_tr[:,1:2]], y=[Y_tr, Y_tr], epochs=10)
# custom_model.compile(loss=[u_loss,pde_loss], optimizer=keras.optimizers.SGD(lr=1e-4))
# custom_model.fit(x=[X_tr[:,0:1], X_tr[:,1:2]], y=[Y_tr, Y_tr], epochs=10)
# X_test_st = np.random.uniform(-0.5,0.5,20*dim_d).reshape(20,dim_d)
X_test_st = np.random.uniform(-1,1,100*dim_d).reshape(100,dim_d)
#custom_model.evaluate(x=[X_test_st[:,0:1], X_test_st[:,1:2]], y=[Y_tr[-100:,:], Y_tr[-100:,:]])
Y_test = custom_model.predict(x=[X_test_st[:,0:1], X_test_st[:,1:2]])[0]
Y_test_true = np.sin(X_test_st[:,0:1] + X_test_st[:,1:2])
Y_eval = np.concatenate([Y_test_true,np.ones((Y_test_true.shape[0],1))], axis=1)
custom_model.evaluate(x=[X_test_st[:,0:1], X_test_st[:,1:2]], y= Y_eval)
np.concatenate([Y_test_true, Y_test], axis=1)
plt.scatter(Y_test_true,Y_test)
plt.title("true vs predicted solution")
plt.xlabel("True solution")
plt.ylabel("Predicted solution")
plt.show()
fig, ax = plt.subplots(subplot_kw={"projection": "3d"})
# Make data.
X = np.arange(-1, 1, 0.25)
Y = np.arange(-1, 1, 0.25)
X, Y = np.meshgrid(X, Y)
R = X**2 + Y**2
# Z = np.sin(R)
# Plot the surface.
surf = ax.plot_surface(X, Y, R, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
| 0.73029 | 0.916708 |
# This notebook is only used for parameter sweeping, hence it lacks proper documentation. See other notebooks.
```
%matplotlib inline
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import gym
import numpy as np
from collections import namedtuple
import random
from matplotlib import pyplot as pl
from IPython.display import clear_output
env = gym.make('Taxi-v2')
eval_env = gym.make('Taxi-v2')
Transition = namedtuple('Transition', ('state', 'action', 'reward', 'next_state', 'done'))
input_size = env.observation_space.n
output_size = env.action_space.n
mem_capacity = 20000
batch = 256
lr = 0.005
double_dqn = False
gamma = 0.99
num_steps = 50000
target_update_freq = 500
learn_start = 10000
eval_freq = 300
eval_episodes = 10
eps_decay = 10000
eps_end = 0.1
hidden_layer = 50
l1_regularization = 0
dropout = 0
class DQN_MLP(nn.Module):
def __init__(self, in_size, out_size, hidden_size, dropout_prob):
super().__init__()
self.lin1 = nn.Linear(in_size, hidden_size)
self.dropout1 = nn.Dropout(dropout_prob)
self.lin2 = nn.Linear(hidden_size, out_size)
self.dropout2 = nn.Dropout(dropout_prob)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.dropout1(F.relu(self.lin1(x)))
return self.dropout2(self.lin2(x))
def init_weights(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
class ReplayBuffer:
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def add(self, *args):
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
mem_size = len(self.memory)
batch = random.sample(self.memory, batch_size)
batch_state, batch_action, batch_reward, batch_next_state, batch_done = zip(*batch)
return batch_state, batch_action, batch_reward, batch_next_state, batch_done
def __len__(self):
return len(self.memory)
def restart_network():
network = DQN_MLP(input_size, output_size, hidden_layer, dropout)
network.apply(init_weights)
target_network = DQN_MLP(input_size, output_size, hidden_layer, dropout)
target_network.load_state_dict(network.state_dict())
memory = ReplayBuffer(mem_capacity)
optimizer = optim.Adam(network.parameters(), lr=lr)
return network, target_network, memory, optimizer
results = {}
for dropout in [0, 1e-6, 1e-4, 1e-2, 0.1]:
results[dropout] = {}
results[dropout]['average_rewards'] = []
results[dropout]['avg_rew_steps'] = []
results[dropout]['losses'] = []
results[dropout]['losses_steps'] = []
network, target_network, memory, optimizer = restart_network()
done = True
for step in range(num_steps):
if done:
state_idx = env.reset()
state = torch.zeros([input_size], dtype=torch.float32)
state[state_idx] = 1
action = network(state.unsqueeze(0)).max(1)[1].item()
eps = max((eps_decay - step + learn_start) / eps_decay, eps_end)
if random.random() < eps:
action = env.action_space.sample()
next_state_idx, reward, done, _ = env.step(action)
next_state = torch.zeros([input_size], dtype=torch.float32)
next_state[next_state_idx] = 1
# Done due to timeout is a non-markovian property. This is an artifact which we would not like to learn from.
if not (done and reward < 0):
memory.add(state, action, reward, next_state, not done)
state = next_state
if step > learn_start:
batch_state, batch_action, batch_reward, batch_next_state, not_done_mask = memory.sample(batch)
batch_state = torch.stack(batch_state)
batch_next_state = torch.stack(batch_next_state)
batch_action = torch.tensor(batch_action, dtype=torch.int64).unsqueeze(-1)
batch_reward = torch.tensor(batch_reward, dtype=torch.float32).unsqueeze(-1)
not_done_mask = torch.tensor(not_done_mask, dtype=torch.float32).unsqueeze(-1)
current_Q = network(batch_state).gather(1, batch_action)
with torch.no_grad():
if double_dqn:
next_state_actions = network(batch_next_state).max(1, keepdim=True)[1]
next_Q = target_network(batch_next_state).gather(1, next_state_actions)
else:
next_Q = target_network(batch_next_state).max(1, keepdim=True)[0]
target_Q = batch_reward + (gamma * next_Q) * not_done_mask
loss = F.smooth_l1_loss(current_Q, target_Q)
all_params = torch.cat([x.view(-1) for x in network.parameters()])
loss += l1_regularization * torch.norm(all_params, 1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
results[dropout]['losses'].append(loss.item())
results[dropout]['losses_steps'].append(step)
if step % target_update_freq == 0:
target_network.load_state_dict(network.state_dict())
if step % eval_freq == 0 and step > learn_start:
network.eval()
total_reward = 0
for eval_ep in range(eval_episodes):
eval_state_idx = eval_env.reset()
while True:
eval_state = torch.zeros([input_size], dtype=torch.float32)
eval_state[eval_state_idx] = 1
action = network(eval_state.unsqueeze(0)).max(1)[1].item()
if random.random() < 0.01:
action = random.randrange(output_size)
eval_state_idx, reward, done, _ = eval_env.step(action)
total_reward += reward
if done:
break
network.train()
average_reward = total_reward * 1.0 / eval_episodes
results[dropout]['average_rewards'].append(average_reward)
results[dropout]['avg_rew_steps'].append(step)
print('Step: ' + str(step) + ' Avg reward: ' + str(average_reward))
for key in results.keys():
pl.plot(results[key]['avg_rew_steps'], results[key]['average_rewards'], label=key)
pl.legend()
pl.title('Reward')
pl.show()
```
|
github_jupyter
|
%matplotlib inline
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import gym
import numpy as np
from collections import namedtuple
import random
from matplotlib import pyplot as pl
from IPython.display import clear_output
env = gym.make('Taxi-v2')
eval_env = gym.make('Taxi-v2')
Transition = namedtuple('Transition', ('state', 'action', 'reward', 'next_state', 'done'))
input_size = env.observation_space.n
output_size = env.action_space.n
mem_capacity = 20000
batch = 256
lr = 0.005
double_dqn = False
gamma = 0.99
num_steps = 50000
target_update_freq = 500
learn_start = 10000
eval_freq = 300
eval_episodes = 10
eps_decay = 10000
eps_end = 0.1
hidden_layer = 50
l1_regularization = 0
dropout = 0
class DQN_MLP(nn.Module):
def __init__(self, in_size, out_size, hidden_size, dropout_prob):
super().__init__()
self.lin1 = nn.Linear(in_size, hidden_size)
self.dropout1 = nn.Dropout(dropout_prob)
self.lin2 = nn.Linear(hidden_size, out_size)
self.dropout2 = nn.Dropout(dropout_prob)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.dropout1(F.relu(self.lin1(x)))
return self.dropout2(self.lin2(x))
def init_weights(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6. / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
class ReplayBuffer:
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def add(self, *args):
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
mem_size = len(self.memory)
batch = random.sample(self.memory, batch_size)
batch_state, batch_action, batch_reward, batch_next_state, batch_done = zip(*batch)
return batch_state, batch_action, batch_reward, batch_next_state, batch_done
def __len__(self):
return len(self.memory)
def restart_network():
network = DQN_MLP(input_size, output_size, hidden_layer, dropout)
network.apply(init_weights)
target_network = DQN_MLP(input_size, output_size, hidden_layer, dropout)
target_network.load_state_dict(network.state_dict())
memory = ReplayBuffer(mem_capacity)
optimizer = optim.Adam(network.parameters(), lr=lr)
return network, target_network, memory, optimizer
results = {}
for dropout in [0, 1e-6, 1e-4, 1e-2, 0.1]:
results[dropout] = {}
results[dropout]['average_rewards'] = []
results[dropout]['avg_rew_steps'] = []
results[dropout]['losses'] = []
results[dropout]['losses_steps'] = []
network, target_network, memory, optimizer = restart_network()
done = True
for step in range(num_steps):
if done:
state_idx = env.reset()
state = torch.zeros([input_size], dtype=torch.float32)
state[state_idx] = 1
action = network(state.unsqueeze(0)).max(1)[1].item()
eps = max((eps_decay - step + learn_start) / eps_decay, eps_end)
if random.random() < eps:
action = env.action_space.sample()
next_state_idx, reward, done, _ = env.step(action)
next_state = torch.zeros([input_size], dtype=torch.float32)
next_state[next_state_idx] = 1
# Done due to timeout is a non-markovian property. This is an artifact which we would not like to learn from.
if not (done and reward < 0):
memory.add(state, action, reward, next_state, not done)
state = next_state
if step > learn_start:
batch_state, batch_action, batch_reward, batch_next_state, not_done_mask = memory.sample(batch)
batch_state = torch.stack(batch_state)
batch_next_state = torch.stack(batch_next_state)
batch_action = torch.tensor(batch_action, dtype=torch.int64).unsqueeze(-1)
batch_reward = torch.tensor(batch_reward, dtype=torch.float32).unsqueeze(-1)
not_done_mask = torch.tensor(not_done_mask, dtype=torch.float32).unsqueeze(-1)
current_Q = network(batch_state).gather(1, batch_action)
with torch.no_grad():
if double_dqn:
next_state_actions = network(batch_next_state).max(1, keepdim=True)[1]
next_Q = target_network(batch_next_state).gather(1, next_state_actions)
else:
next_Q = target_network(batch_next_state).max(1, keepdim=True)[0]
target_Q = batch_reward + (gamma * next_Q) * not_done_mask
loss = F.smooth_l1_loss(current_Q, target_Q)
all_params = torch.cat([x.view(-1) for x in network.parameters()])
loss += l1_regularization * torch.norm(all_params, 1)
optimizer.zero_grad()
loss.backward()
optimizer.step()
results[dropout]['losses'].append(loss.item())
results[dropout]['losses_steps'].append(step)
if step % target_update_freq == 0:
target_network.load_state_dict(network.state_dict())
if step % eval_freq == 0 and step > learn_start:
network.eval()
total_reward = 0
for eval_ep in range(eval_episodes):
eval_state_idx = eval_env.reset()
while True:
eval_state = torch.zeros([input_size], dtype=torch.float32)
eval_state[eval_state_idx] = 1
action = network(eval_state.unsqueeze(0)).max(1)[1].item()
if random.random() < 0.01:
action = random.randrange(output_size)
eval_state_idx, reward, done, _ = eval_env.step(action)
total_reward += reward
if done:
break
network.train()
average_reward = total_reward * 1.0 / eval_episodes
results[dropout]['average_rewards'].append(average_reward)
results[dropout]['avg_rew_steps'].append(step)
print('Step: ' + str(step) + ' Avg reward: ' + str(average_reward))
for key in results.keys():
pl.plot(results[key]['avg_rew_steps'], results[key]['average_rewards'], label=key)
pl.legend()
pl.title('Reward')
pl.show()
| 0.878933 | 0.694406 |
# Module 2, Lab 2 - Non-Linear Regression
## Import libraries
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
## Different types of regression
### Simple Linear
$$ y = ax + b $$
```
x = np.arange(-5.0, 5.0, 0.1)
y = 2*x + 3
y_noise = 2* np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
```
### Polynomial
Example of a $3^{rd}$ order polynomial
$$ y = ax^3 + bx^2 + cx + d $$
```
x = np.arange(-5.0, 5.0, 0.1)
y = 1*(x**3) + 1*(x**2) + 1*x + 3
y_noise = 20 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
```
### Quadratic
$$ y = x^2 $$
```
x = np.arange(-5.0, 5.0, 0.1)
# y = x ** 2
y = np.power(x, 2)
y_noise = 2 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
```
### Exponential
$$ y = a + bc^x $$
```
x = np.arange(-5.0, 5.0, 0.1)
y = 1 + 2*1.5*np.exp(x)
y_noise = 20 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
```
### Logarithmic
$$ y = log(x) $$
```
x = np.arange(0.1, 10.0, 0.1)
y = np.log(x)
y_noise = 0.4 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
```
### Sigmoidal/Logistic
$$ y = a + \frac{b}{1 + c^{(x-d)}} $$
```
x = np.arange(0.1, 10.0, 0.1)
y = 1 + 4/(1 + np.power(3, x-2)) # a = 1, b = 4. c = 3, d = 2
y_noise = 0.4 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
```
## Non-Linear Regression
### Load data
```
import pandas as pd
data = pd.read_csv("../data/china_gdp.csv")
data.head(10)
```
### Plot data
```
x_data, y_data = (data["Year"].values, data["Value"].values)
plt.scatter(x_data, y_data)
plt.xlabel("Year")
plt.ylabel("GDP")
plt.grid()
plt.show()
```
### Choosing the model
The source recommends modelling the data with a logistic curve. A logistic curve starts out growing slowly (roughly exponentially), then rises steeply, and finally flattens out, as summarized below.
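For reference, this is the standard logistic function plotted in the next cell, together with its limiting behaviour (my summary, not part of the original lab):

$$ y = \frac{1}{1+e^{-x}}, \qquad \lim_{x\to -\infty} y = 0, \qquad \lim_{x\to +\infty} y = 1, $$

with the steepest growth around $x = 0$.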
```
x = np.arange(-5.0, 5.0, 0.1)
# y = 1 + 1/(1 + np.power(3, -x)) # similar to previous example
y = 1/(1 + np.exp(-x)) # variation suggested to use for given sample data
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
```
### Building the model
The generic form of a Sigmoid can be expressed as:
$$ y = \frac{1}{1 + e^{\beta_1 (x - \beta_2)}} $$
Where:
* $\beta_1$ is the steepness (see the note on its sign after this list)
* $\beta_2$ slides the curve along the x-axis
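One remark of mine, not from the source: with this sign convention the exponent is $\beta_1 (x - \beta_2)$ rather than $-\beta_1 (x - \beta_2)$, so an increasing curve requires a negative $\beta_1$. That is why the trial value below is $\beta_1 = -0.26$, and the value returned later by `curve_fit` should likewise come out negative.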
```
def sigmoid(x, beta_1, beta_2):
y = 1 / (1 + np.exp(beta_1 * (x - beta_2)))
return y
# trial and error
beta_1 = -0.26
beta_2 = 2011.0
y_model = 15e12 * sigmoid(x_data, beta_1, beta_2)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_model, "r")
plt.xlabel("Year")
plt.ylabel("GDP")
plt.grid()
plt.show()
```
### Curve fitting
```
from scipy.optimize import curve_fit
# normalize data so curve_fit works with well-scaled parameters (raw year/GDP values are poorly conditioned for the default initial guess)
x_norm = x_data / max(x_data)
y_norm = y_data / max(y_data)
# determine optimal beta_1&2
popt, _ = curve_fit(sigmoid, x_norm, y_norm)
beta_1 = popt[0]
beta_2 = popt[1]
print("Optimal paramters: beta_1: {:.4f}, beta_2: {:.4f}".format(beta_1, beta_2))
x_plot = np.linspace(x_data[0], x_data[-1], x_data.size)
y_plot = sigmoid(x_plot / max(x_data), beta_1, beta_2)
plt.figure(figsize=(10, 6))
plt.scatter(x_plot, y_norm, label="data")
plt.plot(x_plot, y_plot, "r", label="model")
plt.xlabel("Year")
plt.ylabel("Normalized GDP")
plt.legend()
plt.grid()
plt.show()
```
### Model using `Train` and `Test` method
```
# create Train and Test datasets
mask = np.random.rand(len(data)) < 0.8
x_train = x_norm[mask]
y_train = y_norm[mask]
x_test = x_norm[~mask]
y_test = y_norm[~mask]
# determine beta_1/2 from training data
beta_opt, _ = curve_fit(sigmoid, x_train, y_train)
beta_1 = beta_opt[0]
beta_2 = beta_opt[1]
print("Optimal paramters: beta_1: {:.4f}, beta_2: {:.4f}".format(beta_1, beta_2))
# evaluate model accuracy
from sklearn.metrics import r2_score
# predict y based on model
y_hat = sigmoid(x_test, beta_1, beta_2)
y_hat_plot = sigmoid(x_norm, beta_1, beta_2)
def evaluate_model(y_test, y_hat):
MAE = np.mean(np.absolute(y_hat - y_test))
MSE = np.mean((y_hat - y_test) ** 2)
R2 = r2_score(y_hat, y_test)
return (MAE, MSE, R2)
accuracy = evaluate_model(y_test, y_hat)
print("Mean absolute error: {:.2f}".format(accuracy[0]))
print("Mean Squared Error: {:.2f}".format(accuracy[1]))
print("R2 score: {:.2f}".format(accuracy[2]))
# plot results
plt.figure(figsize=(10, 6))
plt.scatter(x_train * max(x_data), y_train, label="Training dataset")
plt.scatter(x_test * max(x_data), y_test, color="g" , marker="x", label="Test dataset")
plt.plot(x_data, y_hat_plot, color="r", label="Model")
plt.xlabel("Year")
plt.ylabel("GPD")
plt.grid()
plt.legend()
plt.show()
```
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
x = np.arange(-5.0, 5.0, 0.1)
y = 2*x + 3
y_noise = 2* np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
x = np.arange(-5.0, 5.0, 0.1)
y = 1*(x**3) + 1*(x**2) + 1*x + 3
y_noise = 20 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
x = np.arange(-5.0, 5.0, 0.1)
# y = x ** 2
y = np.power(x, 2)
y_noise = 2 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
x = np.arange(-5.0, 5.0, 0.1)
y = 1 + 2*1.5*np.exp(x)
y_noise = 20 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
x = np.arange(0.1, 10.0, 0.1)
y = np.log(x)
y_noise = 0.4 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
x = np.arange(0.1, 10.0, 0.1)
y = 1 + 4/(1 + np.power(3, x-2)) # a = 1, b = 4. c = 3, d = 2
y_noise = 0.4 * np.random.normal(size = x.size)
y_data = y + y_noise
plt.scatter(x, y_data)
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
import pandas as pd
data = pd.read_csv("../data/china_gdp.csv")
data.head(10)
x_data, y_data = (data["Year"].values, data["Value"].values)
plt.scatter(x_data, y_data)
plt.xlabel("Year")
plt.ylabel("GDP")
plt.grid()
plt.show()
x = np.arange(-5.0, 5.0, 0.1)
# y = 1 + 1/(1 + np.power(3, -x)) # similar to previous example
y = 1/(1 + np.exp(-x)) # variation suggested to use for given sample data
plt.plot(x, y, "r")
plt.xlabel("Independent variable")
plt.ylabel("Dependent variable")
plt.grid()
plt.show()
def sigmoid(x, beta_1, beta_2):
y = 1 / (1 + np.exp(beta_1 * (x - beta_2)))
return y
# trial and error
beta_1 = -0.26
beta_2 = 2011.0
y_model = 15e12 * sigmoid(x_data, beta_1, beta_2)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_model, "r")
plt.xlabel("Year")
plt.ylabel("GDP")
plt.grid()
plt.show()
from scipy.optimize import curve_fit
# normalize data, required for scipy->optimize (?)
x_norm = x_data / max(x_data)
y_norm = y_data / max(y_data)
# determine optimal beta_1&2
popt, _ = curve_fit(sigmoid, x_norm, y_norm)
beta_1 = popt[0]
beta_2 = popt[1]
print("Optimal paramters: beta_1: {:.4f}, beta_2: {:.4f}".format(beta_1, beta_2))
x_plot = np.linspace(x_data[0], x_data[-1], x_data.size)
y_plot = sigmoid(x_plot / max(x_data), beta_1, beta_2)
plt.figure(figsize=(10, 6))
plt.scatter(x_plot, y_norm, label="data")
plt.plot(x_plot, y_plot, "r", label="model")
plt.xlabel("Year")
plt.ylabel("Normalized GDP")
plt.legend()
plt.grid()
plt.show()
# create Train and Test datasets
mask = np.random.rand(len(data)) < 0.8
x_train = x_norm[mask]
y_train = y_norm[mask]
x_test = x_norm[~mask]
y_test = y_norm[~mask]
# determine beta_1/2 from training data
beta_opt, _ = curve_fit(sigmoid, x_train, y_train)
beta_1 = beta_opt[0]
beta_2 = beta_opt[1]
print("Optimal paramters: beta_1: {:.4f}, beta_2: {:.4f}".format(beta_1, beta_2))
# evaluate model accuracy
from sklearn.metrics import r2_score
# predict y based on model
y_hat = sigmoid(x_test, beta_1, beta_2)
y_hat_plot = sigmoid(x_norm, beta_1, beta_2)
def evaluate_model(y_test, y_hat):
MAE = np.mean(np.absolute(y_hat - y_test))
MSE = np.mean((y_hat - y_test) ** 2)
R2 = r2_score(y_hat, y_test)
return (MAE, MSE, R2)
accuracy = evaluate_model(y_test, y_hat)
print("Mean absolute error: {:.2f}".format(accuracy[0]))
print("Mean Squared Error: {:.2f}".format(accuracy[1]))
print("R2 score: {:.2f}".format(accuracy[2]))
# plot results
plt.figure(figsize=(10, 6))
plt.scatter(x_train * max(x_data), y_train, label="Training dataset")
plt.scatter(x_test * max(x_data), y_test, color="g" , marker="x", label="Test dataset")
plt.plot(x_data, y_hat_plot, color="r", label="Model")
plt.xlabel("Year")
plt.ylabel("GPD")
plt.grid()
plt.legend()
plt.show()
| 0.695855 | 0.978834 |
```
import random
import tensorflow as tf
import numpy as np
import matplotlib
from tensorflow.keras import layers, models, regularizers
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import json
tf.keras.backend.clear_session()
%precision 4
L2_WEIGHT_DECAY = 0.01
L1_WEIGHT_DECAY = 0.003
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
def load_json_data(path):
with open(path, "r") as fp:
data = json.load(fp)
a = np.array(data["mfcc"])
label = np.array(data["label"])
mfcc = a[..., np.newaxis]
print(mfcc.shape)
return mfcc, label
train_path = "../parse_dataset_labels/parse_sound_files/tess_ravdess_train_norm.json"
validate_path = "../parse_dataset_labels/parse_sound_files/tess_ravdess_validation_norm.json"
test_path = "../parse_dataset_labels/parse_sound_files/tess_ravdess_test_norm.json"
# Gets the list from the json
train_mfcc_list, train_label_list = load_json_data(train_path)
validate_mfcc_list, validate_label_list = load_json_data(validate_path)
test_mfcc_list, test_label_list = load_json_data(test_path)
# Shuffles the list, unzips the list, creates numpy arrays from the lists
x = list(zip(train_mfcc_list, train_label_list))
random.shuffle(x)
train_mfcc_tuple, train_label_tuple = zip(*x)
train_mfcc = np.array(train_mfcc_tuple)
train_label = np.array(train_label_tuple)
# Shuffles the list, unzips the list, creates numpy arrays from the lists
x = list(zip(validate_mfcc_list, validate_label_list))
random.shuffle(x)
validate_mfcc_tuple, validate_label_tuple = zip(*x)
validate_mfcc = np.array(validate_mfcc_tuple)
validate_label = np.array(validate_label_tuple)
# Shuffles the list, unzips the list, creates numpy arrays from the lists
x = list(zip(test_mfcc_list, test_label_list))
random.shuffle(x)
test_mfcc_tuple, test_label_tuple = zip(*x)
test_mfcc = np.array(test_mfcc_tuple)
test_label = np.array(test_label_tuple)
# abc = tf.keras.utils.to_categorical(train_label)
# print(abc)
train_label = tf.keras.utils.to_categorical(train_label)
test_label = tf.keras.utils.to_categorical(test_label)
validate_label = tf.keras.utils.to_categorical(validate_label)
print(test_label.shape)
# using the hop length and fft params we have 87 time steps with 13 values for each
input_layer = tf.keras.layers.Input(shape=(87, 1, 1))
x = tf.keras.layers.Conv2D(96, (11,11), strides=4, padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_layer)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
x = tf.keras.layers.Conv2D(256, (5,5), padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
x = tf.keras.layers.Conv2D(384, (5,5), padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.Conv2D(384, (5,5), padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.Conv2D(256, (5,5), padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(4096, activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.Dense(4096, activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.Dense(5, activation="softmax", kernel_initializer='he_normal', bias_initializer="he_normal")(x)
model = Model(input_layer, x, name='alexNet')
model.summary()
# input_layer = tf.keras.layers.Input(shape=(train_mfcc.shape[1], train_mfcc.shape[2], train_mfcc.shape[3]))
# x = tf.keras.layers.Conv2D(96, (3,3), activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal")(input_layer)
# x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
# # Batch normalization standardizes the activations of the current layer and what activations get sent to the next layer. this helps
# # the model converge a lot faster because it has normalized values flowing through the model
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Conv2D(256, (3,3), activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal")(x)
# x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Conv2D(512, (3,3), activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal")(x)
# x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Flatten()(x)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, decay=0.0001), loss='categorical_crossentropy', metrics=['acc'])
check_points = "../checkpoint/checkpoint_sound.hb/"
check_point_dir = os.path.dirname(check_points)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=check_point_dir, verbose=1, monitor="val_acc", save_best_only=True)
model.fit(train_mfcc, train_label,
validation_data=(validate_mfcc, validate_label),
verbose=1,
batch_size=32,
epochs=100,
callbacks=[cp_callback])
```
|
github_jupyter
|
import os
import random
import tensorflow as tf
import numpy as np
import matplotlib as plt
from tensorflow.keras import layers, models, regularizers
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
import matplotlib.pyplot as plt
import json
tf.keras.backend.clear_session()
%precision 4
L2_WEIGHT_DECAY = 0.01
L1_WEIGHT_DECAY = 0.003
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
def load_json_data(path):
with open(path, "r") as fp:
data = json.load(fp)
a = np.array(data["mfcc"])
label = np.array(data["label"])
mfcc = a[..., np.newaxis]
print(mfcc.shape)
return mfcc, label
train_path = "../parse_dataset_labels/parse_sound_files/tess_ravdess_train_norm.json"
validate_path = "../parse_dataset_labels/parse_sound_files/tess_ravdess_validation_norm.json"
test_path = "../parse_dataset_labels/parse_sound_files/tess_ravdess_test_norm.json"
# Gets the list from the json
train_mfcc_list, train_label_list = load_json_data(train_path)
validate_mfcc_list, validate_label_list = load_json_data(validate_path)
test_mfcc_list, test_label_list = load_json_data(test_path)
# Shuffles the list, unzips the list, creates numpy arrays from the lists
x = list(zip(train_mfcc_list, train_label_list))
random.shuffle(x)
train_mfcc_tuple, train_label_tuple = zip(*x)
train_mfcc = np.array(train_mfcc_tuple)
train_label = np.array(train_label_tuple)
# Shuffles the list, unzips the list, creates numpy arrays from the lists
x = list(zip(validate_mfcc_list, validate_label_list))
random.shuffle(x)
validate_mfcc_tuple, validate_label_tuple = zip(*x)
validate_mfcc = np.array(validate_mfcc_tuple)
validate_label = np.array(validate_label_tuple)
# Shuffles the list, unzips the list, creates numpy arrays from the lists
x = list(zip(test_mfcc_list, test_label_list))
random.shuffle(x)
test_mfcc_tuple, test_label_tuple = zip(*x)
test_mfcc = np.array(test_mfcc_tuple)
test_label = np.array(test_label_tuple)
# abc = tf.keras.utils.to_categorical(train_label)
# print(abc)
train_label = tf.keras.utils.to_categorical(train_label)
test_label = tf.keras.utils.to_categorical(test_label)
validate_label = tf.keras.utils.to_categorical(validate_label)
print(test_label.shape)
# using the hop length and fft params we have 87 time steps with 13 values for each
input_layer = tf.keras.layers.Input(shape=(87, 1, 1))
x = tf.keras.layers.Conv2D(96, (11,11), strides=4, padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_layer)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
x = tf.keras.layers.Conv2D(256, (5,5), padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
x = tf.keras.layers.Conv2D(384, (5,5), padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.Conv2D(384, (5,5), padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.Conv2D(256, (5,5), padding="same", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(4096, activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.Dense(4096, activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal", kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)
x = tf.keras.layers.Dense(5, activation="softmax", kernel_initializer='he_normal', bias_initializer="he_normal")(x)
model = Model(input_layer, x, name='alexNet')
model.summary()
# input_layer = tf.keras.layers.Input(shape=(train_mfcc.shape[1], train_mfcc.shape[2], train_mfcc.shape[3]))
# x = tf.keras.layers.Conv2D(96, (3,3), activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal")(input_layer)
# x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
# # Batch normalization standardizes the activations of the current layer and what activations get sent to the next layer. this helps
# # the model converge a lot faster because it has normalized values flowing through the model
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Conv2D(256, (3,3), activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal")(x)
# x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Conv2D(512, (3,3), activation="relu", kernel_initializer='he_normal', bias_initializer="he_normal")(x)
# x = tf.keras.layers.MaxPool2D((3,3), strides=2, padding="same")(x)
# x = tf.keras.layers.BatchNormalization()(x)
# x = tf.keras.layers.Flatten()(x)
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9, decay=0.0001), loss='categorical_crossentropy', metrics=['acc'])
check_points = "../checkpoint/checkpoint_sound.hb/"
check_point_dir = os.path.dirname(check_points)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=check_point_dir, verbose=1, monitor="val_acc", save_best_only=True)
model.fit(train_mfcc, train_label,
validation_data=(validate_mfcc, validate_label),
verbose=1,
batch_size=32,
epochs=100,
callbacks=[cp_callback])
| 0.684159 | 0.498657 |
<a href="https://colab.research.google.com/github/JiaminJIAN/20MA573/blob/master/src/One_dimensional_Brownian_motion_path_simulation2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## **Exact sampling of Brownian path and GBM path**
Goal:
- Exact sampling of Brownian motion path
- Exact sampling of Geometric Brownian path
### **1. Brownian path**
Let the time mesh $\Pi$ be of the form
$$\Pi = \{0 = t_{1} \leq t_{2} \leq \dots \leq t_{N} = T \}.$$
We write
$$\langle W, \Pi \rangle = \{W(t): t \in \Pi \}$$
for the projection of the Brownian path on $\Pi$. To simulate a Brownian path by a random walk, one can iterate
$$W(t_{i+1}) = W(t_{i}) + \sqrt{t_{i+1} - t_{i}} Z_{i+1}.$$
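Equivalently, the recursion can be vectorized with a cumulative sum of the scaled increments. The helper below is only an illustrative sketch (its name and default arguments are ours, not part of the original notebook) for a uniform mesh on $[0, T]$:

```
import numpy as np

def brownian_path(T=10.0, n=100, seed=0):
    """Exact sampling of W on the uniform mesh {i*T/n : i = 0, 1, ..., n}."""
    rng = np.random.default_rng(seed)
    t = np.linspace(0.0, T, n + 1)
    dW = np.sqrt(T / n) * rng.standard_normal(n)   # independent N(0, T/n) increments
    W = np.concatenate(([0.0], np.cumsum(dW)))     # partial sums give W(t_i), with W(0) = 0
    return t, W
```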
#### **Exercise 1:**
Let the uniform mesh be denoted by
$$\Pi_{T, N} = \{i T/N: i = 0, 1, \dots, N \} .$$
- Write the pseudocode.
- Prove that $\hat{W}$ is an exact sampling.
- Draw $10$ path simulations of $t \mapsto \frac{W(t)}{\sqrt{2t \log \log t}}$ on the interval $t \in [100, 110]$ with mesh size $h = 0.1$.
#### **Solution:**
(1) The pseudocode is as follows:
[pseudocode](https://github.com/JiaminJIAN/20MA573/blob/master/src/HW6_pesudocode1.png)
(2) For the Brownian motion $W_{t}$, we know that for any $s < t$, $W_{t} - W_{s}$ is independent of $\mathcal{F}_{s}$ and $W_{t} - W_{s} \sim \mathcal{N}(0, t-s)$.
Since $\{Z_{i}\}_{i = 1}^{N}$ is a sequence of independent, identically distributed standard normal random variables and $\hat{W}(t_{1}) = 0$, we have
$$\hat{W}(t_{i+1}) - \hat{W}(t_{i}) \sim \mathcal{N}(0, t_{i+1} - t_{i})$$
and the increments $\hat{W}(t_{i+1}) - \hat{W}(t_{i})$ are independent of each other. Thus for every $i = 1, 2, \dots, N$ we have $\hat{W}(t_{i}) \sim \mathcal{N}(0, t_{i})$, so $\hat{W}$ is an exact sampling of the Brownian motion.
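Written out, the variance identity is just a telescoping sum of independent Gaussian increments: since $t_{1} = 0$ and $\hat{W}(t_{1}) = 0$,
$$\hat{W}(t_{i}) = \sum_{k=1}^{i-1} \bigl(\hat{W}(t_{k+1}) - \hat{W}(t_{k})\bigr) \sim \mathcal{N}\Bigl(0, \sum_{k=1}^{i-1} (t_{k+1} - t_{k})\Bigr) = \mathcal{N}(0, t_{i}),$$
and increments over disjoint intervals are independent by construction, so $(\hat{W}(t_{1}), \dots, \hat{W}(t_{N}))$ has the same joint law as $(W(t_{1}), \dots, W(t_{N}))$.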
(3) Draw $10$ path simulations of $t \mapsto \frac{W(t)}{\sqrt{2t \log \log t}}$ on the interval $t \in [100, 110]$ with mesh size $h = 0.1$.
Firstly, we import the basic packages we need.
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import scipy.stats as ss
from pylab import plt
plt.style.use('seaborn')
%matplotlib inline
```
Then we build a function to get the exact sampling of Brownian motion.
```
# define a function of BM path generator
def exsampling_bm_gen(T1, T2, n): # parameters: start time, end time, and the mesh number
t = np.linspace(T1, T2, num = n+1) # init mesh
h = (T2 - T1)/n # the scale of each step
W = np.zeros(n+1) # init BM with 0
W[0] = np.random.normal(0, np.sqrt(T1))
for i in range(n):
W[i+1] = W[i] + np.sqrt(h) * np.random.normal()
return t, W
```
We test the code and plot 10 Brownian motion paths starting from 0 as follows:
```
## Test the exsampling_bm_gen and plot
MeshN = 100 # Mesh number
SimN = 10 # Simulation number
for i in range(SimN):
[t, W] = exsampling_bm_gen(0., 10., MeshN)
plt.plot(t, W);
```
Then we draw 10 simulated paths of the function $t \mapsto \frac{W(t)}{\sqrt{2t \log \log t}}$ on the interval $t \in [100, 110]$ with mesh size $h = 0.1$. At $t = 100$ we have $W_t \sim \mathcal{N} (0, 100)$, so $W_{100}$ is generated as a normal random variable with distribution $\mathcal{N} (0, 100)$.
```
T1 = 100; T2 = 110; # The time interval
MeshN = 100 # Mesh number
SimN = 10 # Simulation number
for i in range(SimN):
[t, W] = exsampling_bm_gen(T1, T2, MeshN)
y = np.sqrt(2 * t * np.log(np.log(t)))
y1 = W/y
plt.plot(t, y1);
print("Using all of the path to calculate the mean of r_t:")
plt.axhline(1, color='r', ls='dashed', lw=1.5, label='benchmark')
plt.axhline(-1, color='r', ls='dashed', lw=1.5, label='benchmark')
plt.ylim(-2, 2)
plt.legend()
```
The Brownian path may cross the benchmark given by $\pm\sqrt{2t \log \log t}$, i.e. the ratio $\frac{W(t)}{\sqrt{2t \log \log t}}$ may leave $[-1, 1]$. To see this we can change the time scale to $[1000, 10000]$; the figure is as follows:
```
T1 = 1000; T2 = 10000; # The time interval
MeshN = 100000 # Mesh number
SimN = 10 # Simulation number
for i in range(SimN):
[t, W] = exsampling_bm_gen(T1, T2, MeshN)
plt.plot(t, W);
y1 = np.sqrt(2 * t * np.log(np.log(t)))
y2 = - np.sqrt(2 * t * np.log(np.log(t)))
plt.plot(t, y1, color='r', ls='dashed', lw=1.5, label='benchmark1')
plt.plot(t, y2, color='r', ls='dashed', lw=1.5, label='benchmark2')
#plt.ylim(-2.5, 2.5)
plt.legend()
```
Then we draw 10 simulated paths of the function $t \mapsto \frac{W(t)}{\sqrt{2t \log \log t}}$ on the interval $t \in [1000, 10000]$ with mesh size $h = 0.1$. At $t = 1000$ we have $W_t \sim \mathcal{N} (0, 1000)$, so $W_{1000}$ is generated as a normal random variable with distribution $\mathcal{N} (0, 1000)$.
```
T1 = 1000; T2 = 10000; # The time interval
MeshN = 100000 # Mesh number
SimN = 10 # Simulation number
for i in range(SimN):
[t, W] = exsampling_bm_gen(T1, T2, MeshN)
y = np.sqrt(2 * t * np.log(np.log(t)))
y1 = W/y
plt.plot(t, y1);
print("Using all of the path to calculate the mean of r_t:")
plt.axhline(1, color='r', ls='dashed', lw=1.5, label='benchmark')
plt.axhline(-1, color='r', ls='dashed', lw=1.5, label='benchmark')
plt.ylim(-2, 2)
plt.legend()
```
### **2. Geometric Brownian path**
$GBM(x_{0}, r, \sigma)$ is given by
$$X(t) = x_{0} \exp\Bigl\{\bigl(r - \tfrac{1}{2} \sigma^{2}\bigr)t + \sigma W(t)\Bigr\}.$$
We can replace $W(t)$ by its exact simulation $\hat{W}(t)$ to get an exact simulation of $X(t)$, i.e.
$$\hat{X}(t) = x_{0} \exp\Bigl\{\bigl(r - \tfrac{1}{2} \sigma^{2}\bigr)t + \sigma \hat{W}(t)\Bigr\}.$$
Based on a stock example, we can plot GBM paths.
```
## An example of stock is as follows:
S0 = 100.0
K = 110.0
r=0.0475
sigma = 0.20
T = 1.
Otype='C'
T1 = 0; T2 = 100; # The time interval
MeshN = 1000 # Mesh number
SimN = 10 # Simulation number
S0 = 10 # the initial value of the stock
for i in range(SimN):
[t, W] = exsampling_bm_gen(T1, T2, MeshN)
S = S0 * np.exp((r - sigma**2/2) * t + sigma * W) #GBM path
plt.plot(t, S);
```
### **3. Application to arithmetic Asian option price**
An arithmetic Asian call option with maturity $T$ and strike price $K$ has payoff
$$C(T) = (A(T)- K)^{+},$$
where $A(T)$ is the arithmetic average of the stock price at times $0 \leq t_{1} \leq t_{2} \leq \dots \leq t_{n} = T$, i.e.
$$A(T) = \frac{1}{n} \sum_{i = 1}^{n} S(t_{i}).$$
The call price can thus be written as
$$C_{0} = \mathbb{E}^{\mathbb{Q}} [e^{-rT} (A(T) - K)^{+}].$$
Unlike the geometric Asian option, its arithmetic counterpart does not have an explicit formula for its price, so below we shall use Monte Carlo (MC) simulation. In practice, an arithmetic Asian option with a given number $n$ of time steps takes the price average at $n+1$ points
$$t_{i} = (i-1) \frac{T}{n}, \quad i = 1, 2, 3, \dots, (n+1).$$
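Before the exercise, a vectorized Monte Carlo sketch of this estimator on the grid above may help (the function name, default arguments and seed below are only illustrative; the exercise then implements a loop-based version of the same idea inside a `Gbm` class):

```
import numpy as np

def arithmetic_asian_call_mc(s0=100.0, k=110.0, r=0.0475, sigma=0.20,
                             maturity=1.0, n=5, n_paths=100000, seed=12):
    rng = np.random.default_rng(seed)
    t = np.linspace(0.0, maturity, n + 1)                   # grid t_1, ..., t_{n+1}
    dW = np.sqrt(maturity / n) * rng.standard_normal((n_paths, n))
    W = np.concatenate([np.zeros((n_paths, 1)), np.cumsum(dW, axis=1)], axis=1)
    S = s0 * np.exp((r - 0.5 * sigma**2) * t + sigma * W)   # exact GBM paths on the grid
    A = S.mean(axis=1)                                      # arithmetic average per path
    return np.exp(-r * maturity) * np.maximum(A - k, 0.0).mean()
```

Averaging the discounted payoff over the simulated paths approximates the risk-neutral expectation $C_{0}$ above.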
#### **Exercise 3:**
Consider the arithmetic Asian option price under the BSM model by exact sampling.
- Write a pseudocode for the arithmetic Asian price under BSM.
- To the GBM class, add a method
$$arasian(otype, strike, maturity, nstep, npath)$$
for the price by exact sampling.
- Use your code to compute the arithmetic Asian option price with
$$s_{0} = 100, \sigma = 0.20, r = 0.0475, K = 110.0, T = 1.0, otype = 1, nstep = 5.$$
(1) The pseudocode is as follows:
[pseudocode](https://github.com/JiaminJIAN/20MA573/blob/master/src/HW6_pesudocode2.png)
(2) To the GBM class, add a method $arasian(otype, strike, maturity, nstep, npath)$ for the price by exact sampling.
```
class VanillaOption:
def __init__(
self,
otype = 1, # 1: 'call'
# -1: 'put'
strike = 110.,
maturity = 1.,
market_price = 10.):
self.otype = otype
self.strike = strike
self.maturity = maturity
self.market_price = market_price #this will be used for calibration
def payoff(self, s): #s: excercise price
otype = self.otype
k = self.strike
maturity = self.maturity
return max([0, (s - k)*otype])
class Gbm:
def __init__(self,
init_state = 100.,
drift_ratio = .0475,
vol_ratio = .2,
steps = 5,
sim_number = 1000
):
self.init_state = init_state
self.drift_ratio = drift_ratio
self.vol_ratio = vol_ratio
self.steps = steps
self.sim_number = sim_number
def bsm_price(self, vanilla_option):
s0 = self.init_state
sigma = self.vol_ratio
r = self.drift_ratio
otype = vanilla_option.otype
k = vanilla_option.strike
maturity = vanilla_option.maturity
d1 = (np.log(s0 / k) + (r + 0.5 * sigma ** 2)
* maturity) / (sigma * np.sqrt(maturity))
d2 = d1 - sigma * np.sqrt(maturity)
return (otype * s0 * ss.norm.cdf(otype * d1) #line break needs parenthesis
- otype * np.exp(-r * maturity) * k * ss.norm.cdf(otype * d2))
#Gbm.bsm_price = bsm_price
def arasian(self, vanilla_option):
s0 = self.init_state
sigma = self.vol_ratio
r = self.drift_ratio
n = self.steps
sim_N = self.sim_number
otype = vanilla_option.otype
k = vanilla_option.strike
maturity = vanilla_option.maturity
price = 0
for j in range(sim_N):
t = np.linspace(0, maturity, n+1) # init mesh
h = 1/n # the scale of each step
W = np.zeros(n+1) # init BM with 0
## W[0] = np.random.normal(0, np.sqrt(T1))
for i in range(n):
W[i+1] = W[i] + np.sqrt(h) * np.random.normal()
S = s0 * np.exp((r - sigma**2/2.) * t + sigma * W) #GBM path
AT = np.mean(S)
price = price + np.exp(-r*maturity)*np.max([AT-k, 0])
return price/sim_N
```
(3) Use your code to compute the arithmetic Asian option price with
$$s_{0} = 100, \sigma = 0.20, r = 0.0475, K = 110.0, T = 1.0, otype = 1, nstep = 5.$$
Using the class above, we can get the price of the arithmetic Asian call option.
```
gbm1 = Gbm()
option1 = VanillaOption()
gbm1.arasian(option1)
```
We can also get the price of the corresponding European call option from the `Gbm` class via its `bsm_price` method.
```
option_price = gbm1.bsm_price(option1)
print(option_price)
```
We can see that the price of the European call option is larger than that of the arithmetic Asian call option with the same parameters, since averaging the price over time reduces the variability of the payoff.
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import scipy.stats as ss
from pylab import plt
plt.style.use('seaborn')
%matplotlib inline
# define a function of BM path generator
def exsampling_bm_gen(T1, T2, n): # parameters: start time, end time, and the mesh number
t = np.linspace(T1, T2, num = n+1) # init mesh
h = (T2 - T1)/n # the scale of each step
W = np.zeros(n+1) # init BM with 0
W[0] = np.random.normal(0, np.sqrt(T1))
for i in range(n):
W[i+1] = W[i] + np.sqrt(h) * np.random.normal()
return t, W
## Test the exsampling_bm_gen and plot
MeshN = 100 # Mesh number
SimN = 10 # Simulation number
for i in range(SimN):
[t, W] = exsampling_bm_gen(0., 10., MeshN)
plt.plot(t, W);
T1 = 100; T2 = 110; # The time interval
MeshN = 100 # Mesh number
SimN = 10 # Simulation number
for i in range(SimN):
[t, W] = exsampling_bm_gen(T1, T2, MeshN)
y = np.sqrt(2 * t * np.log(np.log(t)))
y1 = W/y
plt.plot(t, y1);
print("Using all of the path to calculate the mean of r_t:")
plt.axhline(1, color='r', ls='dashed', lw=1.5, label='benchmark')
plt.axhline(-1, color='r', ls='dashed', lw=1.5, label='benchmark')
plt.ylim(-2, 2)
plt.legend()
T1 = 1000; T2 = 10000; # The time interval
MeshN = 100000 # Mesh number
SimN = 10 # Simulation number
for i in range(SimN):
[t, W] = exsampling_bm_gen(T1, T2, MeshN)
plt.plot(t, W);
y1 = np.sqrt(2 * t * np.log(np.log(t)))
y2 = - np.sqrt(2 * t * np.log(np.log(t)))
plt.plot(t, y1, color='r', ls='dashed', lw=1.5, label='benchmark1')
plt.plot(t, y2, color='r', ls='dashed', lw=1.5, label='benchmark2')
#plt.ylim(-2.5, 2.5)
plt.legend()
T1 = 1000; T2 = 10000; # The time interval
MeshN = 100000 # Mesh number
SimN = 10 # Simulation number
for i in range(SimN):
[t, W] = exsampling_bm_gen(T1, T2, MeshN)
y = np.sqrt(2 * t * np.log(np.log(t)))
y1 = W/y
plt.plot(t, y1);
print("Using all of the path to calculate the mean of r_t:")
plt.axhline(1, color='r', ls='dashed', lw=1.5, label='benchmark')
plt.axhline(-1, color='r', ls='dashed', lw=1.5, label='benchmark')
plt.ylim(-2, 2)
plt.legend()
## An example of stock is as follows:
S0 = 100.0
K = 110.0
r=0.0475
sigma = 0.20
T = 1.
Otype='C'
T1 = 0; T2 = 100; # The time interval
MeshN = 1000 # Mesh number
SimN = 10 # Simulation number
S0 = 10 # the initial value of the stock
for i in range(SimN):
[t, W] = exsampling_bm_gen(T1, T2, MeshN)
S = S0 * np.exp((r - sigma**2/2) * t + sigma * W) #GBM path
plt.plot(t, S);
class VanillaOption:
def __init__(
self,
otype = 1, # 1: 'call'
# -1: 'put'
strike = 110.,
maturity = 1.,
market_price = 10.):
self.otype = otype
self.strike = strike
self.maturity = maturity
self.market_price = market_price #this will be used for calibration
def payoff(self, s): #s: excercise price
otype = self.otype
k = self.strike
maturity = self.maturity
return max([0, (s - k)*otype])
class Gbm:
def __init__(self,
init_state = 100.,
drift_ratio = .0475,
vol_ratio = .2,
steps = 5,
sim_number = 1000
):
self.init_state = init_state
self.drift_ratio = drift_ratio
self.vol_ratio = vol_ratio
self.steps = steps
self.sim_number = sim_number
def bsm_price(self, vanilla_option):
s0 = self.init_state
sigma = self.vol_ratio
r = self.drift_ratio
otype = vanilla_option.otype
k = vanilla_option.strike
maturity = vanilla_option.maturity
d1 = (np.log(s0 / k) + (r + 0.5 * sigma ** 2)
* maturity) / (sigma * np.sqrt(maturity))
d2 = d1 - sigma * np.sqrt(maturity)
return (otype * s0 * ss.norm.cdf(otype * d1) #line break needs parenthesis
- otype * np.exp(-r * maturity) * k * ss.norm.cdf(otype * d2))
#Gbm.bsm_price = bsm_price
def arasian(self, vanilla_option):
s0 = self.init_state
sigma = self.vol_ratio
r = self.drift_ratio
n = self.steps
sim_N = self.sim_number
otype = vanilla_option.otype
k = vanilla_option.strike
maturity = vanilla_option.maturity
price = 0
for j in range(sim_N):
t = np.linspace(0, maturity, n+1) # init mesh
h = 1/n # the scale of each step
W = np.zeros(n+1) # init BM with 0
## W[0] = np.random.normal(0, np.sqrt(T1))
for i in range(n):
W[i+1] = W[i] + np.sqrt(h) * np.random.normal()
S = s0 * np.exp((r - sigma**2/2.) * t + sigma * W) #GBM path
AT = np.mean(S)
price = price + np.exp(-r*maturity)*np.max([AT-k, 0])
return price/sim_N
gbm1 = Gbm()
option1 = VanillaOption()
gbm1.arasian(option1)
option_price = gbm1.bsm_price(option1)
print(option_price)
| 0.619241 | 0.994157 |
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
%matplotlib inline
from torch.utils.data import Dataset, DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
m = 1000 # 5, 10, 20, 100, 500, 1000
desired_num = 1000
tr_i = 0
tr_j = int(desired_num/2)
tr_k = desired_num
tr_i, tr_j, tr_k
```
# Generate dataset
```
np.random.seed(12)
y = np.random.randint(0,10,5000)
idx= []
for i in range(10):
print(i,sum(y==i))
idx.append(y==i)
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [4,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [5.5,6],cov=[[0.01,0],[0,0.01]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [4.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [3,3.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [2.5,5.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [3.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [5.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [7,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [6.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [5,3],cov=[[0.01,0],[0,0.01]],size=sum(idx[9]))
x[idx[0]][0], x[idx[5]][5]
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
bg_idx = [ np.where(idx[3] == True)[0],
np.where(idx[4] == True)[0],
np.where(idx[5] == True)[0],
np.where(idx[6] == True)[0],
np.where(idx[7] == True)[0],
np.where(idx[8] == True)[0],
np.where(idx[9] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
np.unique(bg_idx).shape
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
foreground_classes = {'class_0','class_1', 'class_2'}
background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'}
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
np.reshape(a,(2*m,1))
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
np.random.seed(j)
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
# print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
mosaic_list_of_images.append(np.reshape(a,(2*m,1)))
mosaic_label.append(fg_class)
fore_idx.append(fg_idx)
mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T
mosaic_list_of_images.shape
mosaic_list_of_images.shape, mosaic_list_of_images[0]
for j in range(m):
print(mosaic_list_of_images[0][2*j:2*j+2])
def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m):
"""
    mosaic_dataset : each data point is a mosaic of m two-dimensional points stacked into a vector of length 2*m
    labels : mosaic_dataset labels
    foreground_index : list of indexes giving, for each mosaic, where the foreground point sits; used to take the weighted average
    dataset_number : weight given to the foreground point; for value j the foreground weight is j/m and each background weight is (m-j)/((m-1)*m)
"""
avg_image_dataset = []
cnt = 0
counter = np.zeros(m) #np.array([0,0,0,0,0,0,0,0,0])
for i in range(len(mosaic_dataset)):
img = torch.zeros([2], dtype=torch.float64)
np.random.seed(int(dataset_number*10000 + i))
give_pref = foreground_index[i] #np.random.randint(0,9)
# print("outside", give_pref,foreground_index[i])
for j in range(m):
if j == give_pref:
img = img + mosaic_dataset[i][2*j:2*j+2]*dataset_number/m #2 is data dim
else :
img = img + mosaic_dataset[i][2*j:2*j+2]*(m-dataset_number)/((m-1)*m)
if give_pref == foreground_index[i] :
# print("equal are", give_pref,foreground_index[i])
cnt += 1
counter[give_pref] += 1
else :
counter[give_pref] += 1
avg_image_dataset.append(img)
print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
print("the averaging are done as ", counter)
return avg_image_dataset , labels , foreground_index
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m)
test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m)
avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)
# avg_image_dataset_1 = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))
# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))
print("=="*40)
test_dataset = torch.stack(test_dataset, axis = 0)
# test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(test_dataset, keepdims= True, axis = 0))
# print(torch.std(test_dataset, keepdims= True, axis = 0))
print("=="*40)
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("dataset4 CIN with alpha = 1/"+str(m))
x1 = (test_dataset).numpy() / m
y1 = np.array(labels)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("test dataset4")
test_dataset[0:10]/m
test_dataset = test_dataset/m
test_dataset[0:10]
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list_of_images, mosaic_label):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list_of_images
self.label = mosaic_label
#self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx]
avg_image_dataset_1[0].shape
avg_image_dataset_1[0]
batch = 200
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
testdata_11 = MosaicDataset(test_dataset, labels )
testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False)
class Whatnet(nn.Module):
def __init__(self):
super(Whatnet,self).__init__()
self.linear1 = nn.Linear(2,3)
# self.linear2 = nn.Linear(50,10)
# self.linear3 = nn.Linear(10,3)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.zeros_(self.linear1.bias)
def forward(self,x):
# x = F.relu(self.linear1(x))
# x = F.relu(self.linear2(x))
x = (self.linear1(x))
return x
def calculate_loss(dataloader,model,criter):
model.eval()
r_loss = 0
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
outputs = model(inputs)
loss = criter(outputs, labels)
r_loss += loss.item()
return r_loss/(i+1)
def test_all(number, testloader,net):
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
pred = np.concatenate(pred, axis = 0)
out = np.concatenate(out, axis = 0)
print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) )
print("correct: ", correct, "total ", total)
print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list):
print("--"*40)
print("training on data set ", ds_number)
torch.manual_seed(12)
net = Whatnet().double()
net = net.to("cuda")
criterion_net = nn.CrossEntropyLoss()
optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9)
acti = []
loss_curi = []
epochs = 1000
running_loss = calculate_loss(trainloader,net,criterion_net)
loss_curi.append(running_loss)
print('epoch: [%d ] loss: %.3f' %(0,running_loss))
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_net.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion_net(outputs, labels)
# print statistics
running_loss += loss.item()
loss.backward()
optimizer_net.step()
running_loss = calculate_loss(trainloader,net,criterion_net)
if(epoch%200 == 0):
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
loss_curi.append(running_loss) #loss per epoch
if running_loss<=0.05:
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
break
print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
for i, j in enumerate(testloader_list):
test_all(i+1, j,net)
print("--"*40)
return loss_curi
train_loss_all=[]
testloader_list= [ testloader_1, testloader_11]
train_loss_all.append(train_all(trainloader_1, 1, testloader_list))
%matplotlib inline
for i,j in enumerate(train_loss_all):
plt.plot(j,label ="dataset "+str(i+1))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
```
|
github_jupyter
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
%matplotlib inline
from torch.utils.data import Dataset, DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
m = 1000 # 5, 10, 20, 100, 500, 1000
desired_num = 1000
tr_i = 0
tr_j = int(desired_num/2)
tr_k = desired_num
tr_i, tr_j, tr_k
np.random.seed(12)
y = np.random.randint(0,10,5000)
idx= []
for i in range(10):
print(i,sum(y==i))
idx.append(y==i)
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [4,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [5.5,6],cov=[[0.01,0],[0,0.01]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [4.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [3,3.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [2.5,5.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [3.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [5.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [7,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [6.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [5,3],cov=[[0.01,0],[0,0.01]],size=sum(idx[9]))
x[idx[0]][0], x[idx[5]][5]
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
bg_idx = [ np.where(idx[3] == True)[0],
np.where(idx[4] == True)[0],
np.where(idx[5] == True)[0],
np.where(idx[6] == True)[0],
np.where(idx[7] == True)[0],
np.where(idx[8] == True)[0],
np.where(idx[9] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
np.unique(bg_idx).shape
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
for i in range(10):
plt.scatter(x[idx[i],0],x[idx[i],1],label="class_"+str(i))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
foreground_classes = {'class_0','class_1', 'class_2'}
background_classes = {'class_3','class_4', 'class_5', 'class_6','class_7', 'class_8', 'class_9'}
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
np.reshape(a,(2*m,1))
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
np.random.seed(j)
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
# print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
mosaic_list_of_images.append(np.reshape(a,(2*m,1)))
mosaic_label.append(fg_class)
fore_idx.append(fg_idx)
mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T
mosaic_list_of_images.shape
mosaic_list_of_images.shape, mosaic_list_of_images[0]
for j in range(m):
print(mosaic_list_of_images[0][2*j:2*j+2])
def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m):
"""
    mosaic_dataset : each data point is a mosaic of m two-dimensional points stacked into a vector of length 2*m
    labels : mosaic_dataset labels
    foreground_index : list of indexes giving, for each mosaic, where the foreground point sits; used to take the weighted average
    dataset_number : weight given to the foreground point; for value j the foreground weight is j/m and each background weight is (m-j)/((m-1)*m)
"""
avg_image_dataset = []
cnt = 0
counter = np.zeros(m) #np.array([0,0,0,0,0,0,0,0,0])
for i in range(len(mosaic_dataset)):
img = torch.zeros([2], dtype=torch.float64)
np.random.seed(int(dataset_number*10000 + i))
give_pref = foreground_index[i] #np.random.randint(0,9)
# print("outside", give_pref,foreground_index[i])
for j in range(m):
if j == give_pref:
img = img + mosaic_dataset[i][2*j:2*j+2]*dataset_number/m #2 is data dim
else :
img = img + mosaic_dataset[i][2*j:2*j+2]*(m-dataset_number)/((m-1)*m)
if give_pref == foreground_index[i] :
# print("equal are", give_pref,foreground_index[i])
cnt += 1
counter[give_pref] += 1
else :
counter[give_pref] += 1
avg_image_dataset.append(img)
print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(cnt))
print("the averaging are done as ", counter)
return avg_image_dataset , labels , foreground_index
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m)
test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m)
avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)
# avg_image_dataset_1 = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))
# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))
print("=="*40)
test_dataset = torch.stack(test_dataset, axis = 0)
# test_dataset = (avg - torch.mean(avg, keepdims= True, axis = 0)) / torch.std(avg, keepdims= True, axis = 0)
# print(torch.mean(test_dataset, keepdims= True, axis = 0))
# print(torch.std(test_dataset, keepdims= True, axis = 0))
print("=="*40)
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("dataset4 CIN with alpha = 1/"+str(m))
x1 = (test_dataset).numpy() / m
y1 = np.array(labels)
plt.scatter(x1[y1==0,0], x1[y1==0,1], label='class 0')
plt.scatter(x1[y1==1,0], x1[y1==1,1], label='class 1')
plt.scatter(x1[y1==2,0], x1[y1==2,1], label='class 2')
plt.legend()
plt.title("test dataset4")
test_dataset[0:10]/m
test_dataset = test_dataset/m
test_dataset[0:10]
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list_of_images, mosaic_label):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list_of_images
self.label = mosaic_label
#self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx]
avg_image_dataset_1[0].shape
avg_image_dataset_1[0]
batch = 200
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
testdata_11 = MosaicDataset(test_dataset, labels )
testloader_11 = DataLoader( testdata_11 , batch_size= batch ,shuffle=False)
class Whatnet(nn.Module):
def __init__(self):
super(Whatnet,self).__init__()
self.linear1 = nn.Linear(2,3)
# self.linear2 = nn.Linear(50,10)
# self.linear3 = nn.Linear(10,3)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.zeros_(self.linear1.bias)
def forward(self,x):
# x = F.relu(self.linear1(x))
# x = F.relu(self.linear2(x))
x = (self.linear1(x))
return x
def calculate_loss(dataloader,model,criter):
model.eval()
r_loss = 0
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
outputs = model(inputs)
loss = criter(outputs, labels)
r_loss += loss.item()
return r_loss/(i+1)
def test_all(number, testloader,net):
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
pred = np.concatenate(pred, axis = 0)
out = np.concatenate(out, axis = 0)
print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) )
print("correct: ", correct, "total ", total)
print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list):
print("--"*40)
print("training on data set ", ds_number)
torch.manual_seed(12)
net = Whatnet().double()
net = net.to("cuda")
criterion_net = nn.CrossEntropyLoss()
optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9)
acti = []
loss_curi = []
epochs = 1000
running_loss = calculate_loss(trainloader,net,criterion_net)
loss_curi.append(running_loss)
print('epoch: [%d ] loss: %.3f' %(0,running_loss))
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
net.train()
for i, data in enumerate(trainloader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to("cuda"),labels.to("cuda")
# zero the parameter gradients
optimizer_net.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion_net(outputs, labels)
# print statistics
running_loss += loss.item()
loss.backward()
optimizer_net.step()
running_loss = calculate_loss(trainloader,net,criterion_net)
if(epoch%200 == 0):
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
loss_curi.append(running_loss) #loss per epoch
if running_loss<=0.05:
print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
break
print('Finished Training')
correct = 0
total = 0
with torch.no_grad():
for data in trainloader:
images, labels = data
images, labels = images.to("cuda"), labels.to("cuda")
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
for i, j in enumerate(testloader_list):
test_all(i+1, j,net)
print("--"*40)
return loss_curi
train_loss_all=[]
testloader_list= [ testloader_1, testloader_11]
train_loss_all.append(train_all(trainloader_1, 1, testloader_list))
%matplotlib inline
for i,j in enumerate(train_loss_all):
plt.plot(j,label ="dataset "+str(i+1))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
| 0.337204 | 0.736945 |
```
cd ../../
from src import settings
from tqdm import tqdm
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from src.toolbox.eval import evaluate, accumulate_metrics
from src.toolbox.utils import _nms
from src.toolbox.visualization import plot_performance_per_duration
from src.toolbox.data_converters import ActivityNetCap2Instances
import seaborn as sns
import pickle as pkl
import json
import neptune
sns.set_style("white")
# load ground truth test set
raw_data = json.load(open("data/raw/activitynet/val_2.json"))
test_data = ActivityNetCap2Instances(raw_data)
project = neptune.init("mayu-ot/moment-retrieval")
exp_id = "MOM-10"
if not os.path.exists(f"tmp/{exp_id}/{exp_id}76.json"):
exps = project.get_experiments(id=exp_id)
print("downloading results ...")
exps[0].download_artifact(f"{exp_id}76.json", f"tmp/{exp_id}")
print("done!")
exp_id = "MOM-9"
if not os.path.exists(f"tmp/{exp_id}/{exp_id}76.shuffle.json"):
exps = project.get_experiments(id=exp_id)
print("downloading results ...")
exps[0].download_artifact(f"{exp_id}76.shuffle.json", f"tmp/{exp_id}")
print("done!")
def get_duration(video_id):
for x in test_data:
if x[0][0] == video_id:
return x[1][-1]
def postproc_prediction(predictions):
preds = []
for p in tqdm(predictions, desc="postproc"):
query = (p[0], p[2])
video_duration = get_duration(query[0])
seg = [s + [video_duration] for s in p[3][:5]]
preds.append((query, seg, p[5][:5]))
return preds
def eval_preds(pred_file, data):
print(f"loading prediction file {pred_file}")
predictions = json.load(open(pred_file))
preds = postproc_prediction(predictions)
results = evaluate(data, preds)
summary = accumulate_metrics(results)
return results, summary
results, summary = eval_preds("tmp/MOM-10/MOM-1076.json", test_data)
randomized_results, randomized_summary = eval_preds("tmp/MOM-9/MOM-976.shuffle.json", test_data)
# The performances of the SCDM model when the original and randomized videos are fed to the model
def display_score(bar, color="w"):
plt.text(
bar.get_x()+bar.get_width()*.5,
bar.get_height()-6,
f"{bar.get_height():.1f}",
horizontalalignment="center",
fontsize=12,
color=color
)
def plot_performance_comparison(sum_a, sum_b, labels):
plt.figure(figsize=(6, 4))
c = ["#606060", "#d602ee"]
keys = [f"R@{k} IoU>0.5" for k in [1, 5]]
x = np.arange(2)+0.2
for score_summary, l in zip([sum_a, sum_b], labels):
vals = [score_summary[k] * 100 for k in keys]
bars = plt.bar(x, vals, width=0.3, color=c.pop(0), label=l)
for b in bars:
display_score(b, color="w")
x += 0.3
plt.xticks(ticks=np.arange(3)+0.4, labels=keys, ha="center")
plt.legend()
sns.despine(left=False)
plot_performance_comparison(summary, randomized_summary, labels = ["Original", "Randomized"])
# Check the relation between success rates and input video durations
def plot_comparison_per_duration(results, titles, data):
f, axes = plt.subplots(1, len(results), figsize=(20, 5))
plt.rcParams.update({'font.size': 14})
for ax, res, t in zip(axes, results, titles):
_ = plot_performance_per_duration(res, data, ax=ax)
ax.set_title(t)
plot_comparison_per_duration(
(results, randomized_results),
["Original video input", "Randomized video input"],
test_data)
```
|
github_jupyter
|
cd ../../
from src import settings
from tqdm import tqdm
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from src.toolbox.eval import evaluate, accumulate_metrics
from src.toolbox.utils import _nms
from src.toolbox.visualization import plot_performance_per_duration
from src.toolbox.data_converters import ActivityNetCap2Instances
import seaborn as sns
import pickle as pkl
import json
import neptune
sns.set_style("white")
# load ground truth test set
raw_data = json.load(open("data/raw/activitynet/val_2.json"))
test_data = ActivityNetCap2Instances(raw_data)
project = neptune.init("mayu-ot/moment-retrieval")
exp_id = "MOM-10"
if not os.path.exists(f"tmp/{exp_id}/{exp_id}76.json"):
exps = project.get_experiments(id=exp_id)
print("downloading results ...")
exps[0].download_artifact(f"{exp_id}76.json", f"tmp/{exp_id}")
print("done!")
exp_id = "MOM-9"
if not os.path.exists(f"tmp/{exp_id}/{exp_id}76.shuffle.json"):
exps = project.get_experiments(id=exp_id)
print("downloading results ...")
exps[0].download_artifact(f"{exp_id}76.shuffle.json", f"tmp/{exp_id}")
print("done!")
def get_duration(video_id):
for x in test_data:
if x[0][0] == video_id:
return x[1][-1]
def postproc_prediction(predictions):
preds = []
for p in tqdm(predictions, desc="postproc"):
query = (p[0], p[2])
video_duration = get_duration(query[0])
seg = [s + [video_duration] for s in p[3][:5]]
preds.append((query, seg, p[5][:5]))
return preds
def eval_preds(pred_file, data):
print(f"loading prediction file {pred_file}")
predictions = json.load(open(pred_file))
preds = postproc_prediction(predictions)
results = evaluate(data, preds)
summary = accumulate_metrics(results)
return results, summary
results, summary = eval_preds("tmp/MOM-10/MOM-1076.json", test_data)
randomized_results, randomized_summary = eval_preds("tmp/MOM-9/MOM-976.shuffle.json", test_data)
# The performances of the SCDM model when the original and randomized videos are fed to the model
def display_score(bar, color="w"):
plt.text(
bar.get_x()+bar.get_width()*.5,
bar.get_height()-6,
f"{bar.get_height():.1f}",
horizontalalignment="center",
fontsize=12,
color=color
)
def plot_performance_comparison(sum_a, sum_b, labels):
plt.figure(figsize=(6, 4))
c = ["#606060", "#d602ee"]
keys = [f"R@{k} IoU>0.5" for k in [1, 5]]
x = np.arange(2)+0.2
for score_summary, l in zip([sum_a, sum_b], labels):
vals = [score_summary[k] * 100 for k in keys]
bars = plt.bar(x, vals, width=0.3, color=c.pop(0), label=l)
for b in bars:
display_score(b, color="w")
x += 0.3
plt.xticks(ticks=np.arange(3)+0.4, labels=keys, ha="center")
plt.legend()
sns.despine(left=False)
plot_performance_comparison(summary, randomized_summary, labels = ["Original", "Randomized"])
# Check the relation between success rates and input video durations
def plot_comparison_per_duration(results, titles, data):
f, axes = plt.subplots(1, len(results), figsize=(20, 5))
plt.rcParams.update({'font.size': 14})
for ax, res, t in zip(axes, results, titles):
_ = plot_performance_per_duration(res, data, ax=ax)
ax.set_title(t)
plot_comparison_per_duration(
(results, randomized_results),
["Original video input", "Randomized video input"],
test_data)
| 0.392104 | 0.19163 |
| [Table of Contents](#table_of_contents) | [Data and model](#data_and_model) | [Modeling](#modeling) | [Residual diagnostics](#residual_diagnostics) | [Fitting summary](#fitting_summary) | [Session info](#session_info) | [References](#references) |
**Authors:** Andrej Gajdoš, Martina Hančová, Jozef Hanč <br> *[Faculty of Science](https://www.upjs.sk/en/faculty-of-science/?prefferedLang=EN), P. J. Šafárik University in Košice, Slovakia* <br> emails: [[email protected]](mailto:[email protected]), [[email protected]](mailto:[email protected])
***
**<font size=6 color=brown> FDSLRM applications - Temperatures </font>**
<font size=5> Maximum annual temperatures </font>
<a id=table_of_contents></a>
### Table of Contents
* [Data and model](#data_and_model) - data and model description, estimating parameters, software
* [Modeling](#modeling) - loading R functions and packages, data plot, periodogram
* [Residual diagnostics](#residual_diagnostics) - description of graphical tools, numerical tests
* [Fitting summary](#fitting_summary) - estimated model parameters, fit summary
* [Session info](#session_info) - list of applied R packages in computations
* [References](#references) - list of detailed references for data and applied methods
**To get back to the contents, use <font color=brown>the Home key</font>.**
***
<a id=data_and_model></a>
# <font color=brown>Data and model </font>
### Data description
In this FDSLRM application we model the time series data set, denoted as `maxtemp`, representing *maximum annual temperatures*. The number of time series observations is $n=46$; the corresponding plot with more details is shown in the following section **_Modeling_**. The data was adapted from *Hyndman & Athanasopoulos, 2018*.
### Model description
The maximum annual temperature data can be successfully fitted by the FDSLRM of the form:
$$ X(t)=\beta_1+\beta_2\cos\left(\tfrac{2\pi t\cdot 1}{46}\right)+\beta_3\cos\left(\tfrac{2\pi t\cdot 15}{46}\right)
+Y_1\cos\left(\tfrac{2\pi t\cdot 23}{46}\right)
+w(t), \, t\in \mathbb{N}.$$
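Equivalently (a standard rewriting, not taken from the original notebook), the model can be read as a time series linear mixed model
$$X = F\beta + \mathrm{V}\,Y_{1} + w, \qquad F_{t\cdot} = \Bigl(1,\ \cos\tfrac{2\pi t\cdot 1}{46},\ \cos\tfrac{2\pi t\cdot 15}{46}\Bigr), \qquad \mathrm{V}_{t\cdot} = \cos\tfrac{2\pi t\cdot 23}{46},$$
with fixed effects $\beta = (\beta_1, \beta_2, \beta_3)'$, the random effect $Y_{1}$ and white noise $w$; these are the frequencies passed to `fitDiagFDSLRM` in the fitting section below.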
### Computational software
As for numerical calculations, we conducted our computations in _the R statistical computing language_ (https://www.r-project.org; _R Core Team, 2019_) and with R functions designed to work with FDSLRM, programmed by the authors of this Jupyter notebook and included in the _fdslrm_ package (Gajdoš et al., 2019). The complete list of used R libraries is included in **_Session info_**.
>### Important note
>The iterative model building was done analogously to our extended application examples
>* [Tourism](Tourism.ipynb)
>* [Cyber attacks](Cyberattacks.ipynb)
>
>which give more details about the modelling procedure, diagnostic tools and technical information.
>
>These two illustrative examples are also among the real data set examples in our current paper **Hančová et al., 2019** on estimating FDSLRM variance parameters, which gives a detailed description of the procedures used.
>
>* Hančová, M., Vozáriková, G., Gajdoš, A., Hanč, J. (2019). [Estimating variance components in time series linear regression models using empirical BLUPs and convex optimization](https://arxiv.org/abs/1905.07771), arXiv preprint, 2019; supplementary materials - software and notebooks at GitHub, https://github.com/fdslrm/EBLUP-NE.
| [Table of Contents](#table_of_contents) | [Data and model](#data_and_model) | [Modeling](#modeling) | [Residual diagnostics](#residual_diagnostics) | [Fitting summary](#fitting_summary) | [Session info](#session_info) | [References](#references) |
***
<a id=modeling></a>
# <font color=brown>Modeling </font>
### Loading R functions and packages
```
# use this cell, if you started this notebook locally in your PC
library(fdslrm)
initialFDSLRM()
# use this cell, if you started this notebook in the Binder
devtools::source_url("https://github.com/fdslrm/fdslrmAllinOne/blob/master/fdslrmAllinOne.R?raw=TRUE")
initialFDSLRM()
```
### Read data
```
# reading data from Hyndman's package fpp2
x <- maxtemp
# times
t <- 1:length(x)
```
### Data plot
Maximum annual temperatures (degrees Celsius) for Moorabbin Airport, Melbourne. 1971-2016.
```
# IPython setting for output
options(repr.plot.res=120, repr.plot.height=4.5, repr.plot.width=6.5)
# plotting data from Hyndman's package fpp2
plot(x, type = "o", xlab = "time", ylab = "temperature")
```
### Spectral analysis - Periodogram
```
periodo <- spec.pgram(as.numeric(x), log="no")
```
#### Six most significant frequencies according to values of spectrum in periodogram
```
drawTable(type = "periodogram", periodogram = periodo)
# orders k for Fourier frequencies
print(round(length(x)*c(0.3333333,0.50000,0.0416667,0.0208333,0.18750,0.3958333)))
fnames = c("$15/46$", "$23/46$", "$2/46$", "$1/46$", "$9/46$", "$18/46$")
drawTable(type = "periodogram", periodogram = periodo, frequencies = fnames)
```
| [Table of Contents](#table_of_contents) | [Data and model](#data_and_model) | [Modeling](#modeling) | [Residual diagnostics](#residual_diagnostics) | [Fitting summary](#fitting_summary) | [Session info](#session_info) | [References](#references) |
***
<a id=residual_diagnostics></a>
# <font color=brown> Residual diagnostics </font>
### Graphical (exploratory) tools
>
>|$ $|$\large\mbox{Graphical-tools diagnostic matrix}$|$ $|
>|---|---|---|
>|$\mbox{linearity of fixed effects (L)}$|$\mbox{outlying observations (O1)}$|$\mbox{independence of cond. errors (ACF)}$|
>|**stand. marg. residuals vs marg. fitted values**|**stand. marg. residuals vs times**|**ACF of cond. residuals**|
>|$\mbox{homoscedasticity of cond. errors (H)}$|$\mbox{outlying observations (O2)}$|$\mbox{independence of cond. errors (PACF)}$|
>|**stand. cond. residuals vs cond. predictions**|**stand. cond. residuals vs times**|**PACF of cond. residuals**|
>|$\mbox{normality of cond. errors (N1)}$|$\mbox{normality of cond. errors (N2)}$|$\mbox{normality of cond. errors (N3)}$|
>|**histogram of cond. residuals**|**histogram of stand. least conf. residuals**|**stand. least conf. residuals vs $\mathcal{N}(0,1)$ quantiles**|
```
# Fitting the final FDSLRM
output <- fitDiagFDSLRM(as.numeric(x), t, c(1/46, 15/46), include_fixed_eff = c(1,0,1,0),
freq_random = c(23/46), include_random_eff = c(1,0),
poly_trend_degree = 0)
options(repr.plot.res=600, repr.plot.height=9, repr.plot.width=10)
drawDiagPlots("all", output)
```
### Numerical tests
#### Tests of residual independence
```
print(output$Box_test_lag10_resid)
print(output$BoxLjung_test_lag10_resid)
```
#### Test of residual normality
```
print(output$ShapiroWilk_test_norm_cond_resid)
print(output$ShapiroWilk_test_stand_least_conf_resid)
```
| [Table of Contents](#table_of_contents) | [Data and model](#data_and_model) | [Modeling](#modeling) | [Residual diagnostics](#residual_diagnostics) | [Fitting summary](#fitting_summary) | [Session info](#session_info) | [References](#references) |
***
<a id=fitting_summary></a>
# <font color=brown> Fitting summary </font>
### Parameter estimates
#### Estimates of regression coefficients
```
drawTable(type = "fixed", fixed_eff = output$fixed_effects)
```
#### Predictions of random effects
```
drawTable(type = "random", random_eff = output$random_effects)
```
#### Estimates of variance parameters
```
drawTable(type = "variance", variances = c(output$error_variance, diag(output$rand_eff_variance)))
```
### Fit summary
#### Graphical summary for the final model
* plot: **time series observations (black), fitted values (blue), estimated trend (red) vs times**
```
options(repr.plot.res=120, repr.plot.height=5, repr.plot.width=6.5)
drawDiagPlots(output$diagnostic_plots_names$FittedTimeSeries, output)
```
#### Numerical summary for the final model
```
print(output$fit_summary)
```
| [Table of Contents](#table_of_contents) | [Data and model](#data_and_model) | [Modeling](#modeling) | [Residual diagnostics](#residual_diagnostics) | [Fitting summary](#fitting_summary) | [Session info](#session_info) | [References](#references) |
***
<a id=session_info></a>
# <font color=brown> Session info </font>
```
print(sessionInfo())
```
| [Table of Contents](#table_of_contents) | [Data and model](#data_and_model) | [Modeling](#modeling) | [Residual diagnostics](#residual_diagnostics) | [Fitting summary](#fitting_summary) | [Session info](#session_info) | [References](#references) |
***
<a id=references></a>
# <font color=brown> References </font>
* Hyndman, R. J., Athanasopoulos, G. (2018). [Forecasting: Principles and Practice (2nd ed.)](https://otexts.com/fpp2/index.html). OTexts: Melbourne, Australia
* Gajdoš, A., Hanč, J., Hančová, M. (2019), [R package for modeling and prediction of time series using linear mixed models](https://github.com/fdslrm/R-package), GitHub repository https://github.com/fdslrm/R-package
* R Core Team (2019). R: A language and environment for statistical computing. R Foundation for
Statistical Computing, Vienna, Austria, https://www.R-project.org/
| [Table of Contents](#table_of_contents) | [Data and model](#data_and_model) | [Modeling](#modeling) | [Residual diagnostics](#residual_diagnostics) | [Fitting summary](#fitting_summary) | [Session info](#session_info) | [References](#references) |
# Formalia:
Please read the [assignment overview page](https://github.com/lalessan/comsocsci2021/wiki/Assignments) carefully before proceeding. This page contains information about formatting (including formats etc), group sizes, and many other aspects of handing in the assignment.
_If you fail to follow these simple instructions, it will negatively impact your grade!_
**Due date and time**: The assignment is due on Tuesday, April 6th at 23:55. Hand in your Jupyter notebook file (with extension `.ipynb`) via DTU Learn _(Course Content, Assignments, Assignment 2)_
## Part 1: Properties of the real-world network of Redditors
For this exercise, you need the network of redditors you built in Week 3, Part 3. Feel free to load it from file or compute it in the notebook.
> _Exercise_
> 2.1 _Random Network_: In this exercise we will create a Random Network as a null model to investigate some properties of the Redditors Network.
> * Compute the value of _p_ such that the number of expected edges of the random network equals the number of edges in the redditor network (see equation 3.2 in your Network Science Book). What is the value of p? Compute the average value of the degree < k > (using the formula).
> * Use NetworkX to create a Random network with the same number of nodes as the Redditors network, and _p_ as computed above. You can use the function [``erdos_renyi_graph``](https://networkx.org/documentation/stable/reference/generated/networkx.generators.random_graphs.erdos_renyi_graph.html#networkx.generators.random_graphs.erdos_renyi_graph) in NetworkX (a minimal code sketch for this part is given right after the exercise).
>
>
> 2.2 _Degree Distribution_: In this exercise, we will compare the degree distribution of the real network and its random counterpart.
> * Compute the distribution of degree for the random network using the numpy function ``np.histogram``. Bin your histogram using 10 linearly spaced bins. Remember to pass the parameter ``density=True``.
> * Compute the distribution of degree for the Redditors undirected network using the numpy function ``np.histogram``. Bin your histogram using 10 logarithmically spaced bins. Remember to pass the parameter ``density=True``.
> * Plot the two distributions you computed in steps 1. and 2. in the same figure as two line-graphs. Log-scale the x and y axes.
> * Comment on the figure. What are the differences between the two distributions? Why did I ask you to use two different types of binning?
> * What is the average degree of the random and the real network? Comment on the relation between the two.
>
> 2.3 _Shortest Paths_: Here, we will check if the Redditors Network is a small-world Network.
> * Compute the average shortest path for the largest connected component of the Redditors network (for a reminder of the definition of connected components, check [section 2.9 of the Network Science book](http://networksciencebook.com/chapter/2#connectedness)). You can use the following steps:
> * Use [``nx.algorithms.connected_components``](https://networkx.org/documentation/stable//reference/algorithms/generated/networkx.algorithms.components.connected_components.html) to find all connected components (it returns a list of subsets of the original set of nodes).
> * Consider the largest subset of nodes found in the step above. Build the subgraph of your original network containing those nodes exclusively, using [``nx.Graph.subgraph``](https://networkx.org/documentation/stable/reference/classes/generated/networkx.Graph.subgraph.html). This corresponds to your largest connected component (a.k.a [giant component](https://en.wikipedia.org/wiki/Giant_component)).
> * Compute the average shortest path length of the giant component using [``nx.average_shortest_path_length``](https://networkx.org/documentation/networkx-1.3/reference/generated/networkx.average_shortest_path_length.html)
> * Compute the average shortest path length for the giant component of the random network you built in exercise 2.1.
> * Comment on the relation between the average shortest path length in the real and the random networks. Can you conclude that the small-world property applies to the Redditors network?
>
> 2.4 _Clustering_: Here, we will compare the clustering coefficient in the Redditors Network and its random counterpart.
> * Compute the clustering coefficient for all nodes in the random network, using networkx [``clustering``](https://networkx.org/documentation/stable/reference/algorithms/generated/networkx.algorithms.cluster.clustering.html#networkx.algorithms.cluster.clustering) function. Compute the average across nodes. Is it consistent with the analytical prediction (network science book equation 3.21)?
> * Compute the average clustering coefficient for the Redditors network. How does it compare to its random counterpart? Is it something you would expect? Why?
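A minimal sketch of the NetworkX/NumPy steps for Exercises 2.1-2.4, assuming the Week 3 Redditors network is already available as an undirected graph `G_real`; the variable name and the commented load path are only illustrative, not part of the assignment:

```
import networkx as nx
import numpy as np

# G_real = nx.read_edgelist("redditors.edgelist")   # load the Week 3 network (file name illustrative)

N = G_real.number_of_nodes()
L = G_real.number_of_edges()

# 2.1: p such that the expected edge count p*N*(N-1)/2 equals L (eq. 3.2), and <k> = p*(N-1)
p = L / (N * (N - 1) / 2)
avg_k = p * (N - 1)
G_rand = nx.erdos_renyi_graph(N, p, seed=42)

# 2.2: degree distributions -- 10 linear bins for the random graph, log-spaced bins for the real one
deg_rand = [d for _, d in G_rand.degree()]
deg_real = [d for _, d in G_real.degree()]
hist_rand, edges_rand = np.histogram(deg_rand, bins=10, density=True)
log_bins = np.logspace(np.log10(max(min(deg_real), 1)), np.log10(max(deg_real)), num=10)
hist_real, edges_real = np.histogram(deg_real, bins=log_bins, density=True)

# 2.3: average shortest path length of the giant component (repeat for G_rand)
giant_nodes = max(nx.connected_components(G_real), key=len)
giant = G_real.subgraph(giant_nodes)
aspl_real = nx.average_shortest_path_length(giant)

# 2.4: average clustering coefficient; for G(N, p) the analytical prediction (eq. 3.21) is simply p
avg_clust_rand = np.mean(list(nx.clustering(G_rand).values()))
avg_clust_real = np.mean(list(nx.clustering(G_real).values()))
```

For an Erdős-Rényi graph, the expected clustering coefficient equals p and distances grow only logarithmically with N, which is the baseline against which the small-world question in 2.3 is judged.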
## Part 2: TF-IDF
For this exercise, you need the following data:
* The r/wallstreetbets submissions (either the one provided by me [here](https://github.com/lalessan/comsocsci2021/blob/master/data/wallstreet_subs.csv.gz) or the one you downloaded in Week 6)
* The list of 15 stocks you identified in Week 6, Exercise 2.
> _Exercise_
> 1. Tokenize the __text__ of each submission in the wallstreetbets submission dataset. Create a column __tokens__ in your dataframe containing the tokens.
> 2. Find submissions discussing at least one of the top 15 stocks you previously identified.
> 3. Now, we want to find out which words are important for each *stock*, so we're going to create several ***large documents, one for each stock***. Each document includes all the tokens related to the same stock. We will also have a document including discussions that do not relate to the top 15 stocks.
> 4. Now, we're ready to calculate the TF for each word. Use the method of your choice to find the top 5 terms within __5 stocks of your choice__.
> * Describe similarities and differences between the stocks.
> * Why aren't the TFs necessarily a good description of the stocks?
> * Next, we calculate IDF for every word.
> * What base logarithm did you use? Is that important?
> 5. We're ready to calculate TF-IDF. Do that for the __5 stocks of your choice__ (a minimal TF-IDF sketch is given after this exercise).
> * List the 10 top TF words for each stock.
> * List the 10 top TF-IDF words for each stock.
> * Are these 10 words more descriptive of the stock? If yes, what is it about IDF that makes the words more informative?
> 6. Now, create a word cloud for each stock.
> 7. Comment on the results. Are these words to be expected? Is there anything that is surprising?
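A minimal sketch of the TF and TF-IDF computations, assuming `docs` is a dictionary mapping each stock (plus one "other" document) to its token list as built in step 3; all names here, including the ticker `"GME"`, are illustrative:

```
import math
from collections import Counter

def tf(tokens):
    """Term frequency: raw counts normalised by document length."""
    counts = Counter(tokens)
    total = len(tokens)
    return {term: c / total for term, c in counts.items()}

def idf(docs, base=10):
    """Inverse document frequency over the collection of per-stock documents."""
    n_docs = len(docs)
    df = Counter()
    for tokens in docs.values():
        df.update(set(tokens))          # a term counts once per document
    return {term: math.log(n_docs / df[term], base) for term in df}

def tf_idf(docs, base=10):
    idf_scores = idf(docs, base)
    return {name: {term: w * idf_scores[term] for term, w in tf(tokens).items()}
            for name, tokens in docs.items()}

# scores = tf_idf(docs)
# top10 = sorted(scores["GME"].items(), key=lambda kv: kv[1], reverse=True)[:10]
```

Note that changing the `base` parameter (10, 2 or e) only rescales every IDF value by the same constant, so the ranking of terms is unchanged.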
## Part 3: Sentiment analysis
> _Exercise: Dictionary-based method to study sentiment on r/wallstreetbets_
>
> 1. Open the _wallstreetbets submissions dataframe_ we used in Week 6 (my version is [here](https://github.com/lalessan/comsocsci2021/blob/master/data/wallstreet_subs.csv.gz)).
> 2. Tokenize the _text_ of each submission (use the code you created in Week 6, exercise 3, point 2). Add it as a new column of your dataframe called "tokens".
> 3. Create one document per day in the dataset, containing all tokens for that day. Plot the length of the document (number of tokens) over time. Are the documents long enough to use a dictionary-based method?
> 4. Plot the average daily happiness on r/wallstreetbets using the documents you created in step 3 (remember the video lesson in Week 2 on plotting nice time-series); a minimal sketch of the dictionary-based computation is given after this exercise.
> 5. Compare happiness on _r/wallstreetbets_ to happiness on Twitter using the [hedonometer](http://hedonometer.org/timeseries/en_all/?from=2019-09-16&to=2021-03-15). What is (approximately) the average value of the two time-series? How do you explain any difference between the two?
> 6. Download the daily Close price of the [S&P 500 Index](https://finance.yahoo.com/quote/%5EGSPC/history?p=%5EGSPC) and plot it over time. This index measures the stock performance of 500 large companies listed on stock exchanges in the United States, you can read more about it in the [S&P 500 Wikipedia page](https://en.wikipedia.org/wiki/S%26P_500).
> 7. Compare the figures you created in steps 4. and 6. What do you observe?
> 8. Discuss the limitations of your analysis.
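A minimal sketch of the dictionary-based daily happiness score, assuming `daily_docs` maps each date to that day's token list (step 3) and `happiness_dict` maps words to happiness scores (for example, loaded from the labMT word list that the hedonometer is based on); both names are assumptions for illustration:

```
import numpy as np
import pandas as pd

def average_happiness(tokens, happiness_dict):
    """Mean dictionary score over the tokens that have an entry in the dictionary."""
    # note: the hedonometer additionally filters out near-neutral words; that refinement is omitted here
    scores = [happiness_dict[t] for t in tokens if t in happiness_dict]
    return np.mean(scores) if scores else np.nan

# daily_happiness = pd.Series(
#     {day: average_happiness(tokens, happiness_dict) for day, tokens in daily_docs.items()}
# ).sort_index()
# daily_happiness.plot()
```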
## Part 4: Communities for the Zachary Karate Club Network
> _Exercise: The Zachary's karate club Network_: In this exercise, we will work on Zachary's karate club graph (refer to the Introduction of Chapter 9). The dataset is available in NetworkX, by calling the function [karate_club_graph](https://networkx.org/documentation/stable//auto_examples/graph/plot_karate_club.html).
>
> 1. Visualize the graph using [netwulf](https://netwulf.readthedocs.io/en/latest/). Set the color of each node based on the club split (the information is stored as a node attribute). My version of the visualization is below.
>
> 2. Write a function to compute the __modularity__ of a graph partitioning (use **equation 9.12** in the book). The function should take a networkX Graph and a partitioning as inputs and return the modularity.
> 3. Explain in your own words the concept of _modularity_.
> 4. Compute the modularity of the Karate club split partitioning using the function you just wrote. Note: the Karate club split partitioning is available as a [node attribute](https://networkx.org/documentation/networkx-1.10/reference/generated/networkx.classes.function.get_node_attributes.html), called _"club"_.
> 5. We will now perform a small randomization experiment to assess if the modularity you just computed is statistically different from $0$. To do so, we will implement a [configuration model](https://en.wikipedia.org/wiki/Configuration_model). In short, we will create a new network, such that each node has exactly the same degree as in the original network, but different connections. Here is how the algorithm works (a minimal code sketch is also given after this exercise).
> * __a.__ Create an identical copy of your original network.
> * __b.__ Consider the list of network edges. Create two lists: the list of source nodes and target nodes.
> * __c.__ Shuffle the list of target nodes. Create new edges that have as sources the original source nodes and as targets the shuffled target nodes.
> * __d.__ Remove all the original network edges from your network. Add all the new _shuffled_ edges you created in step __c.__.
> 6. Double check that your algorithm works well, by showing that the degrees of the nodes in the original network and in the configuration model are the same.
> 7. Create $1000$ randomized versions of the Karate Club network using the algorithm you wrote in step 5. For each of them, compute the modularity of the "club" split and store it in a list.
> 8. Compute the average and standard deviation of the modularity for the configuration model.
> 9. Plot the distribution of the configuration model modularity. Plot the actual modularity of the club split as a vertical line (use [axvline](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.axvline.html)).
> 10. Comment on the figure. Is the club split a good partitioning? Why do you think I asked you to compare with the configuration model? Why did we preserve the node degrees?
> 11. Use [the Python Louvain-algorithm implementation](https://anaconda.org/auto/python-louvain) to find communities in this graph. Report the value of modularity found by the algorithm. Is it higher or lower than what you found above for the club split? What does this comparison reveal?
> 12. Compare the communities found by the Louvain algorithm with the club split partitioning by creating a matrix **_D_** with dimension (2 times _A_), where _A_ is the number of communities found by Louvain. We set entry _D_(_i_,_j_) to be the number of nodes that community _i_ has in common with group split _j_. The matrix **_D_** is what we call a [**confusion matrix**](https://en.wikipedia.org/wiki/Confusion_matrix). Use the confusion matrix to explain how well the communities you've detected correspond to the club split partitioning.
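A minimal sketch of the modularity computation and of the degree-preserving shuffle described in step 5. For a partition into communities, equation 9.12 amounts to $Q=\sum_c \left[\frac{L_c}{L}-\left(\frac{k_c}{2L}\right)^2\right]$, where $L_c$ is the number of edges inside community $c$ and $k_c$ its total degree; here the partition is represented as a dict mapping node to community label (an assumption of this sketch):

```
import random
import networkx as nx

def modularity(G, partition):
    """Modularity of a partition given as {node: community label} (eq. 9.12)."""
    L = G.number_of_edges()
    Q = 0.0
    for c in set(partition.values()):
        nodes_c = [n for n in G.nodes() if partition[n] == c]
        L_c = G.subgraph(nodes_c).number_of_edges()        # edges fully inside community c
        k_c = sum(d for _, d in G.degree(nodes_c))         # total degree of community c
        Q += L_c / L - (k_c / (2 * L)) ** 2
    return Q

def shuffled_configuration_model(G):
    """Degree-preserving randomisation following steps a-d: shuffle the target end of every edge."""
    H = G.copy()
    sources, targets = zip(*H.edges())
    targets = list(targets)
    random.shuffle(targets)
    H.remove_edges_from(list(H.edges()))
    H.add_edges_from(zip(sources, targets))   # self-loops/parallel edges may collapse in a simple Graph
    return H

# G = nx.karate_club_graph()
# club_split = nx.get_node_attributes(G, "club")
# Q_club = modularity(G, club_split)
# Q_random = [modularity(shuffled_configuration_model(G), club_split) for _ in range(1000)]
```

Because collapsed self-loops and parallel edges can slightly perturb degrees in a simple `Graph`, the degree check in step 6 is worth doing explicitly.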
```
import matplotlib.pyplot as plt
import scipy.stats as st
import numpy as np
import pandas as pd
import pymc3 as pm
import arviz as az
import statsmodels.api as sm
import math
N = 15
p = 0.7
W = range(0,16)
probs = st.binom.pmf(k=W, n=N, p=p)   # binomial PMF over all possible counts W
plt.bar(W, probs)
plt.xlabel("W")
plt.ylabel("Probability")
plt.title("Probability Mass Function of W")
plt.show()
sum(probs)
# normal approximation check: larger N, zoomed in around the mean
N = 100
p = 0.7
W = range(0, 100)
probs = st.binom.pmf(k=W, n=N, p=p)
plt.bar(W, probs)
plt.xlabel("W")
plt.ylabel("Probability")
plt.title("Probability Mass Function of W")
plt.xlim([40, 90])
# overlay the normal approximation: mean N*p, standard deviation sqrt(N*p*(1-p))
line = st.norm.pdf(x=W, loc=N*p, scale=math.sqrt(N*p*(1-p)))
plt.plot(W, line)
plt.show()
mu = 10
sigma = 1
samples = np.random.lognormal(mean=mu,sigma=sigma,size=10000)
plt.hist(samples, 10,range=[0,250000],weights=np.ones_like(samples) / len(samples))
plt.ylabel("Probability")
plt.xlabel("Observed Wealth in Examplian dirhams (EXD)")
plt.show()
plt.hist(samples, 20,range=[0,250000],weights=np.ones_like(samples) / len(samples))
plt.ylabel("Probability")
plt.xlabel("Observed Wealth in Examplian dirhams (EXD)")
plt.show()
plt.hist(samples, 40,range=[0,250000],weights=np.ones_like(samples) / len(samples))
plt.ylabel("Probability")
plt.xlabel("Observed Wealth in Examplian dirhams (EXD)")
plt.show()
plt.hist(samples, 200,range=[0,250000],weights=np.ones_like(samples) / len(samples))
plt.ylabel("Probability")
plt.xlabel("Observed Wealth in Examplian dirhams (EXD)")
plt.show()
plt.hist(samples, 200,range=[0,250000],density=True)
plt.ylabel("Probability density")
plt.xlabel("Observed Wealth in Examplian dirhams (EXD)")
plt.show()
plt.hist(samples, 200,range=[0,250000],density=True)
points = range(0, 250000)
wealth_pdf = st.lognorm.pdf(x=points, scale=math.exp(mu), s=sigma)
plt.plot(points, wealth_pdf)
plt.ylabel("Probability density")
plt.xlabel("Observed Wealth in Examplian dirhams (EXD)")
plt.show()
N = 15
p = 0.7
W = range(0,16)
probs = st.binom.pmf(k=W, n=N, p=p)
plt.bar(W,probs)
plt.xlabel("W")
plt.ylabel("Probability")
plt.title("Probability Mass Function of W")
plt.axvline(x=7,color='deeppink')
plt.show()
N = 15
p = 0.7
W = range(0,16)
probs = st.binom.pmf(k=W, n=N, p=p)
plt.bar(W,probs)
plt.xlabel("W")
plt.ylabel("Probability")
plt.title("Probability Mass Function of W")
plt.axvline(x=7,color='deeppink')
plt.axhline(y=probs[7],color='deeppink')
plt.show()
probs[7]
N = 15
p = 0.4
W = range(0,16)
probs = st.binom.pmf(k=W, n=N, p=p)
plt.bar(W,probs)
plt.xlabel("W")
plt.ylabel("Probability")
plt.title("Probability Mass Function of W")
plt.axvline(x=7,color='deeppink')
plt.axhline(y=probs[7],color='deeppink')
plt.show()
probs[7]
N = 15
p = 0.2356
W = range(0,16)
probs = st.binom.pmf(k=W, n=N, p=p)
plt.bar(W,probs)
plt.xlabel("W")
plt.ylabel("Probability")
plt.title("Probability Mass Function of W")
plt.axvline(x=7,color='deeppink')
plt.axhline(y=probs[7],color='deeppink')
plt.show()
probs[7]
likelihoods = st.binom.pmf(k=7, n=N, p=np.linspace(0,1,100))
plt.plot(np.linspace(0,1,100),likelihoods)
plt.ylabel("Likelihood")
plt.xlabel("True value of p")
plt.title("likelihood function of p, given we observe 7 water probes out of 15")
plt.scatter([0.2356,0.4,0.7],[0.030223237303722852,0.17708366168064013,0.034770014284005064],
color='deeppink', s=200,marker='+')
plt.show()
d = pd.read_csv("Data/Howell1.csv", sep=";", header=0)
d.head()
plt.scatter(d.height,d.weight)
plt.xlabel("Height (cm)")
plt.ylabel("Weight (kg)")
plt.title("!Kung San anthropometric data, Howell")
plt.show()
d2 = d[d.age >= 18]
plt.scatter(d2.height,d2.weight)
plt.xlabel("Height (cm)")
plt.ylabel("Weight (kg)")
plt.title("!Kung San anthropometric data, Adults Only")
plt.show()
np.random.seed(2971)
N = 100 # 100 lines
a = st.norm.rvs(173, 20, N)
b = st.norm.rvs(0, 10, N)
xbar = d2.weight.mean()
x = np.linspace(d2.weight.min(), d2.weight.max(), N)
for i in range(N):
plt.plot(x,a[i] + b[i] * (x - xbar), "k", alpha=0.2)
plt.xlim(d2.weight.min(), d2.weight.max())
plt.ylim(-100, 400)
plt.axhline(0, c="k", ls="--")
plt.axhline(280, c="k")
plt.xlabel("weight (kg)")
plt.ylabel("height (cm)")
plt.show()
np.random.seed(2971)
N = 100 # 100 lines
a = st.norm.rvs(173, 20, N)
b = st.lognorm.rvs(s=1, scale=1, size=100)
xbar = d2.weight.mean()
x = np.linspace(d2.weight.min(), d2.weight.max(), N)
for i in range(N):
plt.plot(x,a[i] + b[i] * (x - xbar), "k", alpha=0.2)
plt.xlim(d2.weight.min(), d2.weight.max())
plt.ylim(-100, 400)
plt.axhline(0, c="k", ls="--")
plt.axhline(280, c="k")
plt.xlabel("weight (kg)")
plt.ylabel("height (cm)")
plt.show()
with pm.Model() as m4_3:
a = pm.Normal("a", mu=173, sd=20)
b = pm.Lognormal("b", mu=0, sd=1)
sigma = pm.Uniform("sigma", 0, 50)
mu = a + b * (d2.weight - xbar)
height = pm.Normal("height", mu=mu, sd=sigma, observed=d2.height)
trace_4_3 = pm.sample(1000, tune=1000)
az.summary(trace_4_3, kind="stats")
results = sm.OLS(d2.height,sm.add_constant(d2.weight-xbar)).fit()
print(results.summary())
plt.plot(d2.weight, d2.height, ".")
plt.plot(d2.weight, trace_4_3["a"].mean() + trace_4_3["b"].mean() * (d2.weight - xbar))
plt.xlabel(d2.columns[1])
plt.ylabel(d2.columns[0]);
weight_seq = np.arange(25, 71)
mu_pred = np.zeros((len(weight_seq), len(trace_4_3) * trace_4_3.nchains))
for i, w in enumerate(weight_seq):
mu_pred[i] = trace_4_3["a"] + trace_4_3["b"] * (w - d2.weight.mean())
plt.plot(weight_seq, mu_pred, "C0.", alpha=0.1)
plt.xlabel("weight")
plt.ylabel("height");
mu_mean = mu_pred.mean(1)
mu_hdi = az.hdi(mu_pred.T)
az.plot_hdi(weight_seq, mu_pred.T)
plt.scatter(d2.weight, d2.height)
plt.plot(weight_seq, mu_mean, "k")
plt.xlabel("weight")
plt.ylabel("height")
plt.xlim(d2.weight.min(), d2.weight.max());
height_pred = pm.sample_posterior_predictive(trace_4_3, 200, m4_3)
height_pred_hdi = az.hdi(height_pred["height"])
ax = az.plot_hdi(weight_seq, mu_pred.T)
az.plot_hdi(d2.weight, height_pred["height"], ax=ax,color="yellow")
plt.scatter(d2.weight, d2.height)
plt.plot(weight_seq, mu_mean, "k")
plt.xlabel("weight")
plt.ylabel("height")
plt.xlim(d2.weight.min(), d2.weight.max());
plt.scatter(d.weight,d.height)
plt.ylabel("Height (cm)")
plt.xlabel("Weight (kg)")
plt.title("!Kung San anthropometric data, Howell")
plt.show()
d["weight_std"] = (d.weight - d.weight.mean()) / d.weight.std()
d["weight_std2"] = d.weight_std ** 2
with pm.Model() as m_4_5:
a = pm.Normal("a", mu=173, sd=100)
b1 = pm.Lognormal("b1", mu=0, sd=1)
b2 = pm.Normal("b2", mu=0, sd=1)
sigma = pm.Uniform("sigma", lower=0, upper=50)
mu = pm.Deterministic("mu", a + b1 * d.weight_std + b2 * d.weight_std2)
height = pm.Normal("height", mu=mu, sd=sigma, observed=d.height)
trace_4_5 = pm.sample(1000, tune=1000)
mu_pred = trace_4_5["mu"]
height_pred = pm.sample_posterior_predictive(trace_4_5, 200, m_4_5)
ax = az.plot_hdi(d.weight_std, mu_pred)
az.plot_hdi(d.weight_std, height_pred["height"], ax=ax)
plt.scatter(d.weight_std, d.height, c="C0", alpha=0.3)
weight_m = np.vstack((d.weight_std, d.weight_std ** 2, d.weight_std ** 3))
with pm.Model() as m_4_6:
a = pm.Normal("a", mu=173, sd=100)
b = pm.Normal("b", mu=0, sd=10, shape=3)
sigma = pm.Uniform("sigma", lower=0, upper=50)
mu = pm.Deterministic("mu", a + pm.math.dot(b, weight_m))
height = pm.Normal("height", mu=mu, sd=sigma, observed=d.height)
trace_4_6 = pm.sample(1000, tune=1000)
mu_pred = trace_4_6["mu"]
height_pred = pm.sample_posterior_predictive(trace_4_6, 200, m_4_6)
ax = az.plot_hdi(d.weight_std, mu_pred)
az.plot_hdi(d.weight_std, height_pred["height"], ax=ax)
plt.scatter(d.weight_std, d.height, c="C0", alpha=0.3)
at = np.arange(-2, 3)
plt.xticks(at, np.round(at * d.weight.std() + d.weight.mean(), 1));
d = pd.read_csv("Data/cherry_blossoms.csv")
# nans are not treated as in the book
az.summary(d.dropna().to_dict(orient="list"), kind="stats")
d2 = d.dropna(subset=["doy"])
num_knots = 15
knot_list = np.quantile(d2.year, np.linspace(0, 1, num_knots))
from patsy import dmatrix
B = dmatrix(
"bs(year, knots=knots, degree=3, include_intercept=True) - 1",
{"year": d2.year.values, "knots": knot_list[1:-1]},
)
_, ax = plt.subplots(1, 1, figsize=(12, 4))
for i in range(17):
ax.plot(d2.year, (B[:, i]), color="C0")
ax.set_xlabel("year")
ax.set_ylabel("basis");
B
```
# **హిందవి ప్రస్తారణ వ్యవస్థ** 2021.9
Hindawi Programming System 2021.9<br>
Copyright (C) 2004-2021 Abhishek Choudhary, Dr Srija Katta<br>
AyeSPL license. NO WARRANTY.
## పరిచయం(Introduction)
**హిందవి ప్రోగ్రామింగ్ సిస్టమ్ మాతృభాషలో ప్రాధమిక స్థాయి నుండి గేట్ కంప్యూటర్ ఇంజనీరింగ్ ([GATE CSE](https://gate.iitb.ac.in/syllabi.php)) స్థాయి వరకు ప్రోగ్రామింగ్ను అనుమతిస్తుంది. సూపర్ కంప్యూటింగ్ [సూపర్కంప్యూటింగ్](https://hi.wikipedia.org/wiki/%E0%A4%AE%E0%A4%B9%E0%A4%BE%E0%A4%B8%E0%A4%82%E0%A4%97%E0%A4%A3%E0%A4%95) (HPC), ఐఒటి(IoT), క్వాంటం కంప్యూటింగ్ మరియు డిఎన్ఎ కంప్యూటింగ్ ([DNA computing](https://en.wikipedia.org/wiki/DNA_computing)) పరిశోధన మరియు ప్రోగ్రామింగ్ కూడా హిందవి ఉపయోగించి మాతృభాషలో సాధ్యమే. ఇది కల కాదు - ఇది నిజం! దీన్ని ఇప్పుడు ఆన్లైన్లో ధృవీకరించండి. రండి సాంకేతిక విభజన(digital divide), అభిజ్ఞా విభజన (cognitive divide - మాతృభాషను ఉపయోగించి AI ని అభివృద్ధి చేయలేకపోవడం) లేదా భాషా విభజన (linguistic divide) లేకుండా సాంకేతిక ఏకవచన (technical singularity) ప్రయాణాన్ని పూర్తి చేద్దాం.
**డిస్క్లైమర్ **: హిందవి ప్రోగ్రామింగ్ సిస్టమ్ యొక్క పరిపూర్ణతకు సూచనగా మాత్రమే గేట్ ప్రస్తావించబడింది. గేట్ పరీక్షల నిర్వాహణతో మాకు ఎటువంటి సంబంధం లేదు. 2021 వరకు గేట్ సిఎస్ఇ ప్రోగ్రామింగ్ భాషల ప్రామాణిక వైవిధ్యాలను మాత్రమే ఉపయోగిస్తుంది మరియు తెలుగు వెర్షన్ కాదు. ఈ యుటిలిటీ రచయితలచే బాహ్య లింక్లు నియంత్రించబడవు.
The Hindawi Programming System makes programming in the mother tongue feasible from primary school level up to GATE Computer Engineering level. Using Hindawi, research and programming in the mother tongue are also possible in supercomputing, IoT, robotics, artificial intelligence, quantum computing and even DNA computing. This is not a dream - it is a reality! Verify it online right now. Let's approach the technological singularity without any digital divide, cognitive divide (the inability to develop AI using the mother tongue), or linguistic divide.
**Disclaimer**: GATE is mentioned only with reference to the completeness of the Hindawi Programming System. We are not associated with the conduct of the GATE examinations. As of 2021, GATE CSE uses only the standard forms of the programming languages and not the Hindi version. External links are not controlled by the authors of this utility.
# సంస్థాపన(Initialize)
ఇది హిందవిని నడపడానికి జూపిటర్ నోట్బుక్ వాతావరణాన్ని అనుమతిస్తుంది. నెమ్మదిగా ఉన్న సిస్టమ్లలో దీనికి కొన్ని నిమిషాలు పట్టవచ్చు. చాలా క్లౌడ్ పరిసరాలలో ఇది కొన్ని సెకన్లలో పూర్తవుతుంది.
This enables Hindawi in the Jupyter notebook environment. It may take a few minutes on slower systems; on most cloud environments it completes in a few seconds.
👈 ఈ సెల్ని అమలు చేయడానికి ప్లే బటన్ని (▶) నొక్కండి (Press the play button (▶) to run this cell)
```
#@title సంస్థాపన (Installation) { vertical-output: true }
#@markdown 👈 ఈ సెల్ను ప్లే చేయడానికి బటన్ను ప్లే చేయండి (▶) నొక్కండి
#@markdown <br>
#@markdown ఇతర కణాలను ఉపయోగించటానికి ముందు ఇది కనీసం ఒక్కసారైనా అమలు చేయాలి
#@markdown <br>
#@markdown This must be run at least once before the other cells can be used
%%shell
#Installing prerequisites...
printf "ముందస్తు అవసరాలను ఇన్స్టాల్ చేస్తోంది ... "
sudo apt install gawk flex bison php-iconv screen &>/dev/null
#Done
echo "పూర్తయింది"
printf "Hindawi2020 రిపోజిటరీ క్లోన్ చేయబడుతోంది... "
git clone https://github.com/hindawiai/chintamani hindawi2020 &>/dev/null
#Done
echo "పూర్తయింది"
if [ 0 -lt $(pip3 freeze | grep google.colab | wc -l) ]
then
#Executing preamble for non-docker platforms...
echo "ఉపోద్ఘాతాన్ని అమలు చేస్తోంది"
cd hindawi2020
git checkout telugu
for n in Romenagri\
Hindawi/guru Hindawi/hindrv Hindawi/kritrima Hindawi/praatha\
Hindawi/shabda Hindawi/shraeni Hindawi/wyaaka Hindawi/yantra\
Hindawi/others/fasm Hindawi/soochee;
do
pushd $n &>/dev/null
#Building in $n...
printf "$n లో ప్రోగ్రామ్ను కంపైల్ చేస్తోంది ... "
make all &>/dev/null
make install &>/dev/null
make clean_all &>/dev/null
#Done
echo "పూర్తయింది"
popd &>/dev/null
done
#Completed preamble for non-docker platforms.
echo "ముందుమాట పూర్తయింది"
fi
#TBD: APCISR not built
```
# వివరణాత్మక వివరణ (Detailed description)
```
#@title **ప్రాథమిక శైలి యొక్క ప్రోగ్రామ్ ఎడిటింగ్(program editing)**
#@markdown సవరించడానికి డబుల్ క్లిక్ చేయండి లేదా __enter__ నొక్కండి
#@markdown <br>
#@markdown Reference:
#@markdown [Face detection with OpenCV and deep learning](https://www.pyimagesearch.com/2018/02/26/face-detection-with-opencv-and-deep-learning/)<br>
#@markdown Double-click or press __Enter__ to edit
%%writefile hindawi2020/Hindawi/soochee/myPY.uhin
దిగుమతి imutils
దిగుమతి numpy వంటి np
దిగుమతి cv2
నుండి google.colab.patches దిగుమతి cv2_imshow
నుండి IPython.dఉందిplay దిగుమతి dఉందిplay, Javవంటిcript
నుండి google.colab.output దిగుమతి eval_js
నుండి bవంటిe64 దిగుమతి b64decode
నిర్వచించు take_photo(filename='photo.jpg', quality=0.8):
js = Javవంటిcript('''
అసమకాలీక function takePhoto(quality) {
const div = document.createElement('div');
const capture = document.createElement('button');
capture.textContent = 'Capture';
div.appendChild(capture);
const video = document.createElement('video');
video.style.dఉందిplay = 'block';
const stream = ఎదురు navigatలేదా.mediaDevices.getUserMedia({video: నిజం});
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
ఎదురు video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, నిజం);
// Wait కోసం Capture to be clicked.
ఎదురు new Promఉందిe((resolve) => capture.onclick = resolve);
const canvవంటి = document.createElement('canvas');
canvవంటి.width = video.videoWidth;
canvవంటి.height = video.videoHeight;
canvవంటి.getContext('2d').drawImage(video, 0, 0);
stream.getVideoTracks()[0].stop();
div.remove();
తిరిగి canvవంటి.toDataURL('image/jpeg', quality);
}
''')
dఉందిplay(js)
data = eval_js('takePhoto({})'.కోసంmat(quality))
bలోary = b64decode(data.split(',')[1])
తో open(filename, 'wb') వంటి f:
f.write(bలోary)
తిరిగి filename
image_file = take_photo()
#image = cv2.imread(image_file, cv2.IMREAD_UNCHANGED)
image = cv2.imread(image_file)
# resize it to have a maximum width of 400 pixels
image = imutils.resize(image, width=400)
(h, w) = image.shape[:2]
#ముద్రించు(w,h)
#cv2_imshow(image)
ముద్రించు("[INFO] loading model...")
prototxt = 'hindawi2020/Hindawi/soochee/deploy.prototxt'
moతొలగించు = "hindawi2020/Hindawi/soochee/res10_300x300_ssd_iter_140000.caffemodel"
net = cv2.dnn.readNetFromCaffe(prototxt, moతొలగించు)
# resize it to have a maximum width of 400 pixels
image = imutils.resize(image, width=400)
blob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))
ముద్రించు("[INFO] computing object detections...")
net.setInput(blob)
detections = net.కోసంward()
కోసం i లో range(0, detections.shape[2]):
# extract the confidence (i.e., probability) వంటిsociated తో the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensurలోg the `confidence` ఉంది
# greater than the mలోimum confidence threshold
ఉంటే confidence > 0.5:
# compute the (x, y)-coలేదాdలోates of the boundలోg box కోసం the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.వంటిtype("int")
# draw the boundలోg box of the face along తో the వంటిsociated probability
text = "{:.2f}%".కోసంmat(confidence * 100)
y = startY - 10 ఉంటే startY - 10 > 10 వేరే startY + 10
cv2.rectangle(image, (startX, startY), (endX, endY), (0, 0, 255), 2)
cv2.putText(image, text, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
cv2_imshow(image)
%%shell
#@title ### **స్టైల్ ప్రైమరీ యొక్క ప్రోగ్రామ్ ఎగ్జిక్యూషన్(program execution)**
#@markdown ### ఈ నోట్బుక్ యొక్క ఫైల్ బ్రౌజర్ ఉపయోగించి కంపైల్డ్ ప్రోగ్రామ్ను డౌన్లోడ్ చేసుకోవచ్చు. ఫైల్ hin.exe
#@markdown <br>
#Do not edit below this line
#ఈ పంక్తి క్రింద సవరించవద్దు
cd hindawi2020/Hindawi/soochee/
wget -q -N https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
wget -q -N https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel
#./hinpy myPY.uhin
echo '# coding=utf-8' >hintmp.py
cat myPY.uhin | ../../Romenagri/flatten_uni_dev | iconv -t utf16 | uni2acii | acii2cf | h2py | acii2uni | iconv -futf16 | ../../Romenagri/fltr_hi_te >>hintmp.py
%run hindawi2020/Hindawi/soochee/hintmp.py
```
```
import sys
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import pymysql
import config
%matplotlib inline
import seaborn as sns
conn = pymysql.connect(config.host, user=config.username,port=config.port,
passwd=config.password)
#gather all historical data to build model
RideWaits = pd.read_sql_query("call DisneyDB.RideWaitQuery('2,7,8,9')", conn)
```
# Disney Wait Times
This Jupyter notebook serves as exploratory analysis of wait times, surfacing introductory information about how wait times relate to other categories. It is also the place to show the transformations the data frame undergoes to produce the additional features I expect to be useful in this exercise.
```
RideWaits
RideWaits["RideId"] = pd.Categorical(RideWaits["RideId"])
#RideWaits["Status"] = pd.Categorical(RideWaits["Status"])
RideWaits["ParkId"] = pd.Categorical(RideWaits["ParkId"])
RideWaits["Tier"] = pd.Categorical(RideWaits["Tier"])
RideWaits["ParkName"] = pd.Categorical(RideWaits["ParkName"])
RideWaits["IntellectualProp"] = pd.Categorical(RideWaits["IntellectualProp"])
RideWaits["Date"] = pd.to_datetime(RideWaits["Date"], infer_datetime_format = True)
RideWaits["OpeningDate"] = pd.to_datetime(RideWaits["OpeningDate"], infer_datetime_format = True)
RideWaits["Time"] = pd.to_datetime(RideWaits["Time"], format = '%H:%M').dt.time
RideWaits["ParkOpen"] = pd.to_datetime(RideWaits["ParkOpen"], format = '%I:%M %p').dt.strftime('%H:%M')
RideWaits["ParkOpen"] = pd.to_datetime(RideWaits["ParkOpen"], format = '%H:%M').dt.time
RideWaits["ParkClose"] = pd.to_datetime(RideWaits["ParkClose"], format = '%I:%M %p').dt.strftime('%H:%M')
RideWaits["ParkClose"] = pd.to_datetime(RideWaits["ParkClose"], format = '%H:%M').dt.time
RideWaits["DayOfWeek"] = [datetime.weekday(x) for x in RideWaits["Date"]]
RideWaits["EMHOpen"] = pd.to_datetime(RideWaits["EMHOpen"], format = '%I:%M %p', errors = 'coerce').dt.strftime('%H:%M')
RideWaits["EMHClose"] = pd.to_datetime(RideWaits["EMHClose"], format = '%I:%M %p', errors = 'coerce').dt.strftime('%H:%M')
RideWaits["EMHOpen"] = pd.to_datetime(RideWaits["EMHOpen"], format = '%H:%M', errors = 'coerce').dt.time
RideWaits["EMHClose"] = pd.to_datetime(RideWaits["EMHClose"], format = '%H:%M', errors = 'coerce').dt.time
RideWaits["Weekend"] = [0 if x == 0 or x == 1 or x ==2 or x==3 or x==4 else 1 for x in RideWaits["DayOfWeek"]]
RideWaits["Weekend"].value_counts()
RideWaits["CharacterExperience"] = [1 if ("Meet" in x) or ("Encounter" in x) or ("Visa" in x) else 0 for x in RideWaits["Name"]]
RideWaits["Date"].value_counts()
RideWaits
```
The transformations above get the data ready for analysis; however, some observations may fall outside our viable windows. The next loop removes those times and also flags whether each time falls within Extra Magic Hours.
```
validTime = []
inEMH = []
emhDay = []
timeSinceStart = []
timeSinceMidDay = []
magicHourType = []
timeSinceOpenMinutes = []
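#NB: the three timeSince* lists below are appended on every iteration, reusing the previous
#row's values when a time falls outside the valid window, so their lengths always match the
#DataFrame; this assumes the very first row falls inside park hours (or EMH), otherwise
#tSinceOpen and friends would be referenced before assignment. Note also that
#replace(day = day + 1) will raise a ValueError when a park closes past midnight on the
#last day of a month.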
for index, row in RideWaits.iterrows():
#print(row)
tempTime = datetime.now()
cTime = row["Time"]
pOpen = row["ParkOpen"]
pClose = row["ParkClose"]
currentParkTime = tempTime.replace(hour = cTime.hour, minute = cTime.minute, second = 0, microsecond = 0)
parkOpen = tempTime.replace(hour = pOpen.hour, minute = pOpen.minute, second = 0, microsecond = 0)
parkClose = tempTime.replace(hour = pClose.hour, minute = pClose.minute, second = 0, microsecond = 0)
if parkClose < parkOpen:
parkClose = parkClose.replace(day = parkClose.day + 1)
if (pd.notnull(row["EMHOpen"])) & (pd.notnull(row["EMHClose"])):
eOpen = row["EMHOpen"]
#print(eOpen)
eClose = row["EMHClose"]
#print(eClose)
emhOpen = tempTime.replace(hour = eOpen.hour, minute = eOpen.minute, second = 0, microsecond = 0)
emhClose = tempTime.replace(hour = eClose.hour, minute = eClose.minute, second = 0, microsecond = 0)
if emhClose < emhOpen:
emhClose = emhClose.replace(day = emhClose.day + 1)
emh = "ok"
emhDay.append(1)
if emhClose.hour == parkOpen.hour:
magicHourType.append("Morning")
else:
magicHourType.append("Night")
else:
emh = "none"
emhDay.append(0)
magicHourType.append("None")
#print(emh)
if (currentParkTime < parkClose) & (currentParkTime > parkOpen):
#print("Current Time is: " + str(currentParkTime) + " and ParkHours are "+ str(parkOpen) +" to " + str(parkClose) + " " +str(validtime))
tSinceOpen = currentParkTime.hour - parkOpen.hour
tSinceOpenMinutes = currentParkTime - parkOpen
tSinceMidDay = abs(currentParkTime.hour - 14)
if currentParkTime.hour < parkOpen.hour:
tSinceOpen = currentParkTime.hour + 24 - parkOpen.hour
tSinceOpenMinutes = currentParkTime.replace(day = currentParkTime.day + 1) - parkOpen
tSinceMidDay = abs(currentParkTime.hour - 14 + 24)
validTime.append(1)
inEMH.append(0)
else:
if (emh == "ok"):
if (currentParkTime < emhClose) & (currentParkTime > emhOpen):
validTime.append(1)
inEMH.append(1)
if (emhClose.hour == parkOpen.hour):
tSinceOpen = currentParkTime.hour - emhOpen.hour
tSinceOpenMinutes = currentParkTime - emhOpen
tSinceMidDay = abs(currentParkTime.hour - 14)
else:
if currentParkTime.hour < parkOpen.hour:
tSinceOpen = currentParkTime.hour + 24 - parkOpen.hour
tSinceOpenMinutes = currentParkTime.replace(day = currentParkTime.day + 1) - parkOpen
tSinceMidDay = abs(currentParkTime.hour - 14 + 24)
else:
tSinceOpen = currentParkTime.hour - parkOpen.hour
tSinceOpenMinutes = currentParkTime - parkOpen
tSinceMidDay = abs(currentParkTime.hour - 14)
else:
validTime.append(0)
inEMH.append(0)
else:
validTime.append(0)
inEMH.append(0)
timeSinceStart.append(tSinceOpen)
timeSinceMidDay.append(tSinceMidDay)
timeSinceOpenMinutes.append(tSinceOpenMinutes)
RideWaits["inEMH"] = inEMH
RideWaits["validTime"] = validTime
RideWaits["EMHDay"] = emhDay
RideWaits["TimeSinceOpen"] = timeSinceStart
RideWaits["TimeSinceMidday"] = timeSinceMidDay
RideWaits["MagicHourType"] = magicHourType
RideWaits["MinutesSinceOpen"] = [x.total_seconds()/60 for x in timeSinceOpenMinutes]
#RideWaits["SimpleStatus"] = pd.Categorical(RideWaits["SimpleStatus"])
RideWaits = RideWaits[RideWaits["validTime"] == 1]
RideWaits["Month"] = RideWaits["Date"].dt.month
RideWaits["TimeSinceRideOpen"] = (RideWaits["Date"] - RideWaits["OpeningDate"]).dt.days
```
The features above are mostly direct transformations of the data, with the exception of Day of Week, which is extracted from the date object.
## Relationships between Different Categories and Wait Time
In this section we will explore how wait time relates to the different categories shown below.
* Day of Week
* Ride Name
* Time of Day
* Park
* Ride/Time of Day
* Park/Time of Day
* Tier/Time of Day
* If it's an extra magic hour day
* In Extra Magic hours or not
```
RideWaits["Wait"].describe()
RideWaits["DayOfWeek"].value_counts()
RideWaits.groupby('DayOfWeek')['Wait'].mean()
fig,ax= plt.subplots(figsize = (20,10))
RideWaits.groupby('DayOfWeek')['Wait'].mean().plot(ax = ax)
plt.show()
plt.subplots(figsize = (20,10))
ax = sns.boxplot(x = "DayOfWeek", y = "Wait", data = RideWaits)
plt.show()
RideWaits.groupby('Name')['Wait'].mean()
```
This extremely large data frame may not be particularly useful by itself. We'll dive into these figures in another cell later on.
```
fig,ax= plt.subplots(figsize = (20,10))
RideWaits.groupby('Time')['Wait'].mean().plot(ax = ax)
plt.show()
```
We see that wait time follows a general curve through the day, signaling that time of day is a major factor in park wait times. Wait time increases to a peak at roughly 14:00 and then decreases until the end of the day, mirroring a y = -|x| shape. We can attempt to capture this with a value that measures the time since midday, taking midday to be 14:00.
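The loop above already builds this feature; for reference, here is a vectorized sketch of the same idea (it assumes the Time and ParkOpen columns hold datetime.time objects, as set earlier, and writes to a hypothetical TimeSinceMidday_alt column):
```
hours = RideWaits["Time"].apply(lambda t: t.hour)
open_hours = RideWaits["ParkOpen"].apply(lambda t: t.hour)
# roll post-midnight hours into the next day before measuring distance from 14:00
wrapped = np.where(hours < open_hours, hours + 24, hours)
RideWaits["TimeSinceMidday_alt"] = np.abs(wrapped - 14)
```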
```
fig,ax= plt.subplots(figsize = (20,10))
RideWaits.groupby('MinutesSinceOpen')['Wait'].mean().plot(ax = ax)
plt.show()
RideWaits.corr()["Wait"]["TimeSinceMidday"]
RideWaits.plot(x = "TimeSinceMidday", y = "Wait", kind = "scatter")
```
It's generally hard to see if there is any sort of trend with every single point. If we group by the number of hours since midday perhaps we can see a general trend.
```
fig,ax= plt.subplots(figsize = (20,10))
RideWaits.groupby('TimeSinceMidday')['Wait'].mean().plot(ax = ax)
plt.show()
RideWaits.groupby('TimeSinceMidday')['Wait'].describe()
```
As suspected, the wait time decreases as the time moves away from the midpoint of the day.
```
RideWaits.groupby('ParkName')['Wait'].describe()
plt.subplots(figsize = (20,10))
ax = sns.boxplot(x = "ParkName", y = "Wait", data = RideWaits)
plt.show()
```
From this box-and-whisker plot, together with the summary table above, we quickly see that Epcot and Hollywood Studios have nearly identical wait times, while Animal Kingdom has the largest interquartile range and the highest mean. This may be due to the addition of two new rides that have recently seen dramatically high wait times, which leads me to believe that the 'newness' of a ride contributes to wait time; that becomes an engineered feature later on.
```
from scipy import stats
t,p = stats.ttest_ind(RideWaits["Wait"], RideWaits[RideWaits["ParkName"]=="Disneys Animal Kingdom Theme Park"]["Wait"])
print(t, p)
```
The test suggests that the mean wait time for an Animal Kingdom ride differs significantly from that of an average ride across Walt Disney World, with Animal Kingdom higher, most likely due to the addition of Pandora. Note that as written the test compares Animal Kingdom against the full sample, which includes Animal Kingdom itself; a cleaner comparison is sketched below.
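A sketch of that cleaner comparison (Animal Kingdom against all other parks, using Welch's t-test to allow unequal variances):
```
from scipy import stats

ak_waits = RideWaits[RideWaits["ParkName"] == "Disneys Animal Kingdom Theme Park"]["Wait"]
other_waits = RideWaits[RideWaits["ParkName"] != "Disneys Animal Kingdom Theme Park"]["Wait"]
t, p = stats.ttest_ind(ak_waits, other_waits, equal_var=False)  # Welch's t-test
print(t, p)
```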
```
rideSummaryStats = RideWaits.groupby('Name')['Wait'].describe()
rideSummaryStats.head(15)
RideWaits.groupby('Tier')['Wait'].describe()
plt.subplots(figsize = (20,10))
ax = sns.boxplot(x = "Tier", y = "Wait", data = RideWaits)
plt.show()
```
This mostly matches our preconceived notions:
Super headliner attractions dominate the wait times, followed by headliners. This makes me want to inspect the data further to understand what differentiates a headliner from a major attraction, or a major attraction from a minor attraction, since those two have nearly identical wait times.
```
RideWaits.groupby('EMHDay')['Wait'].describe()
plt.subplots(figsize = (20,10))
ax = sns.boxplot(x = "EMHDay", y = "Wait", data = RideWaits)
plt.show()
t, p = stats.ttest_ind(RideWaits[RideWaits["EMHDay"] == 0]["Wait"], RideWaits[RideWaits["EMHDay"]==1]["Wait"])
print(t)
print(p)
```
The working assumption on most Disney park-planning sites is that if you don't need to go to a park on a day it has Extra Magic Hours, you shouldn't, because it will be more crowded. The difference here is statistically significant, so we can say with confidence that Extra Magic Hour days are associated with higher wait times.
```
RideWaits.groupby('inEMH')['Wait'].describe()
plt.subplots(figsize = (20,10))
ax = sns.boxplot(x = "inEMH", y = "Wait", data = RideWaits)
plt.show()
t, p = stats.ttest_ind(RideWaits[RideWaits["inEMH"] == 0]["Wait"], RideWaits[RideWaits["inEMH"]==1]["Wait"])
print(t)
print(p)
```
Again this matches conventional wisdom: if you ride attractions during Extra Magic Hours, you will see lower wait times than outside of Extra Magic Hours, a difference of about 14 minutes here. The p-value is far below our threshold, so we can say with statistical significance that the mean wait time during Extra Magic Hours is lower than outside of them.
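To put that difference on a standardized scale, a quick effect-size check (a sketch, using a simple pooled standard deviation):
```
in_emh = RideWaits[RideWaits["inEMH"] == 1]["Wait"]
out_emh = RideWaits[RideWaits["inEMH"] == 0]["Wait"]
pooled_sd = np.sqrt((in_emh.std() ** 2 + out_emh.std() ** 2) / 2)
print("Cohen's d:", (out_emh.mean() - in_emh.mean()) / pooled_sd)
```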
```
RideWaits.groupby('MagicHourType')['Wait'].describe()
plt.subplots(figsize = (20,10))
ax = sns.boxplot(x = "MagicHourType", y = "Wait", data = RideWaits)
plt.show()
```
## Descriptive Plots
Below are plots of the rides grouped by park. First the average wait times by park and time slot are shown, then the average wait times by ride and time slot for each park, followed by tier and time slot.
```
fig,ax = plt.subplots(figsize = (20,10))
RideWaits.groupby(['Time','ParkName'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
fig,ax = plt.subplots(figsize = (20,10))
RideWaits.groupby(['TimeSinceOpen','ParkName'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
```
By normalizing across the parks with 'TimeSinceOpen' we get rid of some of the noise potentially associated with minor 15-minute fluctuations, as well as the different opening times across the parks. We also see a single spike in Magic Kingdom times before park open that is clearly associated with a high-popularity ride being open during Extra Magic Hours.
```
fig,ax = plt.subplots(figsize = (20,10))
RideWaits[RideWaits["ParkName"]=="Magic Kingdom Park"].groupby(['Time','Name'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
fig,ax = plt.subplots(figsize = (20,10))
RideWaits[RideWaits["ParkName"]=="EpCot"].groupby(['Time','Name'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
fig,ax = plt.subplots(figsize = (20,10))
RideWaits[RideWaits["ParkName"]=="Disneys Hollywood Studios"].groupby(['Time','Name'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
fig,ax = plt.subplots(figsize = (20,10))
RideWaits[RideWaits["ParkName"]=="Disneys Animal Kingdom Theme Park"].groupby(['Time','Name'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
fig,ax = plt.subplots(figsize = (20,10))
RideWaits[RideWaits["ParkName"]=="Magic Kingdom Park"].groupby(['Time','Tier'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
fig,ax = plt.subplots(figsize = (20,10))
RideWaits[RideWaits["ParkName"]=="EpCot"].groupby(['Time','Tier'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
fig,ax = plt.subplots(figsize = (20,10))
RideWaits[RideWaits["ParkName"]=="Disneys Hollywood Studios"].groupby(['Time','Tier'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
fig,ax = plt.subplots(figsize = (20,10))
RideWaits[RideWaits["ParkName"]=="Disneys Animal Kingdom Theme Park"].groupby(['Time','Tier'])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
```
# Investigating High Wait Time Attractions
```
rideSummaryStats = rideSummaryStats.sort_values(by = ['mean'], ascending = False)
topWaits = rideSummaryStats.iloc[:10]
topWaits
```
We can investigate these high wait time rides and see what commonalities they share.
```
topRides = RideWaits[RideWaits["Name"].isin(topWaits.index)]
information = topRides.groupby(['Name','ParkName','OpeningDate']).size().reset_index()
information
information["ParkName"].value_counts()
```
We see that the majority of the rides with long waits, 6 of the 10, are in either Magic Kingdom or Animal Kingdom. The Animal Kingdom rides are the relatively new Avatar rides plus Kilimanjaro Safaris. Only two of these rides, Peter Pan's Flight and Space Mountain, would I consider legacy experiences. With so many of these rides opening after 1995, it makes sense to include a feature that captures the time since each ride opened.
```
RideWaits.corr()["Wait"]["TimeSinceRideOpen"]
```
We see a negative correlation, which is what we expect: as the novelty of a ride wears off, its wait should go down. However, this doesn't capture whether a ride is a classic, that is, whether it was open the day the park itself opened; a hypothetical indicator for this is sketched below.
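One way such an indicator could look (a sketch only; the park opening dates are hard-coded for illustration, and the ClassicRide column is hypothetical):
```
park_opening = {
    "Magic Kingdom Park": "1971-10-01",
    "EpCot": "1982-10-01",
    "Disneys Hollywood Studios": "1989-05-01",
    "Disneys Animal Kingdom Theme Park": "1998-04-22",
}
park_open_date = pd.to_datetime(RideWaits["ParkName"].astype(str).map(park_opening))
RideWaits["ClassicRide"] = ((RideWaits["OpeningDate"] - park_open_date).dt.days < 365).astype(int)
```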
```
fig,ax = plt.subplots(figsize = (20,10))
topRides.groupby(["Time","Name"])['Wait'].mean().unstack().plot(ax = ax)
plt.show()
```
The first thing that sticks out in this graph is the solid line representing the character experience with Cinderella and other princesses. Perhaps there should be a feature indicating whether the experience is a character experience, or, more generally, which branch of IP (intellectual property) the experience belongs to. This would help answer whether rides associated with princesses tend to have longer waits than rides without a specific associated property, like Jungle Cruise or the Haunted Mansion.
## Weather
Up until this point we have analyzed rides without considering the weather at the time. Orlando has frequent surprise thunderstorms that may drop wait times as people rush indoors, or may suddenly increase wait times at indoor (dark) rides as people avoid being outside. These are things we can look at in principle here, and later incorporate into a machine learning model.
```
RideWaits["Status"].value_counts()
RideWaits.groupby('Status')["Wait"].describe()
```
So far we have had no observations with rain, so we can't analyze whether rain has an effect. We do see that fog has some effect; however, fog mainly occurs in the morning, so that may be more of a time-of-day constraint than something directly related to fog.
```
RideWaits.groupby('SimpleStatus')["Wait"].describe()
```
We also have a simpler status column so that weather conditions can be grouped more easily. Here again fog is the only substantial difference, but again: is this due to the morning hours or to the fog itself?
We should now look at temperature to see if there is any correlation between temperature and wait times.
```
RideWaits.plot(x = "Temperature", y = "Wait", kind = "scatter")
RideWaits.corr()["Wait"]["Temperature"]
fig,ax= plt.subplots(figsize = (20,10))
RideWaits.groupby('Temperature')['Wait'].mean().plot(ax = ax)
plt.show()
```
We see almost the reverse of the effect we would expect from temperature. I would anticipate that heat would cause people to leave the parks, but since the hottest hours fall in the middle of the day when the parks are busiest, this shows up as a positive correlation, albeit not a strong one, between wait time and temperature. This again makes me want to capture whether a ride is outdoors or a dark ride, and whether its queue is outdoors or indoors.
All of this points to time of day, along with the specific ride, being the most important factor in generalizing a wait time.
It may be easier to view temperature as a daily high paired with the average wait time that day; this isolates temperature from time-of-day considerations while keeping other aspects, such as day of week, paired. The comparison becomes more apples-to-apples and we can control for potentially confounding variables. A sketch of this aggregation follows below.
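A sketch of that aggregation (one row per park and day, pairing the daily high with the mean wait):
```
daily = (RideWaits.groupby(["ParkName", "Date"])
                  .agg({"Temperature": "max", "Wait": "mean"})
                  .rename(columns={"Temperature": "DailyHighTemp", "Wait": "MeanWait"})
                  .reset_index())
print(daily["DailyHighTemp"].corr(daily["MeanWait"]))
daily.plot(x="DailyHighTemp", y="MeanWait", kind="scatter")
```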
## Intellectual Property
Disney owns a vast array of different Intellectual property that certain rides take advantage of. This is seen with the upcoming additions of Star Wars Land and Toy Story Land. The thought is that certain IP create very popular rides and will increase wait times.
```
RideWaits.groupby('IntellectualProp')["Wait"].describe()
plt.subplots(figsize = (20,10))
ax = sns.boxplot(x = "IntellectualProp", y = "Wait", data = RideWaits)
plt.show()
```
This doesn't tell much of a story. It's hard to tell whether the intellectual property is what gives the Avatar rides such high wait times, or whether it's the tier of those rides or their newness; all of these factors may be playing a part in the high wait times associated with Avatar-related properties.
### Character Experiences
Character experiences are another type of experience that may affect wait times. Most are devoted to showing off Disney's favorite characters, and because of this the wait times for these attractions tend to soar.
```
RideWaits.groupby('CharacterExperience')["Wait"].describe()
RideWaits[RideWaits["CharacterExperience"] == 1]["Name"].unique()
plt.subplots(figsize = (20,10))
ax = sns.boxplot(x = "CharacterExperience", y = "Wait", data = RideWaits)
plt.show()
t, p = stats.ttest_ind(RideWaits[RideWaits["CharacterExperience"] == 0]["Wait"], RideWaits[RideWaits["CharacterExperience"]==1]["Wait"])
print(t)
print(p)
```
Again there is statistical significance suggesting that a non-character experience has a longer wait time than a character experience, leading us to believe this would be an important aspect to include in a model.
## Looking at the cyclical nature of one ride
We can get a good sense of the cyclical nature of wait times by looking at just a few rides. I'm going to choose one older standard roller coaster, one new roller coaster, and one character experience to see how each behaves over time.
```
big_thunder = RideWaits[RideWaits['Name'] == "Big Thunder Mountain Railroad"]
#let's look at a specific two-week period to see if there is a weekly ebb and flow and an hourly ebb and flow.
big_thunder_recent = big_thunder.tail(400)
big_thunder_recent
plt.subplots(figsize = (20,10))
big_thunder_recent = big_thunder_recent.reset_index()
plt.plot(big_thunder_recent['Wait'])
#there is a cyclical pattern to these rides that can be tracked every day. There also exists a larger cyclical weekly pattern
slinky_dog = RideWaits[RideWaits['Name'] == "Slinky Dog Dash"]
slinky_dog
slinky_dog_recent = slinky_dog.tail(200)
plt.subplots(figsize = (20,10))
plt.plot(slinky_dog_recent['Wait'])
slinky_one_day = slinky_dog[slinky_dog['Date'].isin(["2018-10-12","2018-10-13","2018-10-14"])]
slinky_one_day
plt.subplots(figsize = (20,10))
slinky_one_day = slinky_one_day.reset_index()
plt.plot(slinky_one_day['Wait'])
slinky_one_day=slinky_one_day.reset_index()
slinky_one_day
#we see some predictable cyclical nature of each daily period. This will help us make predictions on wait times in the future.
#further this means we can use past times to predict forward times, and implement a shift. So the time at 2pm is influenced by
#the time at 1pm
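#a sketch of that lag idea (hypothetical PrevWait column): within each ride and date,
#carry the previous observation forward as a feature for the next time slot
RideWaits = RideWaits.sort_values(["Name", "Date", "Time"])
RideWaits["PrevWait"] = RideWaits.groupby(["Name", "Date"])["Wait"].shift(1)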
```
|
github_jupyter
|
| 0.074855 | 0.606091 |
# Module 4: Deep Learning - Supervised Learning
* Part 4.1: Perceptron [[Notebook]](4_1_perceptron.ipynb)
* **Part 4.2: Multi-Layer Perceptron** [[Notebook]](4_2_mlp.ipynb)
* Part 4.3: Convolutional Neural Networks [[Notebook]](4_3_cnn.ipynb)
# Part 4.1: The Perceptron
The perceptron, also called an artificial neuron or formal neuron, is a mathematical model for solving binary classification problems.
The perceptron is made up of:
* a weighted-sum function
* an activation function (see the sketch below)
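A minimal numpy sketch of that computation (illustrative only, and unrelated to the Keras model further down):
```
import numpy as np

def neuron(x, w, b):
    z = np.dot(w, x) + b              # weighted sum
    return 1.0 / (1.0 + np.exp(-z))   # sigmoid activation

x = np.array([0.2, 0.7, 0.1])
w = np.array([0.5, -0.3, 0.8])
print(neuron(x, w, b=0.1))
```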
## Importing the data
The dataset contains measurements collected from patients; it has 100 rows and 9 columns. The <i>diagnosis_result</i> column is the target variable, representing the diagnostic outcome. The remaining 8 columns are the explanatory variables.
```
# The two MNIST loads below are leftovers from another example and are not used here.
from sklearn.datasets import fetch_openml
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
from tensorflow.keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train.shape
# `dt` is assumed to be a pandas DataFrame holding the patient dataset described above
# (e.g., loaded beforehand with pd.read_csv), with diagnosis_result as its first column.
y = dt.iloc[:, 0].values
X = dt.iloc[:, 1:].values
n_features = X.shape[1]
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, stratify=y, test_size=.2)
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.optimizers import RMSprop
model = Sequential()
model.add(Dense(units=8, input_dim=n_features))
model.add(Dense(units=8))
model.add(Dense(units=1))  # single output unit for binary classification
model.add(Activation(activation='sigmoid'))
model.summary()
tf.keras.utils.plot_model(
model, to_file=None, show_shapes=True, show_layer_names=True,
rankdir='TB'
)
optimizer = RMSprop(learning_rate=1e-4)
model.compile(loss='binary_crossentropy', optimizer=optimizer)
model.fit(x=X_train, y=y_train, batch_size=10, epochs=10, validation_data=[X_val, y_val])
import numpy as np
test_ = [1., 5, 0.2, 0.14, 1, 0.65, 0.4, 0.4]
proba_ = model.predict_proba(np.array([test_]))[0]
class_ = model.predict_classes(np.array([test_]))[0]
print(f'X: {test_}')
print(f'Proba: {proba_}')
print(f'Class: {class_}')
```
If the predicted probability is greater than 0.5, the sample is assigned class 1; otherwise class 0.
### Limitations
A single neuron cannot solve complex problems. When the examples cannot be separated by a straight line, a single neuron fails.
In the next notebook, we will cover neural networks, which overcome this limitation.
**References**
**Contributing to this project**
Any contribution on your part would be greatly appreciated. If you would like to contribute to this project, please read the [CONTRIBUTING](CONTRIBUTING.md) guide.
**Reporting a bug**
If you run into a bug while running this notebook, simply open an _issue_ to report it.
**Sponsors**
* [Rintio]()
* [Solidar'IT]()
**Copyright & License**
Released under the MIT license
|
github_jupyter
|
| 0.858214 | 0.962462 |
## Making Flux Ratio maps between H$\alpha$ and CO(2-1) from MUSE and ALMA data
### 1. Data Preparation (WCS Alignment & Registration)
I quickly aligned the ALMA and MUSE WCS by eye (best one can do) using ```pyraf```
```
pyraf --> imdel alma_shift.fits
pyraf --> imshift A2597_mom0.fits alma_shift.fits -4 7
pyraf --> wcscopy alma_shift.fits A2597_mom0.fits
```
Note that I'm using the A2597 ALMA Mom0 map from Tim Davis' code, because the spectral and polarization axes have already been collapsed, and their corresponding WCS axes have been dropped. Besides, you should use it anyway, as this is the most "believable" flux we can use.
This provided good alignment between (a) the brightest knot of CO(2-1) and H$\alpha$ emission and (b) that northwestern blob at +400 km/s. Within the physical resolution of our observations, this alignment is good enough.
On to imports:
```
import os
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.data import get_pkg_data_filename
import numpy as np
from reproject import reproject_interp
import matplotlib.pyplot as plt
%matplotlib inline
def styleplots():
"""
Make plots pretty and labels clear.
"""
plt.style.use('ggplot')
labelsizes = 25
plt.rc('font', family='Arial')
plt.rcParams['font.size'] = labelsizes
plt.rcParams['axes.titlesize'] = 12
plt.rcParams['axes.labelsize'] = labelsizes
plt.rcParams['xtick.labelsize'] = labelsizes
plt.rcParams['ytick.labelsize'] = labelsizes
styleplots()
```
We need to reproject (resample) one image onto the pixel grid of another, trusting the WCS alignment. This can be done [with the reproject package](https://reproject.readthedocs.io/en/stable/).
```
os.listdir('.')
museHDU = fits.open(get_pkg_data_filename('Ha_flux_map.fits'))
almaHDU = fits.open(get_pkg_data_filename('alma_shift.fits'))
museHDU.info()
almaHDU.info()
```
Let's look at their WCS structures
```
w_alma = WCS(almaHDU[0])
w_muse = WCS(museHDU[0])
w_muse
w_alma
```
There are 4 ALMA WCS axes, including the 2 for frequency and polarization (meaningless in a Mom0 image). We need to collapse along both axes, and drop those WCS axes in order to match the MUSE data.
```
alma_3wcs = w_alma.dropaxis(3)
alma_2wcs = alma_3wcs.dropaxis(2)
alma_2wcs
```
So now we're matching the MUSE data. You need to make a fresh header and HDU out of this.
```
newalmaHeader = alma_2wcs.to_header()
newalmaHeader
```
Now make a "new" ALMA HDU with our old data and new header
```
newalmaHDU = fits.PrimaryHDU(data=almaHDU[0].data, header=newalmaHeader)
ax1 = plt.subplot(1,2,1, projection=WCS(museHDU[0].header))
ax1.imshow(museHDU[0].data, origin='lower')
ax1.coords['ra'].set_axislabel('Right Ascension')
ax1.coords['dec'].set_axislabel('Declination')
ax1.set_title('MUSE Original')
ax2 = plt.subplot(1,2,2, projection=alma_2wcs)
ax2.imshow(newalmaHDU.data, origin='lower')
ax2.coords['ra'].set_axislabel('Right Ascension')
ax2.coords['dec'].set_axislabel('Declination')
ax2.set_title('ALMA Original')
registered_alma_data, registered_alma_footprint = reproject_interp(newalmaHDU, museHDU[0].header)
ax1 = plt.subplot(1,2,1, projection=WCS(newalmaHDU.header))
ax1.imshow(registered_alma_data, origin='lower', vmin=-2.e-4, vmax=5.e-4)
ax1.coords.grid(color='white')
ax1.coords['ra'].set_axislabel('Right Ascension')
ax1.coords['dec'].set_axislabel('Declination')
ax1.set_title('Reprojected ALMA image')
ax2 = plt.subplot(1,2,2, projection=WCS(newalmaHDU.header))
ax2.imshow(registered_alma_footprint, origin='lower', vmin=0, vmax=1.5)
ax2.coords.grid(color='white')
ax1.coords['ra'].set_axislabel('Right Ascension')
ax1.coords['dec'].set_axislabel('Declination')
ax2.coords['dec'].set_axislabel_position('r')
ax2.coords['dec'].set_ticklabel_position('r')
ax2.set_title('Reprojected ALMA footprint')
ax1 = plt.subplot(1,2,1, projection=WCS(museHDU[0].header))
ax1.imshow(museHDU[0].data, origin='lower')
ax1.coords['ra'].set_axislabel('Right Ascension')
ax1.coords['dec'].set_axislabel('Declination')
ax1.set_title('MUSE Original')
ax2 = plt.subplot(1,2,2, projection=WCS(newalmaHDU.header))
ax2.imshow(registered_alma_data, origin='lower')
ax2.coords['ra'].set_axislabel('Right Ascension')
ax2.coords['dec'].set_axislabel('Declination')
ax2.set_title('Registered ALMA Image')
```
Finally, convert the 0s in the reprojected ALMA array to NaNs.
```
registered_alma_data[registered_alma_data==0] = np.nan
ax1 = plt.subplot(1,2,1, projection=WCS(museHDU[0].header))
ax1.imshow(museHDU[0].data, origin='lower')
ax1.coords['ra'].set_axislabel('Right Ascension')
ax1.coords['dec'].set_axislabel('Declination')
ax1.set_title('MUSE Original')
ax2 = plt.subplot(1,2,2, projection=WCS(newalmaHDU.header))
ax2.imshow(registered_alma_data, origin='lower')
ax2.coords['ra'].set_axislabel('Right Ascension')
ax2.coords['dec'].set_axislabel('Declination')
ax2.set_title('Registered ALMA Image')
```
## 2. Make functions to do all of this trivially
```
def fusemusealma(muse_moment_fits, alma_moment_fits):
'''
Open MUSE and ALMA HDUs, pick up their WCSs,
drop needless ALMA WCS axes, reproject
ALMA to MUSE, then output new HDUs.
'''
museHDU = fits.open(get_pkg_data_filename(muse_moment_fits))
almaHDU = fits.open(get_pkg_data_filename(alma_moment_fits))
w_alma = WCS(almaHDU[0])
w_muse = WCS(museHDU[0])
alma_3wcs = w_alma.dropaxis(3)
alma_2wcs = alma_3wcs.dropaxis(2)
newalmaHeader = alma_2wcs.to_header()
newalmaHDU = fits.PrimaryHDU(data=almaHDU[0].data, header=newalmaHeader)
registered_alma_data, registered_alma_footprint = reproject_interp(newalmaHDU, museHDU[0].header)
# Make the Zeros NaNs
registered_alma_data[registered_alma_data==0] = np.nan
ax1 = plt.subplot(1,2,1, projection=WCS(museHDU[0].header))
ax1.imshow(museHDU[0].data, origin='lower')
ax1.coords['ra'].set_axislabel('Right Ascension')
ax1.coords['dec'].set_axislabel('Declination')
ax1.set_title('MUSE Original')
ax2 = plt.subplot(1,2,2, projection=WCS(newalmaHDU.header))
ax2.imshow(registered_alma_data, origin='lower')
ax2.coords['ra'].set_axislabel('Right Ascension')
ax2.coords['dec'].set_axislabel('Declination')
ax2.set_title('Registered ALMA Image')
ax1.set_xlim(100, 220)
ax1.set_ylim(100,300)
ax2.set_xlim(100, 220)
ax2.set_ylim(100,300)
plt.show()
print("Successfully reprojected {} to {}".format(alma_moment_fits, muse_moment_fits))
header = w_muse.to_header()
return museHDU[0].data, registered_alma_data, header
#museflux, almaflux, fluxheader = fusemusealma('Ha_flux_map', 'alma_flux_shift.fits')
musevel, almavel, velheader = fusemusealma('Ha_vel_map.fits', 'alma_vel_shift.fits')
velratio = musevel / almavel
velratio_hdu = fits.PrimaryHDU(data=velratio, header=velheader)
velratio_hdu.writeto("velratio.fits", clobber=True)
musedisp, almadisp, dispheader = fusemusealma('Ha_fwhm_map.fits', 'alma_disp_shift.fits')
dispratio = (musedisp/2.35) / almadisp
dispratio_hdu = fits.PrimaryHDU(data=dispratio, header=dispheader)
dispratio_hdu.writeto("dispratio.fits", clobber=True)
musevel
```
#### Quantify distance from a center point
np.argmax(museflux)
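A sketch of how that could be done (assuming museflux is available from the commented-out flux call above): locate the peak pixel with np.unravel_index and build a radial-distance map around it.
```
peak_y, peak_x = np.unravel_index(np.nanargmax(museflux), museflux.shape)
yy, xx = np.indices(museflux.shape)
radius_pix = np.sqrt((yy - peak_y) ** 2 + (xx - peak_x) ** 2)  # distance from the peak, in pixels
```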
```
musedisp[musedisp == 0] = np.nan
almadisp[almadisp == 0] = np.nan
np.nanmean(musevel/almavel)
fig, ax = plt.subplots(figsize=(10,10))
styleplots()
#mask = (musevel > 0) & (almavel>0)
#frame1 = ax.scatter(musevel-np.nanmedian(musevel), almavel, c=almadisp, cmap='viridis', s=25)
frame1 = ax.scatter(musevel-np.nanmedian(musevel), musevel-np.nanmedian(musevel), c=almadisp, cmap='viridis', s=25)
#frame1 = ax.scatter(velratio, dispratio)
#frame2 = ax.scatter(musedisp, almadisp, c=musevel, cmap='plasma', s=22)
frame2 = ax.scatter(musedisp/2.35, almadisp, c=musevel, cmap='plasma', s=22)
ax.set_ylabel("ALMA CO(2-1) Velocity")
ax.set_xlabel(r"MUSE H$\alpha$ Velocity")
x = np.arange(-500,500)
y = np.arange(-500,500)
ax.fill_between(x, y, y/2, facecolor='gray', alpha=0.4)
ax.plot(x,y, label="1:1")
ax.plot(x,y/2, label="1:2")
ax.plot(x,y/3, label="1:3")
ax.plot(x,y/4, label="1:4")
ax.set_ylim(-200,200)
ax.set_xlim(-200,200)
ax.legend()
plt.show()
from scipy.stats import kendalltau
import seaborn as sns
fig, ax = plt.subplots(figsize=(12,12))
styleplots()
#sns.set()
#sns.jointplot(musevel, almavel, kind="hex", stat_func=kendalltau, cmap=sns.cubehelix_palette(light=1, as_cmap=True))
lim=300
# sns.jointplot(musevel, almavel, kind="hex", cmap=sns.cubehelix_palette(light=1, as_cmap=True), xlim=(-lim,lim), ylim=(-lim,lim))
cmap1 = sns.cubehelix_palette(light=1, as_cmap=True)
cmap2 = sns.cubehelix_palette(start=2.8, rot=.1, as_cmap=True)
# ax.plot(x,y, label="1:1")
# ax.plot(x,y/2, label="1:2")
# #ax.plot(x,y/3, label="1:3")
# ax.plot(x,y/4, label="1:4")
ax.set_xlim(-lim, lim)
ax.set_ylim(-lim, lim)
ax.fill_between(x, y, y/2, facecolor='darkgray', alpha=0.4, label="Ratio of 1:1 - 2:1")
ax.fill_between(x, y/2, y/4, facecolor='gray', alpha=0.4, label="Ratio of 2:1 - 4:1")
ax.legend(prop={'size': 22}, loc=2)
#sns.jointplot(musevel-np.nanmedian(musevel), almavel-np.nanmedian(almavel), ax=ax, kind="kde", cmap=cmap2,xlim=(-lim,lim), ylim=(-lim,lim), kernel='gau', shade_lowest=False, dropna=True)
ax.set_xlabel(r"MUSE H$\alpha$ Velocity & Velocity Dispersion (km s$^{-1}$)")
ax.set_ylabel(r"ALMA CO(2-1) Velocity & Velocity Dispersion (km s$^{-1}$)")
sns.jointplot(musevel-np.nanmedian(musevel), almavel-np.nanmedian(almavel), ax=ax, kind="kde", cmap=cmap2,xlim=(-lim,lim), ylim=(-lim,lim), kernel='gau', shade_lowest=False, dropna=True)
sns.jointplot(musedisp/2.35, almadisp, ax=ax, kind="kde", cmap=cmap1, xlim=(-lim,lim), ylim=(-lim,lim), shade_lowest=False, kernel='gau', shade=True, dropna=True)
ax.text(-40, 110, "Velocity", size=26)
ax.text(130, -40, "Dispersion", size=26)
fig.savefig("muse_alma_kde_plot.pdf", bbox_inches="tight")
#sns.jointplot(musedisp, almadisp, kind="hex", cmap=sns.cubehelix_palette(light=1, as_cmap=True))
import os
os.getcwd()
# fig, ax = plt.subplots(figsize=(12,12))
# the *_reshape arrays were presumably flattened copies of the velocity maps;
# mask out NaN pixels so the KDE can be computed
ok = ~np.isnan(musevel) & ~np.isnan(almavel)
sns.kdeplot(musevel[ok], almavel[ok])
sns.set(style="darkgrid")
iris = sns.load_dataset("iris")
# Subset the iris dataset by species
setosa = iris.query("species == 'setosa'")
virginica = iris.query("species == 'virginica'")
# Set up the figure
f, ax = plt.subplots(figsize=(8, 8))
ax.set_aspect("equal")
# Draw the two density plots
ax = sns.kdeplot(musedisp, almadisp,
cmap="Reds", shade=True, shade_lowest=False)
ax = sns.kdeplot(virginica.sepal_width, virginica.sepal_length,
cmap="Blues", shade=True, shade_lowest=False)
# Add labels to the plot
red = sns.color_palette("Reds")[-2]
blue = sns.color_palette("Blues")[-2]
ax.text(2.5, 8.2, "virginica", size=16, color=blue)
ax.text(3.8, 4.5, "setosa", size=16, color=red)
plt.imshow(almavel, origin='lower')
plt.imshow(musevel, origin='lower', cmap='plasma', alpha=0.5)
plt.imshow(dispratio, origin='lower')
```
## 3. Map Making
We now have two registered MUSE and ALMA flux density maps for H$\alpha$ and CO(2-1), respectively.
* The ALMA pixels are in Jy beam$^{-1}$ $\times$ km s$^{-1}$
* The MUSE pixels are in $\times 10^{-20}$ erg s$^{-1}$ cm$^{-2}$ Ang$^{-1}$
```
ax1 = plt.subplot(1,2,1)
ax1.imshow(museHDU[0].data, origin='lower')
ax1.set_xlim(100, 220)
ax1.set_ylim(100,300)
ax2 = plt.subplot(1,2,2)
ax2.imshow(registered_alma_data, origin='lower')
ax2.set_xlim(100, 220)
ax2.set_ylim(100,300)
registered_muse_data = museHDU[0].data
np.nanmax(registered_muse_data) # We need to use np.nanmax to ignore nans
np.nanmax(registered_alma_data)
```
Now you can divide and make the ratio map. This will be especially awesome because of the NaNs - you will automatically crop to the ALMA map, because Number/NaN = NaN. Great!
```
peakratio = np.nanmax(registered_muse_data) / np.nanmax(registered_alma_data)
peakratio
normalized_alma_data = registered_alma_data * peakratio
Ha_to_CO_norm_data = registered_muse_data / normalized_alma_data
Ha_to_CO_norm_wcs_header = w_muse.to_header()
Ha_to_CO_norm_hdu = fits.PrimaryHDU(data=ha_to_co_norm_data, header=ha_to_co_norm_wcs_header)
Ha_to_CO_norm_hdu.writeto("Ha_to_CO_norm.fits", clobber=True)
ax = plt.subplot()
ax.imshow(Ha_to_CO_norm_data, origin='Lower')
ax.set_xlim = (150, 250)
ax.set_ylim = (150, 250)
```
|
github_jupyter
|
| 0.567457 | 0.91302 |
# Decision Trees
```
from PIL import Image
import numpy as np
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
from sklearn import datasets, tree
matplotlib.style.use('bmh')
matplotlib.rcParams['figure.figsize']=(10,7)
```
A simple decision tree experiment
using the Iris dataset
https://zh.wikipedia.org/wiki/安德森鸢尾花卉数据集
```
# windows only hack for graphviz path
import os
for path in os.environ['PATH'].split(os.pathsep):
if path.endswith("Library\\bin"):
os.environ['PATH']+=os.pathsep+os.path.join(path, 'graphviz')
# Iris dataset
iris = datasets.load_iris()
X, y = iris.data, iris.target
clf=tree.DecisionTreeClassifier()
clf.fit(X, y)
iris_feature_names = ["花萼長度", "花萼寬度", "花瓣長度","花瓣寬度"]
iris_target_names = ["山鳶尾", "變色鳶尾", "維吉尼亞鳶尾"]
dot_data = tree.export_graphviz(clf, out_file=None,
feature_names=iris_feature_names,
class_names=iris_target_names,
filled=True, rounded=True,
special_characters=True)
import pydot_ng as pydot
from IPython.display import SVG
SVG(pydot.graph_from_dot_data(dot_data).create_svg())
# Keep only two of X's features
X = iris.data[:, [0, 1]]
clf.fit(X, y)
# Plot-domain boundaries
x_min, y_min = X.min(axis=0)-1
x_max, y_max = X.max(axis=0)+1
# Grid of coordinate points
grid = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
# grid.shape = (2, 200, 200)
# Compute the tree classifier's predictions at the grid points
Z = clf.predict(grid.reshape(2, -1).T)
Z = Z.reshape(grid.shape[1:])
# Draw the class colors and decision boundaries
plt.pcolormesh(grid[0], grid[1], Z, cmap=plt.cm.rainbow, alpha=0.02)
plt.contour(grid[0], grid[1], Z, colors=['k', 'k', 'k'], linestyles=['-', '-', '-'],
levels=[0, 1, 2])
# Mark the sample points
plt.scatter(X[:,0], X[:, 1], c=y, cmap=plt.cm.rainbow, zorder=10, s=50);
```
## Q
* Try different settings (a sketch follows below)
* Try different plots
* Reference: http://scikit-learn.org/stable/auto_examples/tree/plot_iris.html#sphx-glr-auto-examples-tree-plot-iris-py
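One possible starting point for the first item (a sketch that reuses X, y and grid from the cells above, varying max_depth):
```
# Compare decision boundaries for a few tree depths
for depth in [1, 3, None]:
    clf_d = tree.DecisionTreeClassifier(max_depth=depth)
    clf_d.fit(X, y)
    Z_d = clf_d.predict(grid.reshape(2, -1).T).reshape(grid.shape[1:])
    plt.figure(figsize=(6, 4))
    plt.pcolormesh(grid[0], grid[1], Z_d, cmap=plt.cm.rainbow, alpha=0.1)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.rainbow, s=30)
    plt.title("max_depth = {}".format(depth))
plt.show()
```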
## MNIST
```
import gzip
import pickle
with gzip.open('mnist.pkl.gz', 'rb') as f:
train_set, validation_set, test_set = pickle.load(f, encoding='latin1')
train_X, train_y = train_set
test_X, test_y = test_set
#PCA
from sklearn.decomposition import PCA
pca = PCA(n_components=60)
train_X = pca.fit_transform(train_set[0])
test_X = pca.transform(test_set[0])
# use only first 10000 samples
#idx = np.random.choice(np.arange(train_X.shape[0]), 30000, replace=False)
#train_X = train_X[idx]
#train_y = train_y[idx]
clf = tree.DecisionTreeClassifier()
%%timeit -n 1 -r 1
clf.fit(train_X, train_y)
%%timeit -n 1 -r 1
print(np.mean(clf.predict(train_X) == train_y))
%%timeit -n 1 -r 1
print(np.mean(clf.predict(test_X) == test_y))
```
## Q
* Skip PCA and train directly on the raw image pixels
* Shrink the images (one possible sketch follows below)
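A sketch of the image-shrinking idea (the actual q_dtree_halfsize.py script is not shown here): keep every other pixel so each 28x28 image becomes 14x14, then fit the tree on the raw pixels.
```
train_imgs = train_set[0].reshape(-1, 28, 28)[:, ::2, ::2].reshape(-1, 14 * 14)
test_imgs = test_set[0].reshape(-1, 28, 28)[:, ::2, ::2].reshape(-1, 14 * 14)
clf_half = tree.DecisionTreeClassifier()
clf_half.fit(train_imgs, train_set[1])
print(np.mean(clf_half.predict(test_imgs) == test_set[1]))
```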
```
%%timeit -n 1 -r 1
%run -i q_dtree_halfsize.py
```
|
github_jupyter
|
| 0.376738 | 0.821259 |
```
import keras
from keras import models
from keras.preprocessing import image
import tensorflow as tf
import numpy as np
from keras import backend as K
from keras.applications import vgg16
import matplotlib.pyplot as plt
%matplotlib inline
tf.logging.set_verbosity(tf.logging.ERROR)
model = vgg16.VGG16(weights='imagenet',
include_top=False)
model.summary()
# Load a source image
img = image.load_img("/tmp/workspace/Pictures/cat_1700.jpg", target_size=(150, 150))
plt.imshow(img)
plt.show()
img_tensor = image.img_to_array(img)
img_tensor = np.expand_dims(img_tensor, axis=0)
img_tensor = vgg16.preprocess_input(img_tensor)
# Set here the layer name and filter number for analysis
layer_name = 'block1_conv1'
filter_num = 0
# Display raw kernel values of a filter in a convolutional layer
kernel = model.get_layer(layer_name).kernel
c_filter = kernel[:,:,:,filter_num]
print("Shape of filter No. {} in layer {}: {}".format(filter_num, layer_name, c_filter.shape))
print()
print("Raw kernel values:")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(c_filter))
# Display the activation of a single filter in a specific convolutional layer
layer_output = [model.get_layer(layer_name).output]
# Creates a model that will return these outputs, given the model input:
activation_model = models.Model(inputs=model.input, outputs=layer_output)
activation = activation_model.predict(img_tensor)
print("Activation map shape:", activation.shape)
plt.matshow(activation[0, :, :, filter_num])
plt.show()
# Create a map of all filter activations in a specific layer
images_per_row = 16
n_features = activation.shape[-1]
size = activation.shape[1]
n_cols = n_features // images_per_row
display_grid = np.zeros((size * n_cols, images_per_row * size))
for col in range(n_cols):
for row in range(images_per_row):
channel_image = activation[0, :, :, col * images_per_row + row]
# Post-process the feature to make it visually palatable
channel_image -= channel_image.mean()
channel_image /= (channel_image.std() + 1e-5)
channel_image *= 64
channel_image += 128
channel_image = np.clip(channel_image, 0, 255).astype('uint8')
display_grid[col * size : (col + 1) * size,
row * size : (row + 1) * size] = channel_image
scale = 1. / size
plt.figure(figsize=(scale * display_grid.shape[1],
scale * display_grid.shape[0]))
plt.title(layer_name)
plt.grid(False)
plt.imshow(display_grid, aspect='auto')
plt.show()
```
[View in Colaboratory](https://colab.research.google.com/github/ylongqi/openrec/blob/master/tutorials/OpenRec_Tutorial_1.ipynb)
Get Started
---
by *[Longqi@Cornell](http://www.cs.cornell.edu/~ylongqi)* licensed under [Creative Commons Attribution 4.0 International License](https://creativecommons.org/licenses/by/4.0/)
This tutorial demonstrates the process of training and evaluating recommendation algorithms using OpenRec (>=0.2.0):
* Prepare training and evaluation datasets.
* Instantiate samplers for training and evaluation.
* Instantiate a recommender.
* Instantiate evaluators.
* Instantiate a model trainer.
* TRAIN AND EVALUATE!
Prepare training and evaluation datasets
---
* Download your favorite dataset from the web. In this tutorial, we use [a relatively small citeulike dataset](http://www.wanghao.in/CDL.htm) for demonstration purposes.
```
!sudo apt-get install unrar
!pip install openrec
import os
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
urlretrieve('http://www.wanghao.in/data/ctrsr_datasets.rar', 'ctrsr_datasets.rar')
os.system('unrar x ctrsr_datasets.rar')
```
* Convert raw data into a [numpy structured array](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.rec.html). The [Dataset](https://github.com/ylongqi/openrec/blob/master/openrec/utils/dataset.py) class requires the two keys **user_id** and **item_id**. Each row in the converted numpy array represents an interaction; the array may contain additional keys depending on the use case.
```
import numpy as np
import random
total_users = 0
interactions_count = 0
with open('ctrsr_datasets/citeulike-a/users.dat', 'r') as fin:
for line in fin:
interactions_count += int(line.split()[0])
total_users += 1
# randomly hold out one item per user for validation and testing, respectively.
val_structured_arr = np.zeros(total_users, dtype=[('user_id', np.int32),
('item_id', np.int32)])
test_structured_arr = np.zeros(total_users, dtype=[('user_id', np.int32),
('item_id', np.int32)])
train_structured_arr = np.zeros(interactions_count-total_users * 2,
dtype=[('user_id', np.int32),
('item_id', np.int32)])
interaction_ind = 0
next_user_id = 0
next_item_id = 0
map_to_item_id = dict() # Map item id from 0 to len(items)-1
with open('ctrsr_datasets/citeulike-a/users.dat', 'r') as fin:
for line in fin:
item_list = line.split()[1:]
random.shuffle(item_list)
for ind, item in enumerate(item_list):
if item not in map_to_item_id:
map_to_item_id[item] = next_item_id
next_item_id += 1
if ind == 0:
val_structured_arr[next_user_id] = (next_user_id,
map_to_item_id[item])
elif ind == 1:
test_structured_arr[next_user_id] = (next_user_id,
map_to_item_id[item])
else:
train_structured_arr[interaction_ind] = (next_user_id,
map_to_item_id[item])
interaction_ind += 1
next_user_id += 1
```
* Instantiate training, validation, and testing datasets using the Dataset class.
```
from openrec.utils import Dataset
train_dataset = Dataset(raw_data=train_structured_arr,
total_users=total_users,
total_items=len(map_to_item_id),
name='Train')
val_dataset = Dataset(raw_data=val_structured_arr,
total_users=total_users,
total_items=len(map_to_item_id),
num_negatives=500,
name='Val')
test_dataset = Dataset(raw_data=test_structured_arr,
total_users=total_users,
total_items=len(map_to_item_id),
num_negatives=500,
name='Test')
```
Instantiate samplers
---
* For training, **RandomPairwiseSampler** is used, i.e., each instance contains a user, an item the user interacted with, and an item the user did NOT interact with.
* For evaluation, **EvaluationSampler** is used. It feeds in user interaction data one user at a time. For a user, (relevant and irrelevant) items are divided into batches and evaluated separately.
```
from openrec.utils.samplers import RandomPairwiseSampler
from openrec.utils.samplers import EvaluationSampler
train_sampler = RandomPairwiseSampler(batch_size=1000,
dataset=train_dataset,
num_process=5)
val_sampler = EvaluationSampler(batch_size=1000,
dataset=val_dataset)
test_sampler = EvaluationSampler(batch_size=1000,
dataset=test_dataset)
```
Instantiate a recommender
---
* We use the [BPR recommender](https://github.com/ylongqi/openrec/blob/master/openrec/recommenders/bpr.py) that implements the pure Bayesian Personalized Ranking (BPR) algorithm.
```
from openrec.recommenders import BPR
bpr_model = BPR(batch_size=1000,
total_users=train_dataset.total_users(),
total_items=train_dataset.total_items(),
dim_user_embed=50,
dim_item_embed=50,
save_model_dir='bpr_recommender/',
train=True, serve=True)
```
Instantiate evaluators
---
* Define the evaluators that you plan to use. This tutorial evaluates the recommender using the Area Under the Curve (AUC) metric.
```
from openrec.utils.evaluators import AUC
auc_evaluator = AUC()
```
Instantiate a model trainer
---
* The model trainer wraps a recommender and makes it ready for training and evaluation.
```
from openrec import ModelTrainer
model_trainer = ModelTrainer(model=bpr_model)
```
TRAIN AND EVALUATE
---
```
model_trainer.train(total_iter=10000, # Total number of training iterations
eval_iter=1000, # Evaluate the model every "eval_iter" iterations
save_iter=10000, # Save the model every "save_iter" iterations
train_sampler=train_sampler,
eval_samplers=[val_sampler, test_sampler],
evaluators=[auc_evaluator])
```
# A Conceptual, Practical Introduction to Trax Layers
This notebook introduces the core concepts of the Trax library through a series of code samples and explanations. The topics covered in the following sections are:
1. **Layers**: the basic building blocks and how to combine them into networks
1. **Data Streams**: how individual layers manage inputs and outputs
1. **Data Stack**: how the Trax runtime manages data streams for the layers
1. **Defining New Layer Classes**: how to define and test your own layer classes
1. **Models**: how to train, evaluate, and run predictions with Trax models
## General Setup
Execute the following few cells (once) before running any of the code samples in this notebook.
```
# Copyright 2018 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as onp # np used below for trax.math.numpy
# Import Trax
! pip install -q -U trax
! pip install -q tensorflow
from trax import math
from trax import layers as tl
from trax import shapes
from trax.math import numpy as np # For use in defining new layer types.
from trax.shapes import ShapeDtype
from trax.shapes import signature
# Settings and utilities for handling inputs, outputs, and object properties.
onp.set_printoptions(precision=3) # Reduce visual noise from extra digits.
def show_layer_properties(layer_obj, layer_name):
template = ('{}.n_in: {}\n'
'{}.n_out: {}\n'
'{}.sublayers: {}\n'
'{}.weights: {}\n')
print(template.format(layer_name, layer_obj.n_in,
layer_name, layer_obj.n_out,
layer_name, layer_obj.sublayers,
layer_name, layer_obj.weights))
def floats_in_range(start, end):
return onp.arange(start, end).astype(onp.float32)
```
# 1. Layers
The Layer class represents Trax's basic building blocks:
```
"""Base class for composable layers in a deep learning network.
Layers are the basic building blocks for deep learning models. A Trax layer
computes a function from zero or more inputs to zero or more outputs,
optionally using trainable weights (common) and non-parameter state (not
common). Authors of new layer subclasses typically override at most two
methods of the base `Layer` class:
forward(inputs, weights):
Computes this layer's output as part of a forward pass through the model.
new_weights(self, input_signature):
Returns new weights suitable for inputs with the given signature.
```
## A layer computes a function.
A layer computes a function from zero or more inputs to zero or more outputs. The inputs and outputs are NumPy arrays or JAX objects behaving as NumPy arrays.
The simplest layers, those with no weights or sublayers, can be used without initialization. You can think of them (and test them) like simple mathematical functions. For ease of testing and interactive exploration, layer
objects implement the `__call__` method, so you can call them directly on input data:
```
y = my_layer(x)
```
Layers are also objects, so you can inspect their properties. For example:
```
print('Number of inputs expected by this layer: {}'.format(my_layer.n_in))
```
### Example 1. tl.Relu $[n_{in} = 1, n_{out} = 1]$
```
relu = tl.Relu()
x = floats_in_range(-7, 8).reshape(3, -1)
y = relu(x)
# Show input, output, and two layer properties.
template = ('x:\n{}\n\n'
'relu(x):\n{}\n\n'
'number of inputs expected by this layer: {}\n'
'number of outputs promised by this layer: {}')
print(template.format(x, y, relu.n_in, relu.n_out))
```
### Example 2. tl.Concatenate $[n_{in} = 2, n_{out} = 1]$
```
concat_axis_0 = tl.Concatenate(axis=0)
concat_axis_1 = tl.Concatenate(axis=1)
x1 = floats_in_range(-7, 8).reshape(3, -1)
x2 = x1 * 10.
y0 = concat_axis_0([x1, x2])
y1 = concat_axis_1([x1, x2])
template = ('x1:\n{}\n\n'
'x2:\n{}\n\n'
'concat_axis_0([x1, x2]):\n{}\n\n'
'concat_axis_1([x1, x2]):\n{}\n')
print(template.format(x1, x2, y0, y1))
# Print abbreviated object representations (useful for debugging).
print('concat_axis_0: {}'.format(concat_axis_0))
print('concat_axis_1: {}'.format(concat_axis_1))
```
## Layers are trainable.
Most layer types include weights that affect the computation of outputs from inputs, and they use back-propagated gradients to update those weights.
🚧🚧 *A very small subset of layer types, such as `BatchNorm`, also include weights (called `state`) that are updated based on forward-pass inputs/computation rather than back-propagated gradients.*
### Initialization
Trainable layers must be initialized before use. Trax's model trainers take care of this as part of the overall training process. In other settings (e.g., in tests or interactively in a Colab notebook), you need to initialize the *outermost/topmost* layer explicitly. For this, use `init`:
```
def init(self, input_signature, rng=None):
"""Initializes this layer and its sublayers recursively.
This method is designed to initialize each layer instance once, even if the
same layer instance occurs in multiple places in the network. This enables
weight sharing to be implemented as layer sharing.
Args:
input_signature: A `ShapeDtype` instance (if this layer takes one input)
or a list/tuple of `ShapeDtype` instances.
rng: A single-use random number generator (JAX PRNG key). If none is
provided, a default rng based on the integer seed 0 will be used.
Returns:
A (weights, state) tuple, in which weights contains newly created weights
on the first call and `EMPTY_WEIGHTS` on all subsequent calls.
"""
```
Input signatures can be built from scratch using `ShapeDtype` objects (see the short example after the docstring below), or can
be derived from data via the `signature` function:
```
def signature(obj):
"""Returns a `ShapeDtype` signature for the given `obj`.
A signature is either a `ShapeDtype` instance or a tuple of `ShapeDtype`
instances. Note that this function is permissive with respect to its inputs
(accepts lists or tuples, and underlying objects can be any type as long as
they have shape and dtype attributes), but strict with respect to its outputs
(only `ShapeDtype`, and only tuples).
Args:
obj: An object that has `shape` and `dtype` attributes, or a list/tuple
of such objects.
"""
```
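As a small added illustration (not from the original notebook), a signature can also be constructed by hand; this sketch assumes the `ShapeDtype` constructor takes a shape tuple and a dtype, matching the import in the setup cell:
```
# Build an input signature by hand and use it to initialize a trainable layer.
sig = ShapeDtype((3, 5), onp.float32)  # batch of 3 vectors, 5 features each
ln = tl.LayerNorm()
ln.init(sig)
print('ln.weights:\n{}'.format(ln.weights))
```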
### Example 3. tl.LayerNorm $[n_{in} = 1, n_{out} = 1]$
```
layer_norm = tl.LayerNorm()
x = floats_in_range(-7, 8).reshape(3, -1)
layer_norm.init(signature(x))
y = layer_norm(x)
template = ('x:\n{}\n\n'
'layer_norm(x):\n{}\n')
print(template.format(x, y))
print('layer_norm.weights:\n{}'.format(layer_norm.weights))
```
## Layers combine into layers.
The Trax library authors encourage users to build new layers as combinations of existing layers. Hence, the library provides a small set of _combinator_ layers: layer objects that make a list of layers behave as a single layer.
The new layer, like other layers, can:
* compute outputs from inputs,
* update parameters from gradients, and
* combine with yet more layers.
### Combine with `Serial`
The most common way to combine layers is with the `Serial` class:
```
class Serial(base.Layer):
"""Combinator that applies layers serially (by function composition).
A Serial combinator uses stack semantics to manage data for its sublayers.
Each sublayer sees only the inputs it needs and returns only the outputs it
has generated. The sublayers interact via the data stack. For instance, a
sublayer k, following sublayer j, gets called with the data stack in the
state left after layer j has applied. The Serial combinator then:
- takes n_in items off the top of the stack (n_in = k.n_in) and calls
layer k, passing those items as arguments; and
- takes layer k's n_out return values (n_out = k.n_out) and pushes
them onto the data stack.
...
```
If one layer has the same number of outputs as the next layer has inputs (which is the usual case), the successive layers behave like function composition:
```
# h(.) = g(f(.))
layer_h = Serial(
layer_f,
layer_g,
)
```
Note how, inside `Serial`, function composition is expressed naturally as a succession of operations, so that no nested parentheses are needed.
### Example 4. y = layer_norm(relu(x)) $[n_{in} = 1, n_{out} = 1]$
```
layer_block = tl.Serial(
tl.Relu(),
tl.LayerNorm(),
)
x = floats_in_range(-7, 8).reshape(3, -1)
layer_block.init(signature(x))
y = layer_block(x)
template = ('x:\n{}\n\n'
'layer_block(x):\n{}')
print(template.format(x, y,))
```
And we can inspect the block as a whole, as if it were just another layer:
### Example 4'. Inspecting a Serial layer.
```
print('layer_block:\n{}\n'.format(layer_block))
print('layer_block.weights:\n{}'.format(layer_block.weights))
```
### Combine with `Branch`
The `Branch` combinator arranges layers into parallel computational channels:
```
def Branch(*layers):
"""Combinator that applies a list of layers in parallel to copies of inputs.
Each layer in the input list is applied to as many inputs from the stack
as it needs, and their outputs are successively combined on stack.
For example, suppose one has three layers:
- F: 1 input, 1 output
- G: 3 inputs, 1 output
- H: 2 inputs, 2 outputs (h1, h2)
Then Branch(F, G, H) will take 3 inputs and give 4 outputs:
- inputs: a, b, c
- outputs: F(a), G(a, b, c), h1, h2 where h1, h2 = H(a, b)
```
Residual blocks, for example, are implemented using `Branch`:
```
def Residual(*layers, **kwargs):
"""Wraps a series of layers with a residual connection.
Args:
*layers: One or more layers, to be applied in series.
**kwargs: If empty (the usual case), the Residual layer computes the
element-wise sum of the stack-top input with the output of the layer
series. If non-empty, the only key should be 'shortcut', whose value is
a layer that applies to a copy of the inputs and (elementwise) adds its
output to the output from the main layer series.
Returns:
A layer representing a residual connection paired with a layer series.
"""
shortcut = kwargs.get('shortcut') # default None signals no-op (copy inputs)
layers = _ensure_flat(layers)
layer = layers[0] if len(layers) == 1 else Serial(layers)
return Serial(
Branch(shortcut, layer),
Add(), # pylint: disable=no-value-for-parameter
)
```
Here's a simple code example to highlight the mechanics.
### Example 5. Branch
```
relu = tl.Relu()
times_10 = tl.Fn(lambda x: x * 10.0)
branch_relu_t10 = tl.Branch(relu, times_10)
x = floats_in_range(-7, 8).reshape(3, -1)
branch_relu_t10.init(signature(x))
y1, y2 = branch_relu_t10(x)
# Show input, outputs, and two layer properties.
template = ('x:\n{}\n\n'
'y1:\n{}\n\n'
'y2:\n{}\n\n'
'number of inputs expected by this layer: {}\n'
'number of outputs promised by this layer: {}')
print(template.format(x, y1, y2, branch_relu_t10.n_in, branch_relu_t10.n_out))
```
# 2. Data Streams
The Trax runtime supports the concept of multiple data streams, which gives individual layers flexibility to:
- process a single data stream ($n_{in} = n_{out} = 1$),
- process multiple parallel data streams ($n_{in} = n_{out} = 2, 3, ... $),
- split data streams ($n_{in} < n_{out}$), or
- merge data streams ($n_{in} > n_{out}$).
We saw in section 1 the example of `Residual`, which involves both a split and a merge:
```
...
return Serial(
Branch(shortcut, layer),
Add(),
)
```
In other words, layer by layer:
- `Branch(shortcut, layer)`: makes two copies of the single incoming data stream, passes one copy via the shortcut (typically a no-op), and processes the other copy via the given layer series. [$n_{in} = 1$, $n_{out} = 2$]
- `Add()`: combines the two streams back into one by adding elementwise. [$n_{in} = 2$, $n_{out} = 1$]
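To make the split-and-merge pattern concrete, here is a small runnable sketch (an addition, not part of the original notebook). It assumes, as the `Residual` code above suggests, that `Branch` treats a `None` sublayer as a no-op copy of its input:
```
double = tl.Fn(lambda x: x * 2.0)
split_and_merge = tl.Serial(
    tl.Branch(None, double),  # split: copy the stream; send one copy through `double`
    tl.Add(),                 # merge: add the two streams elementwise
)
x = floats_in_range(-3, 3).reshape(2, -1)
split_and_merge.init(signature(x))
y = split_and_merge(x)
print(x)
print(y)  # expected: x + 2*x = 3*x
```
This is exactly the shape of a residual connection, with `double` standing in for the main layer series.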
# 3. Data Stack
# 4. Defining New Layer Classes
## Simpler layers, with the `@layer` decorator
## Full subclass definitions, where necessary
# 5. Models
# Transfer Learning
In this notebook, you'll learn how to use pre-trained networks to solve challenging problems in computer vision. Specifically, you'll use networks trained on [ImageNet](http://www.image-net.org/) [available from torchvision](http://pytorch.org/docs/0.3.0/torchvision/models.html).
ImageNet is a massive dataset with over 1 million labeled images in 1000 categories. It's used to train deep neural networks built from convolutional layers. I'm not going to get into the details of convolutional networks here, but if you want to learn more about them, please [watch this](https://www.youtube.com/watch?v=2-Ol7ZB0MmU).
Once trained, these models work astonishingly well as feature detectors for images they weren't trained on. Using a pre-trained network on images not in the training set is called transfer learning. Here we'll use transfer learning to train a network that can classify our cat and dog photos with near perfect accuracy.
With `torchvision.models` you can download these pre-trained networks and use them in your applications. We'll include `models` in our imports now.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
```
Most of the pretrained models require the input to be 224x224 images. Also, we'll need to match the normalization used when the models were trained. Each color channel was normalized separately, the means are `[0.485, 0.456, 0.406]` and the standard deviations are `[0.229, 0.224, 0.225]`.
```
data_dir = 'Cat_Dog_data'
# TODO: Define transforms for the training data and testing data
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=64, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=64)
```
We can load in a model such as [DenseNet](http://pytorch.org/docs/0.3.0/torchvision/models.html#id5). Let's print out the model architecture so we can see what's going on.
```
model = models.densenet121(pretrained=True)
model
```
This model is built out of two main parts, the features and the classifier. The features part is a stack of convolutional layers and overall works as a feature detector that can be fed into a classifier. The classifier part is a single fully-connected layer `(classifier): Linear(in_features=1024, out_features=1000)`. This layer was trained on the ImageNet dataset, so it won't work for our specific problem. That means we need to replace the classifier, but the features will work perfectly on their own. In general, I think about pre-trained networks as amazingly good feature detectors that can be used as the input for simple feed-forward classifiers.
```
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
from collections import OrderedDict
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(1024, 500)),
('relu', nn.ReLU()),
('fc2', nn.Linear(500, 2)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
```
With our model built, we need to train the classifier. However, now we're using a **really deep** neural network. If you try to train this on a CPU like normal, it will take a long, long time. Instead, we're going to use the GPU to do the calculations. The linear algebra computations are done in parallel on the GPU leading to 100x increased training speeds. It's also possible to train on multiple GPUs, further decreasing training time.
PyTorch, along with pretty much every other deep learning framework, uses [CUDA](https://developer.nvidia.com/cuda-zone) to efficiently compute the forward and backwards passes on the GPU. In PyTorch, you move your model parameters and other tensors to the GPU memory using `model.to('cuda')`. You can move them back from the GPU with `model.to('cpu')` which you'll commonly do when you need to operate on the network output outside of PyTorch. As a demonstration of the increased speed, I'll compare how long it takes to perform a forward and backward pass with and without a GPU.
```
import time
for device in ['cpu', 'cuda']:
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
model.to(device)
for ii, (inputs, labels) in enumerate(trainloader):
# Move input and label tensors to the GPU
inputs, labels = inputs.to(device), labels.to(device)
start = time.time()
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if ii==3:
break
print(f"Device = {device}; Time per batch: {(time.time() - start)/3:.3f} seconds")
```
You can write device agnostic code which will automatically use CUDA if it's enabled like so:
```python
# at beginning of the script
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
...
# then whenever you get a new Tensor or Module
# this won't copy if they are already on the desired device
input = data.to(device)
model = MyModule(...).to(device)
```
From here, I'll let you finish training the model. The process is the same as before except now your model is much more powerful. You should get better than 95% accuracy easily.
>**Exercise:** Train a pretrained model to classify the cat and dog images. Continue with the DenseNet model, or try ResNet, which is also a good model to start with. Make sure you are only training the classifier and that the parameters for the features part are frozen.
```
# Use GPU if it's available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = models.densenet121(pretrained=True)
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
model.classifier = nn.Sequential(nn.Linear(1024, 256),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(256, 2),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=0.003)
model.to(device);
epochs = 1
steps = 0
running_loss = 0
print_every = 5
for epoch in range(epochs):
for inputs, labels in trainloader:
steps += 1
# Move input and label tensors to the default device
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
logps = model.forward(inputs)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
for inputs, labels in testloader:
inputs, labels = inputs.to(device), labels.to(device)
logps = model.forward(inputs)
batch_loss = criterion(logps, labels)
test_loss += batch_loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
print(f"Epoch {epoch+1}/{epochs}.. "
f"Train loss: {running_loss/print_every:.3f}.. "
f"Test loss: {test_loss/len(testloader):.3f}.. "
f"Test accuracy: {accuracy/len(testloader):.3f}")
running_loss = 0
model.train()
```
# Numpy
NumPy is a general-purpose array-processing package. It provides a high-performance multidimensional array object and tools for working with these arrays, and it is the fundamental package for scientific computing with Python.
Arbitrary data types can be defined, which allows NumPy to seamlessly and speedily integrate with a wide variety of databases.
It contains various features, including these important ones:
* A powerful N-dimensional array object.
* Sophisticated (broadcasting) functions (a small example follows this list).
* Tools for integrating C/C++ and Fortran code.
* Useful linear algebra, Fourier transform, and random number capabilities.
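Broadcasting is listed above but not demonstrated later in this notebook, so here is a minimal example (added for clarity):
```
import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])    # shape (2, 3)
b = np.array([10, 20, 30])   # shape (3,)
# b is broadcast across each row of a
print(a + b)                 # [[11 22 33]
                             #  [14 25 36]]
```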
### NumPy – A Replacement for MatLab
NumPy is often used along with packages like SciPy (Scientific Python) and Matplotlib (a plotting library). This combination is widely used as a replacement for MatLab, a popular platform for technical computing. However, the Python alternative to MatLab is now seen as a more modern and complete programming environment, and it is open source, which is an added advantage.
```
import numpy as np
import pandas as pd
```
# NUMPY − NDARRAY OBJECT
The most important object defined in NumPy is an N-dimensional array type called ndarray. It describes a collection of items of the same type, and items in the collection can be accessed using a zero-based index.
Every item in an ndarray takes the same-sized block of memory, and each element is described by a data-type object (called dtype).
Any item extracted from an ndarray object (by slicing) is represented by a Python object of one of the array scalar types.
An instance of the ndarray class can be constructed by any of the array creation routines described later in the tutorial. The basic ndarray is created using the array function in NumPy as follows:
```
np.array
```
It creates an ndarray from any object exposing the array interface, or from any method that returns an array.
```
a=np.array([1,2,3])
print(a)
# More than one dimension
b=np.array([[1,2],[3,4]])
print(b)
# minimum dimensions
a=np.array([1, 2, 3,4,5], ndmin=2)
print (a)
a=np.array([1,2,3],dtype=complex)
print(a)
```
The ndarray object consists of a contiguous one-dimensional segment of computer memory, combined with an indexing scheme that maps each item to a location in the memory block. The memory block holds the elements in row-major order (C style) or column-major order (FORTRAN or MatLab style).
# NUMPY − DATA TYPES
NumPy supports a much greater variety of numerical types than Python does.
NumPy numerical types are instances of dtype (data-type) objects, each having unique
characteristics. The dtypes are available as np.bool_, np.float32, etc.
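As a brief illustration (added here; not part of the original tutorial), you can attach a dtype when creating an array and inspect it afterwards:
```
# Create arrays with explicit dtypes and inspect them
x = np.array([1, 2, 3], dtype=np.float32)
print(x.dtype)                  # float32
y = np.array([True, False], dtype=np.bool_)
print(y.dtype)                  # bool
print(np.dtype('f4'))           # 'f4' is shorthand for float32
```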
# NUMPY − ARRAY ATTRIBUTES
In this section, we will discuss the various array attributes of NumPy.
### ndarray.shape
This array attribute returns a tuple consisting of array dimensions. It can also be used to
resize the array
```
a=np.array([[1,2,3],[4,5,6]])
print (a.shape)
# Resize the array
a.shape=(3,2)
a
print(a.shape)
# NumPy also provides a reshape function to resize an array.
a = np.array([[1,2,3],[4,5,6]])
b=a.reshape(3,2)
print(b)
```
### ndarray.ndim
This array attribute returns the number of array dimensions.
```
# an array of evenly spaced numbers
a=np.arange(24)
a
# This is one dimensional array
a.ndim
# Now Reshape it
b=a.reshape(2,4,3)
b
# b is having three dimension
```
### numpy.itemsize
This array attribute returns the length of each element of array in bytes.
```
# dtype of array is int8 (1 byte)
x = np.array([1,2,3,4,5], dtype=np.int8)
x.itemsize
# dtype of array is float32 (4 bytes)
x = np.array([1,2,3,4,5], dtype=np.float32)
x.itemsize
```
### numpy.flags
The ndarray object has the following attributes. Its current values are returned by this
function.
|flags | Properties |
|-----------------|-----------------------------------------------------|
|C_CONTIGUOUS (C) | The data is in a single, C-style contiguous segment.|
|F_CONTIGUOUS (F) | The data is in a single, Fortran-style contiguous segment.|
|OWNDATA (O) | The array owns the memory it uses or borrows it from another object.|
|WRITEABLE (W) | The data area can be written to. Setting this to False locks the data,making it read-only.|
|ALIGNED (A) | The data and all elements are aligned appropriately for the hardware.|
|UPDATEIFCOPY (U)| This array is a copy of some other array. When this array is deallocated, the base array will be updated with the contents of this array.|
```
x = np.array([1,2,3,4,5])
x.flags
```
# NUMPY − ARRAY CREATION ROUTINES
A new ndarray object can be constructed by any of the following array creation routines or
using a low-level ndarray constructor
#### numpy.empty
It creates an uninitialized array of the specified shape and dtype. For example:
```
# The elements in an array show random values as they are not initialized.
x = np.empty([3,2], dtype=int)
x
```
### numpy.zeros
Returns a new array of specified size, filled with zeros.
```
x=np.zeros(5)
x
x = np.zeros((5,), dtype=np.int_)
x
x = np.zeros((5,5), dtype=np.int_)
x
```
### numpy.ones
Returns a new array of specified size and type, filled with ones.
```
# array of five ones. Default dtype is float
x=np.ones(5)
x
x=np.ones((4,4))
x
```
# NUMPY − ARRAY FROM EXISTING DATA
### numpy.asarray
This function is similar to numpy.array except for the fact that it has fewer parameters. This
routine is useful for converting a Python sequence into an ndarray.
```
# convert list to ndarray
x = [1,2,3]
a = np.asarray(x)
a
x = [1,2,3]
a = np.asarray(x, dtype=float)
a
# ndarray from tuple
x=(1,2,3,4,5)
a=np.asarray(x)
a
# ndarray from list of tuples
x = [(1,2,3),(4,5)]
a = np.asarray(x)
a
```
# NUMPY − ARRAY FROM NUMERICAL RANGES
### numpy.arange
This function returns an ndarray object containing evenly spaced values within a given range.
For example:
```
x = np.arange(5)
x
# dtype set
x = np.arange(5, dtype=float)
x
# Start And Stop Parameters
x = np.arange(10,20,2)
x
```
### numpy.linspace
This function is similar to the arange() function, but instead of a step size, you specify the number
of evenly spaced values within the interval. For example:
```
x=np.linspace(2.0, 3.0, num=5, retstep=True)
x
# endpoint set to false
x = np.linspace(10,20, 5, endpoint = False)
x
```
### numpy.logspace
This function returns an ndarray object containing numbers that are evenly spaced on a log scale. The start and stop endpoints of the scale are exponents of the base, which defaults to 10.
```
# default base is 10
a = np.logspace(1.0, 2.0, num = 10)
a
# set base of log space to 2
a = np.logspace(1,10,num = 10, base = 2)
a
```
```
from datetime import datetime
import pycountry
import re
from functools import reduce
import os
# os.environ["MODIN_ENGINE"] = "dask"
import pandas as pd
# papermill parameters
output_folder = "../output/"
```
### Read data with the pandas CSV reader
```
df = pd.read_csv("https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv")
# parse date field
df['date'] = pd.to_datetime(df['date'].astype(str), format='%Y-%m-%d')
# initialize subdivisions dict
subdivisions = {}
# declare helper functions
def resolve_iso3166_1_by_name(name):
# get iso_3166_1 from country name
return pycountry.countries.get(name=name).alpha_2
def resolve_name(row):
# get name from iso_3166_1
lookup = pycountry.countries.get(alpha_2=row)
return lookup.name
def resolve_iso3166_2(row):
region_code = row['country_region_code']
sub_region_name = row['sub_region_helper']
if sub_region_name and (type(sub_region_name) is str) and len(sub_region_name):
sub_region_name = sub_region_name.lower()
try:
if sub_region_name not in list(subdivisions[region_code]):
return row
row['ISO_3166_2'] = subdivisions[region_code][sub_region_name]
return row
except KeyError:
return row
return row
df["Last_Update_Date"] = datetime.utcnow()
df['Last_Reported_Flag'] = df['date'].max() == df['date']
df['sub_region_helper'] = df['sub_region_1']
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'AR') & (df['sub_region_helper'] != 'Buenos Aires Province')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'AR') & (df["sub_region_helper"] != 'Buenos Aires Province')].str.replace(" Province", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'BG')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'BG')].str.replace(" Province", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'BR')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'BR')].str.replace("State of ", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'CZ')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'CZ')].str.replace(" Region", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'CO')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'CO')].str.replace(" Department", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'EE')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'EE')].str.replace(" County", "maa", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'GB')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'GB')].str.replace("(?i) Council", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'GB')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'GB')].str.replace("Greater ", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'GB')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'GB')].str.replace(";.*", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'HU')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'HU')].str.replace(" County", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'IE')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'IE')].str.replace("County ", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'KW')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'KW')].str.replace(" Governorate", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'KW')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'KW')].str.replace(" Governate", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'LV')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'LV')].str.replace(" Municipality", "s novads", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'LV')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'LV')].str.replace("ss novads", "s novads", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'MU')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'MU')].str.replace(" District", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'NL')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'NL')].str.replace("North ", "noord-", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'PL')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'PL')].str.replace(" Voivodeship", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'PT')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'PT')].str.replace(" District", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'RO')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'RO')].str.replace(" County", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'SA')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'SA')].str.replace(" Province", "", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'SE')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'SE')].str.replace(" County", "s lan", regex=True)
df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'UY')] = df['sub_region_helper'].loc[(df['sub_region_helper'].notna()) & (df['country_region_code'] == 'UY')].str.replace(" Department", "", regex=True)
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)á|ã|à|â|ä|æ|ã|å|ā|Ā|ă", "a", regex=True) # a
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)é|è|ê|ë|ē|ė|ę|ė|ě", "e", regex=True) # e
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)í|î|ï|í|ī|į|ì", "i", regex=True) # i
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ô|õ|ö|ò|ó|ó|œ|ø|ō|õ|ő", "o", regex=True) # o
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)Ñ|ň|ń|ņ", "n", regex=True) # n
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)Ú|ü|û|ù|ū|ů", "u", regex=True) # u
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ß|ś|š|ș|ş", "s", regex=True) # s
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ž|ź|ż|Ȥ", "z", regex=True) # z
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ļ|Ł", "l", regex=True) # l
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ț", "t", regex=True) # t
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)č|ç|ć", "c", regex=True) # c
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ý", "y", regex=True) # y
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ř", "r", regex=True) # r
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ď|ḍ|đ", "d", regex=True) # d
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)Ḩ|Ḥ", "h", regex=True) # h
df['sub_region_helper'] = df['sub_region_helper'].str.replace("(?i)ķ", "k", regex=True) # k
char_replace_map = {
"á|ã|à|â|ä|æ|ã|å|ā|Ā|ă": "a",
"é|è|ê|ë|ē|ė|ę|ė|ě": "e",
"í|î|ï|í|ī|į|ì": "i",
"ô|õ|ö|ò|ó|ó|œ|ø|ō|õ|ő": "o",
"Ñ|ň|ń|ņ": "n",
"Ú|ü|û|ù|ū|ů|ų": "u",
"ß|ś|š|ș|ş": "s",
"ž|ź|ż": "z",
"ļ|Ł": "l",
"ț": "t",
"č|ç|ć": "c",
"ý": "y",
"ř": "r",
"ď|ḍ|đ": "d",
"Ḩ|Ḥ": "h",
";.*": "",
"ķ": "k"
}
def format_subdivision_name(subdivision_name):
for pattern, replacement in char_replace_map.items():
subdivision_name = re.sub(pattern=pattern, repl=replacement, string=subdivision_name, flags=re.IGNORECASE)
return subdivision_name.lower()
def get_subdivisions(region):
try:
subdivs = pycountry.subdivisions.get(country_code=region)
if subdivs:
return {format_subdivision_name(i.name): i.code.replace(f"{region}-", "") for i in subdivs}
return {}
except LookupError:
return {}
for region in df['country_region_code'].loc[df['sub_region_1'].notna()].unique():
_res = get_subdivisions(region)
if len(_res) > 0:
subdivisions[region] = _res
df['country_region_code'].loc[df['country_region_code'].isna()] = df['country_region'].loc[df['country_region_code'].isna()].apply(lambda row: resolve_iso3166_1_by_name(row))
```
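As an aside (a sketch, not part of the original pipeline): the per-character regexes above can be replaced with Unicode normalization, and the chained `df['col'].loc[...] = ...` assignments with a single `df.loc[mask, col] = ...`, which avoids pandas' `SettingWithCopyWarning`. A few characters such as `ø`, `đ` and `ß` have no combining-mark decomposition and would still need explicit replacements. Assuming the same `df` and column names as above:
```
import unicodedata

def strip_accents(text):
    # Decompose characters (NFKD) and drop the combining marks
    if not isinstance(text, str):
        return text
    decomposed = unicodedata.normalize("NFKD", text)
    return "".join(ch for ch in decomposed if not unicodedata.combining(ch))

df_alt = df.copy()
# One of the country-specific cleanups, written without chained indexing
mask = df_alt['sub_region_helper'].notna() & (df_alt['country_region_code'] == 'PL')
df_alt.loc[mask, 'sub_region_helper'] = df_alt.loc[mask, 'sub_region_helper'].str.replace(" Voivodeship", "", regex=False)
df_alt['sub_region_helper'] = df_alt['sub_region_helper'].apply(strip_accents)
```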
### Resolve subdivision codes manually
```
subdivisions['AE']['dubai'] = 'DU'
subdivisions['AE']['umm al quawain'] = 'UQ'
subdivisions['AE']['fujairah'] = 'FU'
subdivisions['AE']['ras al khaimah'] = 'RK'
subdivisions['AE']['ajman'] = 'AJ'
subdivisions['AE']['sharjah'] = 'SH'
subdivisions['AE']['abu dhabi'] = 'AZ'
subdivisions['AR']['la rioja'] = 'F'
subdivisions['AR']['buenos aires province'] = 'B'
subdivisions['AR']['buenos aires'] = 'C'
subdivisions['AT']['vienna'] = '9'
subdivisions['AT']['carinthia'] = '2'
subdivisions['AT']['lower austria'] = '3'
subdivisions['AT']['tyrol'] = '7'
subdivisions['AT']['upper austria'] = '4'
subdivisions['AT']['styria'] = '6'
subdivisions['BG']['smoljan'] = '21'
subdivisions['BG']['sofia city'] = '22'
subdivisions['BG']['vraca'] = '06'
subdivisions['BE']['wallonia'] = 'WAL'
subdivisions['BE']['brussels'] = 'BRU'
subdivisions['BE']['flanders'] = 'VLG'
subdivisions['BR']['federal district'] = 'DF'
subdivisions['CA']['yukon'] = 'YT'
subdivisions['CH']['canton of zug'] = 'ZG'
subdivisions['CH']['canton of bern'] = 'BE'
subdivisions['CH']['basel city'] = 'BS'
subdivisions['CH']['geneva'] = 'GE'
subdivisions['CH']['st. gallen'] = 'SG'
subdivisions['CH']['grisons'] = 'GR'
subdivisions['CH']['lucerne'] = 'LU'
subdivisions['CL']['bio bio'] = 'BI'
subdivisions['CL']['magallanes and chilean antarctica'] = 'MA'
subdivisions['CL']['santiago metropolitan region'] = 'RM'
subdivisions['CL']['aysen'] = 'AI'
subdivisions['CL']['nuble'] = 'NB'
subdivisions['CL']['o\'higgins'] = 'LI'
subdivisions['CO']['bogota'] = 'DC'
subdivisions['CO']['san andres and providencia'] = 'SAP'
subdivisions['CO']['north santander'] = 'NSA'
subdivisions['CZ']['hradec kralove region'] = '52'
subdivisions['CZ']['central bohemian'] = '20'
subdivisions['CZ']['moravian-silesian'] = '80'
subdivisions['CZ']['plzen'] = '32'
subdivisions['CZ']['prague'] = '10'
subdivisions['CZ']['south bohemian'] = '31'
subdivisions['CZ']['south moravian'] = '64'
subdivisions['CZ']['vysocina'] = '63'
subdivisions['DE']['hesse'] = 'HE'
subdivisions['DE']['north rhine-westphalia'] = 'NW'
subdivisions['DE']['saxony-anhalt'] = 'ST'
subdivisions['DE']['saxony'] = 'SN'
subdivisions['DE']['thuringia'] = 'TH'
subdivisions['DE']['rhineland-palatinate'] = 'RP'
subdivisions['DE']['lower saxony'] = 'NI'
subdivisions['DE']['bavaria'] = 'BY'
subdivisions['DK']['north denmark region'] = '81'
subdivisions['DK']['region zealand'] = '85'
subdivisions['DK']['region of southern denmark'] = '83'
subdivisions['DK']['central denmark region'] = '82'
subdivisions['DK']['capital region of denmark'] = '84'
subdivisions['ES']['andalusia'] = 'AN'
subdivisions['ES']['balearic islands'] = 'IB'
subdivisions['ES']['basque country'] = 'PV'
subdivisions['ES']['canary islands'] = 'CN'
subdivisions['ES']['castile and leon'] = 'CL'
subdivisions['ES']['castile-la mancha'] = 'CM'
subdivisions['ES']['catalonia'] = 'CT'
subdivisions['ES']['community of madrid'] = 'MD'
subdivisions['ES']['navarre'] = 'NA'
subdivisions['ES']['region of murcia'] = 'MC'
subdivisions['ES']['valencian community'] = 'VC'
subdivisions['FI']['central finland'] = '08'
subdivisions['FI']['central ostrobothnia'] = '07'
subdivisions['FI']['lapland'] = '10'
subdivisions['FI']['north karelia'] = '13'
subdivisions['FI']['northern ostrobothnia'] = '14'
subdivisions['FI']['northern savonia'] = '15'
subdivisions['FI']['ostrobothnia'] = '12'
subdivisions['FI']['paijanne tavastia'] = '16'
subdivisions['FI']['south karelia'] = '02'
subdivisions['FI']['southern ostrobothnia'] = '03'
subdivisions['FI']['southern savonia'] = '04'
subdivisions['FI']['southwest finland'] = '19'
subdivisions['FI']['tavastia proper'] = '06'
subdivisions['FR']['brittany'] = 'BRE'
subdivisions['FR']['corsica'] = 'COR'
subdivisions['FR']['grand est'] = 'GES'
subdivisions['FR']['normandy'] = 'NOR'
subdivisions['FR']['pays de la loire'] = 'PDL'
subdivisions['FR']['provence-alpes-cote d\'azur'] = 'PAC'
subdivisions['GB']['armagh city, banbridge and craigavon'] = 'ABC'
subdivisions['GB']['borough of halton'] = 'HAL'
subdivisions['GB']['bridgend county borough'] = 'BGE'
subdivisions['GB']['caerphilly county borough'] = 'CAY'
subdivisions['GB']['cardiff'] = 'CRF'
subdivisions['GB']['rhondda cynon taff'] = 'RCT'
subdivisions['GB']['scottish borders'] = 'SCB'
subdivisions['GB']['city of bristol'] = 'BST'
subdivisions['GB']['conwy principal area'] = 'CWY'
subdivisions['GB']['county durham'] = 'DUR'
subdivisions['GB']['edinburgh'] = 'EDH'
subdivisions['GB']['anglesey'] = 'AGY'
subdivisions['GB']['london'] = 'LND'
subdivisions['GB']['merthyr tydfil county borough'] = 'MTY'
subdivisions['GB']['neath port talbot principle area'] = 'NTL'
subdivisions['GB']['orkney'] = 'ORK'
subdivisions['GB']['torfaen principal area'] = 'TOF'
subdivisions['GB']['vale of glamorgan'] = 'VGL'
subdivisions['GB']['wrexham principal area'] = 'WRX'
subdivisions['GR']['crete region'] = 'M'
subdivisions['GR']['decentralized administration of attica'] = 'A1'
subdivisions['GR']['decentralized administration of epirus and western macedonia'] = 'C'
subdivisions['GR']['decentralized administration of macedonia and thrace'] = 'A'
subdivisions['GR']['decentralized administration of the aegean'] = 'K'
subdivisions['GR']['decentralized administration of thessaly and central greece'] = 'E'
subdivisions['HR']['bjelovar-bilogora county'] = '07'
subdivisions['HR']['brod-posavina county'] = '12'
subdivisions['HR']['city of zagreb'] = '21'
subdivisions['HR']['dubrovnik-neretva county'] = '19'
subdivisions['HR']['istria county'] = '18'
subdivisions['HR']['karlovac county'] = '04'
subdivisions['HR']['koprivnica-krizevci county'] = '06'
subdivisions['HR']['krapina-zagorje county'] = '02'
subdivisions['HR']['lika-senj county'] = '09'
subdivisions['HR']['medimurje county'] = '20'
subdivisions['HR']['osijek-baranja county'] = '14'
subdivisions['HR']['pozega-slavonia county'] = '11'
subdivisions['HR']['primorje-gorski kotar county'] = '08'
subdivisions['HR']['sibenik-knin county'] = '15'
subdivisions['HR']['sisak-moslavina county'] = '03'
subdivisions['HR']['split-dalmatia county'] = '17'
subdivisions['HR']['varazdin county'] = '05'
subdivisions['HR']['virovitica-podravina county'] = '10'
subdivisions['HR']['vukovar-srijem county'] = '16'
subdivisions['HR']['zadar county'] = '13'
subdivisions['HR']['zagreb county'] = '01'
subdivisions['ID']['central java'] = 'ID'
subdivisions['ID']['central kalimantan'] = 'KT'
subdivisions['ID']['central sulawesi'] = 'ST'
subdivisions['ID']['east java'] = 'JI'
subdivisions['ID']['east kalimantan'] = 'KI'
subdivisions['ID']['East Nusa Tenggara'] = 'NT'
subdivisions['ID']['jakarta'] = 'JK'
subdivisions['ID']['west kalimantan'] = 'KB'
subdivisions['ID']['north kalimantan'] = 'KA'
subdivisions['ID']['north maluku'] = 'MU'
subdivisions['ID']['north sulawesi'] = 'SA'
subdivisions['ID']['north sumatra'] = 'SU'
subdivisions['ID']['bangka belitung islands'] = 'BB'
subdivisions['ID']['riau islands'] = 'KR'
subdivisions['ID']['south east sulawesi'] = 'SG'
subdivisions['ID']['south kalimantan'] = 'KS'
subdivisions['ID']['south sulawesi'] = 'SN'
subdivisions['ID']['South Sumatra'] = 'SS'
subdivisions['ID']['special region of yogyakarta'] = 'YO'
subdivisions['ID']['west java'] = 'JB'
subdivisions['ID']['west kalimantan'] = 'KB'
subdivisions['ID']['west nusa tenggara'] = 'NB'
subdivisions['ID']['west papua'] = 'PB'
subdivisions['ID']['west sulawesi'] = 'SR'
subdivisions['ID']['west sumatra'] = 'SB'
subdivisions['IT']['apulia'] = '75'
subdivisions['IT']['sicily'] = '82'
subdivisions['IT']['tuscany'] = '52'
subdivisions['IT']['trentino-south tyrol'] = '32'
subdivisions['IT']['sardinia'] = '88'
subdivisions['IT']['piedmont'] = '21'
subdivisions['IT']['lombardy'] = '25'
subdivisions['KW']['al asimah'] = 'KW'
subdivisions['KW']['al jahra'] = 'JA'
subdivisions["KW"]['mubarak al-kabeer'] = 'MU'
subdivisions['LT']['alytus county'] = 'AL'
subdivisions['LT']['kaunas county'] = 'KU'
subdivisions['LT']['marijampole county'] = 'MR'
subdivisions['LT']['panevezys county'] = 'PN'
subdivisions['LT']['siauliai county'] = 'SA'
subdivisions['LT']['taurage county'] = 'TA'
subdivisions['LT']['klaipeda county'] = 'KL'
subdivisions['LT']['telsiai county'] = 'TE'
subdivisions['LT']['utena county'] = 'UT'
subdivisions['LT']['vilnius county'] = 'VL'
subdivisions['LV']['cesis novads'] = '022'
subdivisions['LV']['adazi novads'] = '011'
subdivisions['LV']['balvus novads'] = '015'
subdivisions['LV']['Burtniekis novads'] = '019'
subdivisions['LV']['city of liepaja'] = 'LPX'
subdivisions['LV']['incukalns novads'] = '037'
subdivisions['LV']['kegums novads'] = '051'
subdivisions['LV']['kocenis novads'] = '045'
subdivisions['LV']['limbazis novads'] = '054'
subdivisions['LV']['ozolniekis novads'] = '069'
subdivisions['LV']['preilis novads'] = '073'
subdivisions['LV']['priekulis novads'] = '075'
subdivisions['LV']['ropazis novads'] = '080'
subdivisions['LV']['saulkrastis novads'] = '089'
subdivisions['LV']['aizkraukle novads'] = '002'
subdivisions['LV']['stopinis novads'] = '095'
subdivisions['LV']['talsis novads'] = '097'
subdivisions['LV']['tukums novads'] = '099'
subdivisions['MU']['riviere noire'] = 'BL'
subdivisions['MX']['coahuila'] = 'COA'
subdivisions['MX']['veracruz'] = 'VER'
subdivisions['MX']['mexico city'] = 'CMX'
subdivisions['MX']['state of mexico'] = 'MEX'
subdivisions['MX']['michoacan'] = 'MIC'
subdivisions['NG']['federal capital territory'] = 'FC'
subdivisions['NG']['nassarawa'] = 'NA'
subdivisions['NG']['ogun state'] = 'OG'
subdivisions['NL']['south holland'] = 'ZH'
subdivisions['NO']['viken'] = '30'
subdivisions['NO']['vestland'] = '46'
subdivisions['NO']['vestfold og telemark'] = '38'
subdivisions['NO']['trondelag'] = '50'
subdivisions['NO']['troms og finnmark'] = '54'
subdivisions['NO']['agder'] = '42'
subdivisions['NO']['innlandet'] = '34'
subdivisions['NZ']['gisborne'] = 'GIS'
subdivisions['NZ']['marlborough'] = 'MBH'
subdivisions['NZ']['nelson'] = 'NSN'
subdivisions['NZ']['tasman'] = 'TAS'
subdivisions['PE']['callao region'] = 'CAL'
subdivisions['PE']['cusco'] = 'CUS'
subdivisions['PE']['lima region'] = 'LIM'
subdivisions['PE']['metropolitan municipality of lima'] = 'LMA'
subdivisions['PL']['greater poland'] = 'WP'
subdivisions['PL']['kuyavian-pomeranian'] = 'KP'
subdivisions['PL']['lesser poland'] = 'MA'
subdivisions['PL']['lodz'] = 'LD'
subdivisions['PL']['lower silesian'] = 'DS'
subdivisions['PL']['lublin'] = 'LU'
subdivisions['PL']['lubusz'] = 'LB'
subdivisions['PL']['masovian'] = 'MZ'
subdivisions['PL']['opole'] = 'OP'
subdivisions['PL']['pomeranian'] = 'PM'
subdivisions['PL']['silesian'] = 'SL'
subdivisions['PL']['warmian-masurian'] = 'WN'
subdivisions['PL']['west pomeranian'] = 'ZP'
subdivisions['PT']['azores'] = '20'
subdivisions['PT']['madeira'] = '30'
subdivisions['PT']['lisbon'] = '11'
subdivisions['RO']['bucharest'] = 'B'
subdivisions['SA']['al jowf'] = '12'
subdivisions['SA']['al qasim'] = '05'
subdivisions['SA']['aseer'] = '14'
subdivisions['SA']['hail'] = '06'
subdivisions['SA']['jazan'] = '09'
subdivisions['SA']['riyadh'] = '01'
subdivisions['SA']['eastern'] = '04'
subdivisions['SA']['northern borders'] = '08'
subdivisions['SE']['blekinges lan'] = 'K'
subdivisions['SE']['kalmars lan'] = 'H'
subdivisions['SE']['orebros lan'] = 'T'
subdivisions['SE']['skanes lan'] = 'M'
subdivisions['SE']['uppsalas lan'] = 'C'
subdivisions['SI']['administrative unit maribor'] = '070'
subdivisions['SI']['izola'] = '040'
subdivisions['SI']['koper'] = '050'
subdivisions['SI']['lendava'] = '059'
subdivisions['SI']['sobota'] = '080'
subdivisions['SI']['municipality of hrastnik'] = '034'
subdivisions['SI']['piran'] = '090'
subdivisions['SI']['postojna'] = '094'
subdivisions['SK']['bratislava region'] = 'BL'
subdivisions['SK']['kosice region'] = 'KI'
subdivisions['SK']['nitra region'] = 'NI'
subdivisions['SK']['presov region'] = 'PV'
subdivisions['SK']['trencin region'] = 'TC'
subdivisions['SK']['trnava region'] = 'TA'
subdivisions['SK']['zilina region'] = 'ZI'
subdivisions['SK']['banska bystrica region'] = 'BC'
subdivisions['ZA']['north west'] = 'NW'
for region in list(df["country_region_code"].unique()):
df.loc[df["country_region_code"] == region, "country_region"] = resolve_name(region)
df["ISO_3166_2"] = ""
group_region = df.groupby(['country_region_code','sub_region_helper']).count().reset_index()[["country_region_code", "sub_region_helper"]].values.tolist()
for region_code, sub_region_name in group_region:
if sub_region_name and (type(sub_region_name) is str) and len(sub_region_name):
try:
if sub_region_name.lower() not in list(subdivisions[region_code]):
continue
df.loc[(df["country_region_code"] == region_code) & (df["sub_region_helper"] == sub_region_name), "ISO_3166_2"] = subdivisions[region_code][sub_region_name.lower()]
except KeyError:
continue
# df = df.apply(lambda row: resolve_iso3166_2(row), axis="columns")
df = df.drop(columns=['sub_region_helper'])
column_map = {
"sub_region_1": "PROVINCE_STATE",
"country_region_code": "ISO_3166_1",
"grocery_and_pharmacy_percent_change_from_baseline": "grocery_and_pharmacy_change_perc",
"parks_percent_change_from_baseline": "parks_change_perc",
"residential_percent_change_from_baseline": "residential_change_perc",
"retail_and_recreation_percent_change_from_baseline": "retail_and_recreation_change_perc",
"transit_stations_percent_change_from_baseline": "transit_stations_change_perc",
"workplaces_percent_change_from_baseline": "workplaces_change_perc"
}
df = df.rename(columns=column_map)
df.dtypes
df.to_csv(output_folder + "GOOG_GLOBAL_MOBILITY_REPORT.csv", index=False, columns=[
"country_region",
"PROVINCE_STATE",
"ISO_3166_1",
"ISO_3166_2",
"date",
"grocery_and_pharmacy_change_perc",
"parks_change_perc",
"residential_change_perc",
"retail_and_recreation_change_perc",
"transit_stations_change_perc",
"workplaces_change_perc",
"Last_Update_Date",
"Last_Reported_Flag",
"sub_region_2"
])
```
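A quick sanity check before shipping the CSV might look like the following (illustrative only; the column names follow the renaming above, but the check itself is not part of the original notebook):
```
# Rows that have a province/state but no resolved ISO_3166_2 code
unresolved = df[(df['PROVINCE_STATE'].notna()) & (df['ISO_3166_2'] == "")]
print(f"{len(unresolved)} rows with a PROVINCE_STATE but no ISO_3166_2 code")
print(unresolved.groupby(['ISO_3166_1', 'PROVINCE_STATE']).size().sort_values(ascending=False).head(10))
```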
|
github_jupyter
|
| 0.261897 | 0.218659 |
<small><small><i>
All the IPython Notebooks in this lecture series by Dr. Milan Parmar are available @ **[GitHub](https://github.com/milaan9/04_Python_Functions/tree/main/002_Python_Functions_Built_in)**
</i></small></small>
# Python `sorted()`
The **`sorted()`** function returns a sorted list from the items in an iterable.
The **`sorted()`** function sorts the elements of a given iterable in a specific order (either **ascending** or **descending**) and returns the sorted iterable as a list.
**Syntax**:
```python
sorted(iterable, key=None, reverse=False)
```
## `sorted()` Parameters
**`sorted()`** can take a maximum of three parameters:
* **iterable** - A sequence (**[string](https://github.com/milaan9/02_Python_Datatypes/blob/main/002_Python_String.ipynb)**, **[tuple](https://github.com/milaan9/02_Python_Datatypes/blob/main/004_Python_Tuple.ipynb)**, **[list](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List.ipynb)**) or collection (**[set](https://github.com/milaan9/02_Python_Datatypes/blob/main/006_Python_Sets.ipynb)**, **[dictionary](https://github.com/milaan9/02_Python_Datatypes/blob/main/005_Python_Dictionary.ipynb)**, **[frozen set](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/024_Python_frozenset%28%29.ipynb)**) or any other iterator.
* **key (Optional)** - A function that serves as a key for the sort comparison. Defaults to **`None`**.
* **reverse (Optional)** - If **`True`**, the sorted list is reversed (or sorted in descending order). Defaults to **`False`** if not provided.
```
# Example 1: Sort string, list, and tuple
# vowels list
py_list = ['e', 'a', 'u', 'o', 'i']
print(sorted(py_list))
# string
py_string = 'Python'
print(sorted(py_string))
# vowels tuple
py_tuple = ('e', 'a', 'u', 'o', 'i')
print(sorted(py_tuple))
```
Notice that in all cases a sorted list is returned.
>**Note:** A list also has the **[sort()](https://github.com/milaan9/02_Python_Datatypes/blob/main/003_Python_List_Methods/009_Python_List_sort%28%29.ipynb)** method which performs the same way as **`sorted()`**. The only difference is that the **`sort()`** method doesn't return any value and changes the original list.
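For instance (a quick illustration of that difference):
```
numbers = [3, 1, 2]
print(sorted(numbers))   # [1, 2, 3] -> a new list; `numbers` is unchanged
print(numbers.sort())    # None -> sort() sorts in place and returns no value
print(numbers)           # [1, 2, 3]
```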
```
# Example 2: Sort in descending order
# The sorted() function accepts a reverse parameter as an optional argument.
#Setting reverse = True sorts the iterable in the descending order.
# set
py_set = {'e', 'a', 'u', 'o', 'i'}
print(sorted(py_set, reverse=True))
# dictionary
py_dict = {'e': 1, 'a': 2, 'u': 3, 'o': 4, 'i': 5}
print(sorted(py_dict, reverse=True))
# frozen set
frozen_set = frozenset(('e', 'a', 'u', 'o', 'i'))
print(sorted(frozen_set, reverse=True))
```
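Note that iterating a dictionary yields its keys, so `sorted(py_dict, reverse=True)` above sorts only the keys. To order the keys by their values instead, the dictionary's **`get`** method can be passed as the key function (a small extension of the example, not from the original text):
```
py_dict = {'e': 1, 'a': 2, 'u': 3, 'o': 4, 'i': 5}
print(sorted(py_dict, key=py_dict.get))               # ['e', 'a', 'u', 'o', 'i'] -> keys ordered by value
print(sorted(py_dict.items(), key=lambda kv: kv[1]))  # (key, value) pairs ordered by value
```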
### key Parameter in Python `sorted()` function
If you want to customize the sort order, **`sorted()`** also accepts a **`key`** function as an optional parameter.
The iterable is then sorted by comparing the values the **`key`** function returns for each element.
```python
sorted(iterable, key=len)
```
Here, **`len()`** is Python's built-in function that returns the length of an object.
The list is sorted by the length of each element, from shortest to longest.
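For example, sorting a list of strings by their length:
```
words = ['kiwi', 'fig', 'banana', 'plum']
print(sorted(words, key=len))   # ['fig', 'kiwi', 'plum', 'banana']
```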
```
# Example 3: Sort the list using sorted() having a key function
# take the second element for sort
def take_second(elem):
return elem[1]
# random list
random = [(2, 2), (3, 4), (4, 1), (1, 3)]
# sort list with key
sorted_list = sorted(random, key=take_second)
# print list
print('Sorted list:', sorted_list)
# Example 4: Sorting with multiple keys
# Let us suppose that we have the following list:
# Nested list of student's info in a Science Olympiad
# List elements: (Student's Name, Marks out of 100, Age)
participant_list = [
('Alison', 50, 18),
('Terence', 75, 12),
('David', 75, 20),
('Jimmy', 90, 22),
('John', 45, 12)
]
```
We want to sort the list so that the student with the highest marks comes first. When students have equal marks, the younger participant should come first.
We can achieve this kind of sorting with multiple keys by having the key function return a tuple instead of a single number.
Two tuples are compared element by element, starting from the first. If there is a tie (the elements are equal), the second elements are compared, and so on.
```
(1, 3) > (1, 4)    # False: the first elements tie, and 3 > 4 is False
(1, 4) < (2, 2)    # True: 1 < 2, so the remaining elements are not compared
(1, 4, 1) < (2, 1) # True: 1 < 2
# Let's use this logic to build our sorting logic.
# Nested list of student's info in a Science Olympiad
# List elements: (Student's Name, Marks out of 100 , Age)
participant_list = [
('Alison', 50, 18),
('Terence', 75, 12),
('David', 75, 20),
('Jimmy', 90, 22),
('John', 45, 12)
]
def sorter(item):
# Since highest marks first, least error = most marks
error = 100 - item[1]
age = item[2]
return (error, age)
sorted_list = sorted(participant_list, key=sorter)
print(sorted_list)
```
Since the sorting logic is small and fits in one line, a **`lambda`** function can be used as the **`key`** instead of a separately defined function.
The above program can be written with a **`lambda`** function as follows:
```
# Nested list of student's info in a Science Olympiad
# List elements: (Student's Name, Marks out of 100 , Age)
participant_list = [
('Alison', 50, 18),
('Terence', 75, 12),
('David', 75, 20),
('Jimmy', 90, 22),
('John', 45, 12)
]
sorted_list = sorted(participant_list, key=lambda item: (100-item[1], item[2]))
print(sorted_list)
```
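An equivalent formulation (an alternative, not from the original text) negates the marks instead of subtracting them from 100, which works for any maximum score:
```
sorted_list = sorted(participant_list, key=lambda item: (-item[1], item[2]))
print(sorted_list)
```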
To learn more about **`lambda`** functions, visit **[Python Lambda](https://github.com/milaan9/04_Python_Functions/blob/main/006_Python_Function_Anonymous.ipynb)** Functions.
|
github_jupyter
|
| 0.591959 | 0.962778 |
```
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Zonotope Containment"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pypolycontain as pp\n",
"import time\n",
"import pydrake.solvers.mathematicalprogram as MP\n",
"# use Gurobi solver\n",
"import pydrake.solvers.gurobi as Gurobi_drake\n",
"gurobi_solver=Gurobi_drake.GurobiSolver()"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"def alpha_necessary_old(Z_i,Z_o):\n",
" program=MP.MathematicalProgram()\n",
" zeta=program.NewContinuousVariables(Z_o.G.shape[1],2**Z_i.G.shape[1],\"zeta\")\n",
" beta=program.NewContinuousVariables(1,\"beta\")\n",
" V=pp.vcube(Z_i.G.shape[1])\n",
" for i in range(V.shape[0]):\n",
" program.AddLinearEqualityConstraint( Aeq=Z_o.G, beq=np.dot(Z_i.G,V.T)[:,i], vars= zeta[:,i] )\n",
" program.AddLinearConstraint( np.less_equal(zeta,beta*np.ones(zeta.shape),dtype='object').flatten() )\n",
" program.AddLinearConstraint( np.less_equal(-zeta,beta*np.ones(zeta.shape),dtype='object').flatten() )\n",
" program.AddLinearCost(np.eye(1),np.zeros((1)),beta)\n",
" result=gurobi_solver.Solve(program,None,None)\n",
" if result.is_success():\n",
" alpha=1/result.GetSolution(beta)[0]\n",
" return alpha\n",
" else:\n",
" print(\"optimization failed\")\n",
" \n",
"def alpha_necessary_older(Z_i,Z_o):\n",
" alpha_min=np.inf\n",
" V=pp.vcube(Z_i.G.shape[1])\n",
" B=np.dot(Z_i.G,V.T)\n",
" for i in range(V.shape[0]):\n",
" program=MP.MathematicalProgram()\n",
" zeta=program.NewContinuousVariables(Z_o.G.shape[1],\"zeta\")\n",
" beta=program.NewContinuousVariables(1,\"beta\")\n",
" program.AddLinearEqualityConstraint( Aeq=Z_o.G, beq=B[:,i:i+1], vars= zeta )\n",
"# for j in range(zeta.shape[0]):\n",
"# program.AddLinearConstraint( zeta[j]-beta, -np.inf, 0 )\n",
"# program.AddLinearConstraint( -zeta[j]+ beta, -np.inf, 0 )\n",
" program.AddLinearConstraint( np.less_equal(zeta,beta*np.ones(zeta.shape),dtype='object').flatten() )\n",
" program.AddLinearConstraint( np.less_equal(-zeta,beta*np.ones(zeta.shape),dtype='object').flatten() )\n",
" program.AddLinearCost(np.eye(1),np.zeros((1)),beta)\n",
" result=gurobi_solver.Solve(program,None,None)\n",
" if result.is_success():\n",
" alpha=1/result.GetSolution(beta)[0]\n",
" alpha_min=min(alpha,alpha_min)\n",
" else:\n",
" print(\"optimization failed\")\n",
" return alpha_min\n",
"\n",
"def alpha_necessary(Z_i,Z_o):\n",
" alpha_min=np.inf\n",
" V=pp.vcube(Z_i.G.shape[1])\n",
" B=np.dot(Z_i.G,V.T)\n",
" for i in range(V.shape[0]):\n",
" program=MP.MathematicalProgram()\n",
" zeta=program.NewContinuousVariables(Z_o.G.shape[1],\"zeta\")\n",
" alpha=program.NewContinuousVariables(1,\"alpha\")\n",
" Aeq=np.hstack(( Z_o.G, -B[:,i:i+1]))\n",
" program.AddLinearEqualityConstraint( Aeq=np.hstack(( Z_o.G, -B[:,i:i+1])), \\\n",
" beq=np.zeros((Z_o.G.shape[0])),\\\n",
" vars= np.hstack((zeta,alpha)) )\n",
" program.AddBoundingBoxConstraint(-1,1,zeta)\n",
" program.AddLinearCost(-np.eye(1),np.zeros((1)),alpha)\n",
" result=gurobi_solver.Solve(program,None,None)\n",
" if result.is_success():\n",
" alpha=result.GetSolution(alpha)[0]\n",
" alpha_min=min(alpha,alpha_min)\n",
" else:\n",
" print(\"optimization failed\")\n",
" return alpha_min\n",
" \n",
" \n",
"def alpha_sufficient(Z_i,Z_o):\n",
" program=MP.MathematicalProgram()\n",
" beta=program.NewContinuousVariables(1,\"beta\")\n",
" circumbody=pp.to_AH_polytope(Z_o)\n",
" parametric_circumbody=circumbody.copy()\n",
" parametric_circumbody.P.h=circumbody.P.h*beta\n",
" Theta,*_=pp.subset(program,Z_i,parametric_circumbody,k=-1)\n",
" program.AddLinearCost(np.eye(1),np.zeros((1)),beta)\n",
" result=gurobi_solver.Solve(program,None,None)\n",
" if result.is_success():\n",
" alpha=1/result.GetSolution(beta)[0]\n",
" return alpha\n",
" else:\n",
" print(\"optimization failed\")"
]
},
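  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "A minimal usage sketch comparing the two bounds on one random zonotope pair. This is illustrative only: the `pp.zonotope` keyword arguments `G` and `x`, and the names and sizes below (`Z_i_demo`, `Z_o_demo`, dimension 3 with 5 and 10 generators), are assumptions, not part of the experiment that follows."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Illustrative sketch: compare the two bounds on one random centered zonotope pair.\n",
    "# Assumes pp.zonotope accepts the keyword arguments G (generators) and x (center).\n",
    "np.random.seed(0)\n",
    "n, q_i, q_o = 3, 5, 10  # dimension and generator counts (illustrative)\n",
    "Z_i_demo = pp.zonotope(G=np.random.randn(n, q_i), x=np.zeros((n, 1)))\n",
    "Z_o_demo = pp.zonotope(G=np.random.randn(n, q_o), x=np.zeros((n, 1)))\n",
    "a_suf = alpha_sufficient(Z_i_demo, Z_o_demo)  # bound from the LP containment encoding\n",
    "a_nec = alpha_necessary(Z_i_demo, Z_o_demo)   # bound from enumerating the 2**q_i cube vertices\n",
    "print(\"alpha_sufficient =\", a_suf, \" alpha_necessary =\", a_nec)"
   ]
  },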
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"==================== \n",
" n= 3\n",
"3 5 0\n",
"3 5 1\n",
"3 5 2\n",
"3 5 3\n",
"3 5 4\n",
"3 5 5\n",
"3 5 6\n",
"3 5 7\n",
"3 5 8\n",
"3 5 9\n",
"3 5 10\n",
"3 5 11\n",
"3 5 12\n",
"3 5 13\n",
"3 5 14\n",
"3 5 15\n",
"3 5 16\n",
"3 5 17\n",
"3 5 18\n",
"3 5 19\n",
"3 5 20\n",
"3 5 21\n",
"3 5 22\n",
"3 5 23\n",
"3 5 24\n",
"3 5 25\n",
"3 5 26\n",
"3 5 27\n",
"3 5 28\n",
"3 5 29\n",
"3 5 30\n",
"3 5 31\n",
"3 5 32\n",
"3 5 33\n",
"3 5 34\n",
"3 5 35\n",
"3 5 36\n",
"3 5 37\n",
"3 5 38\n",
"3 5 39\n",
"3 5 40\n",
"3 5 41\n",
"3 5 42\n",
"3 5 43\n",
"3 5 44\n",
"3 5 45\n",
"3 5 46\n",
"3 5 47\n",
"3 5 48\n",
"3 5 49\n",
"3 5 50\n",
"3 5 51\n",
"3 5 52\n",
"3 5 53\n",
"3 5 54\n",
"3 5 55\n",
"3 5 56\n",
"3 5 57\n",
"3 5 58\n",
"3 5 59\n",
"3 5 60\n",
"3 5 61\n",
"3 5 62\n",
"3 5 63\n",
"3 5 64\n",
"3 5 65\n",
"3 5 66\n",
"3 5 67\n",
"3 5 68\n",
"3 5 69\n",
"3 5 70\n",
"3 5 71\n",
"3 5 72\n",
"3 5 73\n",
"3 5 74\n",
"3 5 75\n",
"3 5 76\n",
"3 5 77\n",
"3 5 78\n",
"3 5 79\n",
"3 5 80\n",
"3 5 81\n",
"3 5 82\n",
"3 5 83\n",
"3 5 84\n",
"3 5 85\n",
"3 5 86\n",
"3 5 87\n",
"3 5 88\n",
"3 5 89\n",
"3 5 90\n",
"3 5 91\n",
"3 5 92\n",
"3 5 93\n",
"3 5 94\n",
"3 5 95\n",
"3 5 96\n",
"3 5 97\n",
"3 5 98\n",
"3 5 99\n",
"3 5 100\n",
"3 5 101\n",
"3 5 102\n",
"3 5 103\n",
"3 5 104\n",
"3 5 105\n",
"3 5 106\n",
"3 5 107\n",
"3 5 108\n",
"3 5 109\n",
"3 5 110\n",
"3 5 111\n",
"3 5 112\n",
"3 5 113\n",
"3 5 114\n",
"3 5 115\n",
"3 5 116\n",
"3 5 117\n",
"3 5 118\n",
"3 5 119\n",
"3 5 120\n",
"3 5 121\n",
"3 5 122\n",
"3 5 123\n",
"3 5 124\n",
"3 5 125\n",
"3 5 126\n",
"3 5 127\n",
"3 5 128\n",
"3 5 129\n",
"3 5 130\n",
"3 5 131\n",
"3 5 132\n",
"3 5 133\n",
"3 5 134\n",
"3 5 135\n",
"3 5 136\n",
"3 5 137\n",
"3 5 138\n",
"3 5 139\n",
"3 5 140\n",
"3 5 141\n",
"3 5 142\n",
"3 5 143\n",
"3 5 144\n",
"3 5 145\n",
"3 5 146\n",
"3 5 147\n",
"3 5 148\n",
"3 5 149\n",
"3 5 150\n",
"3 5 151\n",
"3 5 152\n",
"3 5 153\n",
"3 5 154\n",
"3 5 155\n",
"3 5 156\n",
"3 5 157\n",
"3 5 158\n",
"3 5 159\n",
"3 5 160\n",
"3 5 161\n",
"3 5 162\n",
"3 5 163\n",
"3 5 164\n",
"3 5 165\n",
"3 5 166\n",
"3 5 167\n",
"3 5 168\n",
"3 5 169\n",
"3 5 170\n",
"3 5 171\n",
"3 5 172\n",
"3 5 173\n",
"3 5 174\n",
"3 5 175\n",
"3 5 176\n",
"3 5 177\n",
"3 5 178\n",
"3 5 179\n",
"3 5 180\n",
"3 5 181\n",
"3 5 182\n",
"3 5 183\n",
"3 5 184\n",
"3 5 185\n",
"3 5 186\n",
"3 5 187\n",
"3 5 188\n",
"3 5 189\n",
"3 5 190\n",
"3 5 191\n",
"3 5 192\n",
"3 5 193\n",
"3 5 194\n",
"3 5 195\n",
"3 5 196\n",
"3 5 197\n",
"3 5 198\n",
"3 5 199\n",
"3 5 200\n",
"3 5 201\n",
"3 5 202\n",
"3 5 203\n",
"3 5 204\n",
"3 5 205\n",
"3 5 206\n",
"3 5 207\n",
"3 5 208\n",
"3 5 209\n",
"3 5 210\n",
"3 5 211\n",
"3 5 212\n",
"3 5 213\n",
"3 5 214\n",
"3 5 215\n",
"3 5 216\n",
"3 5 217\n",
"3 5 218\n",
"3 5 219\n",
"3 5 220\n",
"3 5 221\n",
"3 5 222\n",
"3 5 223\n",
"3 5 224\n",
"3 5 225\n",
"3 5 226\n",
"3 5 227\n",
"3 5 228\n",
"3 5 229\n",
"3 5 230\n",
"3 5 231\n",
"3 5 232\n",
"3 5 233\n",
"3 5 234\n",
"3 5 235\n",
"3 5 236\n",
"3 5 237\n",
"3 5 238\n",
"3 5 239\n",
"3 5 240\n",
"3 5 241\n",
"3 5 242\n",
"3 5 243\n",
"3 5 244\n",
"3 5 245\n",
"3 5 246\n",
"3 5 247\n",
"3 5 248\n",
"3 5 249\n",
"3 5 250\n",
"3 5 251\n",
"3 5 252\n",
"3 5 253\n",
"3 5 254\n",
"3 5 255\n",
"3 5 256\n",
"3 5 257\n",
"3 5 258\n",
"3 5 259\n",
"3 5 260\n",
"3 5 261\n",
"3 5 262\n",
"3 5 263\n",
"3 5 264\n",
"3 5 265\n",
"3 5 266\n",
"3 5 267\n",
"3 5 268\n",
"3 5 269\n",
"3 5 270\n",
"3 5 271\n",
"3 5 272\n",
"3 5 273\n",
"3 5 274\n",
"3 5 275\n",
"3 5 276\n",
"3 5 277\n",
"3 5 278\n",
"3 5 279\n",
"3 5 280\n",
"3 5 281\n",
"3 5 282\n",
"3 5 283\n",
"3 5 284\n",
"3 5 285\n",
"3 5 286\n",
"3 5 287\n",
"3 5 288\n",
"3 5 289\n",
"3 5 290\n",
"3 5 291\n",
"3 5 292\n",
"3 5 293\n",
"3 5 294\n",
"3 5 295\n",
"3 5 296\n",
"3 5 297\n",
"3 5 298\n",
"3 5 299\n",
"3 5 300\n",
"3 5 301\n",
"3 5 302\n",
"3 5 303\n",
"3 5 304\n",
"3 5 305\n",
"3 5 306\n",
"3 5 307\n",
"3 5 308\n",
"3 5 309\n",
"3 5 310\n",
"3 5 311\n",
"3 5 312\n",
"3 5 313\n",
"3 5 314\n",
"3 5 315\n",
"3 5 316\n",
"3 5 317\n",
"3 5 318\n",
"3 5 319\n",
"3 5 320\n",
"3 5 321\n",
"3 5 322\n",
"3 5 323\n",
"3 5 324\n",
"3 5 325\n",
"3 5 326\n",
"3 5 327\n",
"3 5 328\n",
"3 5 329\n",
"3 5 330\n",
"3 5 331\n",
"3 5 332\n",
"3 5 333\n",
"3 5 334\n",
"3 5 335\n",
"3 5 336\n",
"3 5 337\n",
"3 5 338\n",
"3 5 339\n",
"3 5 340\n",
"3 5 341\n",
"3 5 342\n",
"3 5 343\n",
"3 5 344\n",
"3 5 345\n",
"3 5 346\n",
"3 5 347\n",
"3 5 348\n",
"3 5 349\n",
"3 5 350\n",
"3 5 351\n",
"3 5 352\n",
"3 5 353\n",
"3 5 354\n",
"3 5 355\n",
"3 5 356\n",
"3 5 357\n",
"3 5 358\n",
"3 5 359\n",
"3 5 360\n",
"3 5 361\n",
"3 5 362\n",
"3 5 363\n",
"3 5 364\n",
"3 5 365\n",
"3 5 366\n",
"3 5 367\n",
"3 5 368\n",
"3 5 369\n",
"3 5 370\n",
"3 5 371\n",
"3 5 372\n",
"3 5 373\n",
"3 5 374\n",
"3 5 375\n",
"3 5 376\n",
"3 5 377\n",
"3 5 378\n",
"3 5 379\n",
"3 5 380\n",
"3 5 381\n",
"3 5 382\n",
"3 5 383\n",
"3 5 384\n",
"3 5 385\n",
"3 5 386\n",
"3 5 387\n",
"3 5 388\n",
"3 5 389\n",
"3 5 390\n",
"3 5 391\n",
"3 5 392\n",
"3 5 393\n",
"3 5 394\n",
"3 5 395\n",
"3 5 396\n",
"3 5 397\n",
"3 5 398\n",
"3 5 399\n",
"3 5 400\n",
"3 5 401\n",
"3 5 402\n",
"3 5 403\n",
"3 5 404\n",
"3 5 405\n",
"3 5 406\n",
"3 5 407\n",
"3 5 408\n",
"3 5 409\n",
"3 5 410\n",
"3 5 411\n",
"3 5 412\n",
"3 5 413\n",
"3 5 414\n",
"3 5 415\n",
"3 5 416\n",
"3 5 417\n",
"3 5 418\n",
"3 5 419\n",
"3 5 420\n",
"3 5 421\n",
"3 5 422\n",
"3 5 423\n",
"3 5 424\n",
"3 5 425\n",
"3 5 426\n",
"3 5 427\n",
"3 5 428\n",
"3 5 429\n",
"3 5 430\n",
"3 5 431\n",
"3 5 432\n",
"3 5 433\n",
"3 5 434\n",
"3 5 435\n",
"3 5 436\n",
"3 5 437\n",
"3 5 438\n",
"3 5 439\n",
"3 5 440\n",
"3 5 441\n",
"3 5 442\n",
"3 5 443\n",
"3 5 444\n",
"3 5 445\n",
"3 5 446\n",
"3 5 447\n",
"3 5 448\n",
"3 5 449\n",
"3 5 450\n",
"3 5 451\n",
"3 5 452\n",
"3 5 453\n",
"3 5 454\n",
"3 5 455\n",
"3 5 456\n",
"3 5 457\n",
"3 5 458\n",
"3 5 459\n",
"3 5 460\n",
"3 5 461\n",
"3 5 462\n",
"3 5 463\n",
"3 5 464\n",
"3 5 465\n",
"3 5 466\n",
"3 5 467\n",
"3 5 468\n",
"3 5 469\n",
"3 5 470\n",
"3 5 471\n",
"3 5 472\n",
"3 5 473\n",
"3 5 474\n",
"3 5 475\n",
"3 5 476\n",
"3 5 477\n",
"3 5 478\n",
"3 5 479\n",
"3 5 480\n",
"3 5 481\n",
"3 5 482\n",
"3 5 483\n",
"3 5 484\n",
"3 5 485\n",
"3 5 486\n",
"3 5 487\n",
"3 5 488\n",
"3 5 489\n",
"3 5 490\n",
"3 5 491\n",
"3 5 492\n",
"3 5 493\n",
"3 5 494\n",
"3 5 495\n",
"3 5 496\n",
"3 5 497\n",
"3 5 498\n",
"3 5 499\n",
"3 10 0\n",
"3 10 1\n",
"3 10 2\n",
"3 10 3\n",
"3 10 4\n",
"3 10 5\n",
"3 10 6\n",
"3 10 7\n",
"3 10 8\n",
"3 10 9\n",
"3 10 10\n",
"3 10 11\n",
"3 10 12\n",
"3 10 13\n",
"3 10 14\n",
"3 10 15\n",
"3 10 16\n",
"3 10 17\n",
"3 10 18\n",
"3 10 19\n",
"3 10 20\n",
"3 10 21\n",
"3 10 22\n",
"3 10 23\n",
"3 10 24\n",
"3 10 25\n",
"3 10 26\n",
"3 10 27\n",
"3 10 28\n",
"3 10 29\n",
"3 10 30\n",
"3 10 31\n",
"3 10 32\n",
"3 10 33\n",
"3 10 34\n",
"3 10 35\n",
"3 10 36\n",
"3 10 37\n",
"3 10 38\n",
"3 10 39\n",
"3 10 40\n",
"3 10 41\n",
"3 10 42\n",
"3 10 43\n",
"3 10 44\n",
"3 10 45\n",
"3 10 46\n",
"3 10 47\n",
"3 10 48\n",
"3 10 49\n",
"3 10 50\n",
"3 10 51\n",
"3 10 52\n",
"3 10 53\n",
"3 10 54\n",
"3 10 55\n",
"3 10 56\n",
"3 10 57\n",
"3 10 58\n",
"3 10 59\n",
"3 10 60\n",
"3 10 61\n",
"3 10 62\n",
"3 10 63\n",
"3 10 64\n",
"3 10 65\n",
"3 10 66\n",
"3 10 67\n",
"3 10 68\n",
"3 10 69\n",
"3 10 70\n",
"3 10 71\n",
"3 10 72\n",
"3 10 73\n",
"3 10 74\n",
"3 10 75\n",
"3 10 76\n",
"3 10 77\n",
"3 10 78\n",
"3 10 79\n",
"3 10 80\n",
"3 10 81\n",
"3 10 82\n",
"3 10 83\n",
"3 10 84\n",
"3 10 85\n",
"3 10 86\n",
"3 10 87\n",
"3 10 88\n",
"3 10 89\n",
"3 10 90\n",
"3 10 91\n",
"3 10 92\n",
"3 10 93\n",
"3 10 94\n",
"3 10 95\n",
"3 10 96\n",
"3 10 97\n",
"3 10 98\n",
"3 10 99\n",
"3 10 100\n",
"3 10 101\n",
"3 10 102\n",
"3 10 103\n",
"3 10 104\n",
"3 10 105\n",
"3 10 106\n",
"3 10 107\n",
"3 10 108\n",
"3 10 109\n",
"3 10 110\n",
"3 10 111\n",
"3 10 112\n",
"3 10 113\n",
"3 10 114\n",
"3 10 115\n",
"3 10 116\n",
"3 10 117\n",
"3 10 118\n",
"3 10 119\n",
"3 10 120\n",
"3 10 121\n",
"3 10 122\n",
"3 10 123\n",
"3 10 124\n",
"3 10 125\n",
"3 10 126\n",
"3 10 127\n",
"3 10 128\n",
"3 10 129\n",
"3 10 130\n",
"3 10 131\n",
"3 10 132\n",
"3 10 133\n",
"3 10 134\n",
"3 10 135\n",
"3 10 136\n",
"3 10 137\n",
"3 10 138\n",
"3 10 139\n",
"3 10 140\n",
"3 10 141\n",
"3 10 142\n",
"3 10 143\n",
"3 10 144\n",
"3 10 145\n",
"3 10 146\n",
"3 10 147\n",
"3 10 148\n",
"3 10 149\n",
"3 10 150\n",
"3 10 151\n",
"3 10 152\n",
"3 10 153\n",
"3 10 154\n",
"3 10 155\n",
"3 10 156\n",
"3 10 157\n",
"3 10 158\n",
"3 10 159\n",
"3 10 160\n",
"3 10 161\n",
"3 10 162\n",
"3 10 163\n",
"3 10 164\n",
"3 10 165\n",
"3 10 166\n",
"3 10 167\n",
"3 10 168\n",
"3 10 169\n",
"3 10 170\n",
"3 10 171\n",
"3 10 172\n",
"3 10 173\n",
"3 10 174\n",
"3 10 175\n",
"3 10 176\n",
"3 10 177\n",
"3 10 178\n",
"3 10 179\n",
"3 10 180\n",
"3 10 181\n",
"3 10 182\n",
"3 10 183\n",
"3 10 184\n",
"3 10 185\n",
"3 10 186\n",
"3 10 187\n",
"3 10 188\n",
"3 10 189\n",
"3 10 190\n",
"3 10 191\n",
"3 10 192\n",
"3 10 193\n",
"3 10 194\n",
"3 10 195\n",
"3 10 196\n",
"3 10 197\n",
"3 10 198\n",
"3 10 199\n",
"3 10 200\n",
"3 10 201\n",
"3 10 202\n",
"3 10 203\n",
"3 10 204\n",
"3 10 205\n",
"3 10 206\n",
"3 10 207\n",
"3 10 208\n",
"3 10 209\n",
"3 10 210\n",
"3 10 211\n",
"3 10 212\n",
"3 10 213\n",
"3 10 214\n",
"3 10 215\n",
"3 10 216\n",
"3 10 217\n",
"3 10 218\n",
"3 10 219\n",
"3 10 220\n",
"3 10 221\n",
"3 10 222\n",
"3 10 223\n",
"3 10 224\n",
"3 10 225\n",
"3 10 226\n",
"3 10 227\n",
"3 10 228\n",
"3 10 229\n",
"3 10 230\n",
"3 10 231\n",
"3 10 232\n",
"3 10 233\n",
"3 10 234\n",
"3 10 235\n",
"3 10 236\n",
"3 10 237\n",
"3 10 238\n",
"3 10 239\n",
"3 10 240\n",
"3 10 241\n",
"3 10 242\n",
"3 10 243\n",
"3 10 244\n",
"3 10 245\n",
"3 10 246\n",
"3 10 247\n",
"3 10 248\n",
"3 10 249\n",
"3 10 250\n",
"3 10 251\n",
"3 10 252\n",
"3 10 253\n",
"3 10 254\n",
"3 10 255\n",
"3 10 256\n",
"3 10 257\n",
"3 10 258\n",
"3 10 259\n",
"3 10 260\n",
"3 10 261\n",
"3 10 262\n",
"3 10 263\n",
"3 10 264\n",
"3 10 265\n",
"3 10 266\n",
"3 10 267\n",
"3 10 268\n",
"3 10 269\n",
"3 10 270\n",
"3 10 271\n",
"3 10 272\n",
"3 10 273\n",
"3 10 274\n",
"3 10 275\n",
"3 10 276\n",
"3 10 277\n",
"3 10 278\n",
"3 10 279\n",
"3 10 280\n",
"3 10 281\n",
"3 10 282\n",
"3 10 283\n",
"3 10 284\n",
"3 10 285\n",
"3 10 286\n",
"3 10 287\n",
"3 10 288\n",
"3 10 289\n",
"3 10 290\n",
"3 10 291\n",
"3 10 292\n",
"3 10 293\n",
"3 10 294\n",
"3 10 295\n",
"3 10 296\n",
"3 10 297\n",
"3 10 298\n",
"3 10 299\n",
"3 10 300\n",
"3 10 301\n",
"3 10 302\n",
"3 10 303\n",
"3 10 304\n",
"3 10 305\n",
"3 10 306\n",
"3 10 307\n",
"3 10 308\n",
"3 10 309\n",
"3 10 310\n",
"3 10 311\n",
"3 10 312\n",
"3 10 313\n",
"3 10 314\n",
"3 10 315\n",
"3 10 316\n",
"3 10 317\n",
"3 10 318\n",
"3 10 319\n",
"3 10 320\n",
"3 10 321\n",
"3 10 322\n",
"3 10 323\n",
"3 10 324\n",
"3 10 325\n",
"3 10 326\n",
"3 10 327\n",
"3 10 328\n",
"3 10 329\n",
"3 10 330\n",
"3 10 331\n",
"3 10 332\n",
"3 10 333\n",
"3 10 334\n",
"3 10 335\n",
"3 10 336\n",
"3 10 337\n",
"3 10 338\n",
"3 10 339\n",
"3 10 340\n",
"3 10 341\n",
"3 10 342\n",
"3 10 343\n",
"3 10 344\n",
"3 10 345\n",
"3 10 346\n",
"3 10 347\n",
"3 10 348\n",
"3 10 349\n",
"3 10 350\n",
"3 10 351\n",
"3 10 352\n",
"3 10 353\n",
"3 10 354\n",
"3 10 355\n",
"3 10 356\n",
"3 10 357\n",
"3 10 358\n",
"3 10 359\n",
"3 10 360\n",
"3 10 361\n",
"3 10 362\n",
"3 10 363\n",
"3 10 364\n",
"3 10 365\n",
"3 10 366\n",
"3 10 367\n",
"3 10 368\n",
"3 10 369\n",
"3 10 370\n",
"3 10 371\n",
"3 10 372\n",
"3 10 373\n",
"3 10 374\n",
"3 10 375\n",
"3 10 376\n",
"3 10 377\n",
"3 10 378\n",
"3 10 379\n",
"3 10 380\n",
"3 10 381\n",
"3 10 382\n",
"3 10 383\n",
"3 10 384\n",
"3 10 385\n",
"3 10 386\n",
"3 10 387\n",
"3 10 388\n",
"3 10 389\n",
"3 10 390\n",
"3 10 391\n",
"3 10 392\n",
"3 10 393\n",
"3 10 394\n",
"3 10 395\n",
"3 10 396\n",
"3 10 397\n",
"3 10 398\n",
"3 10 399\n",
"3 10 400\n",
"3 10 401\n",
"3 10 402\n",
"3 10 403\n",
"3 10 404\n",
"3 10 405\n",
"3 10 406\n",
"3 10 407\n",
"3 10 408\n",
"3 10 409\n",
"3 10 410\n",
"3 10 411\n",
"3 10 412\n",
"3 10 413\n",
"3 10 414\n",
"3 10 415\n",
"3 10 416\n",
"3 10 417\n",
"3 10 418\n",
"3 10 419\n",
"3 10 420\n",
"3 10 421\n",
"3 10 422\n",
"3 10 423\n",
"3 10 424\n",
"3 10 425\n",
"3 10 426\n",
"3 10 427\n",
"3 10 428\n",
"3 10 429\n",
"3 10 430\n",
"3 10 431\n",
"3 10 432\n",
"3 10 433\n",
"3 10 434\n",
"3 10 435\n",
"3 10 436\n",
"3 10 437\n",
"3 10 438\n",
"3 10 439\n",
"3 10 440\n",
"3 10 441\n",
"3 10 442\n",
"3 10 443\n",
"3 10 444\n",
"3 10 445\n",
"3 10 446\n",
"3 10 447\n",
"3 10 448\n",
"3 10 449\n",
"3 10 450\n",
"3 10 451\n",
"3 10 452\n",
"3 10 453\n",
"3 10 454\n",
"3 10 455\n",
"3 10 456\n",
"3 10 457\n",
"3 10 458\n",
"3 10 459\n",
"3 10 460\n",
"3 10 461\n",
"3 10 462\n",
"3 10 463\n",
"3 10 464\n",
"3 10 465\n",
"3 10 466\n",
"3 10 467\n",
"3 10 468\n",
"3 10 469\n",
"3 10 470\n",
"3 10 471\n",
"3 10 472\n",
"3 10 473\n",
"3 10 474\n",
"3 10 475\n",
"3 10 476\n",
"3 10 477\n",
"3 10 478\n",
"3 10 479\n",
"3 10 480\n",
"3 10 481\n",
"3 10 482\n",
"3 10 483\n",
"3 10 484\n",
"3 10 485\n",
"3 10 486\n",
"3 10 487\n",
"3 10 488\n",
"3 10 489\n",
"3 10 490\n",
"3 10 491\n",
"3 10 492\n",
"3 10 493\n",
"3 10 494\n",
"3 10 495\n",
"3 10 496\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"3 20 437\n",
"3 20 438\n",
"3 20 439\n",
"3 20 440\n",
"3 20 441\n",
"3 20 442\n",
"3 20 443\n",
"3 20 444\n",
"3 20 445\n",
"3 20 446\n",
"3 20 447\n",
"3 20 448\n",
"3 20 449\n",
"3 20 450\n",
"3 20 451\n",
"3 20 452\n",
"3 20 453\n",
"3 20 454\n",
"3 20 455\n",
"3 20 456\n",
"3 20 457\n",
"3 20 458\n",
"3 20 459\n",
"3 20 460\n",
"3 20 461\n",
"3 20 462\n",
"3 20 463\n",
"3 20 464\n",
"3 20 465\n",
"3 20 466\n",
"3 20 467\n",
"3 20 468\n",
"3 20 469\n",
"3 20 470\n",
"3 20 471\n",
"3 20 472\n",
"3 20 473\n",
"3 20 474\n",
"3 20 475\n",
"3 20 476\n",
"3 20 477\n",
"3 20 478\n",
"3 20 479\n",
"3 20 480\n",
"3 20 481\n",
"3 20 482\n",
"3 20 483\n",
"3 20 484\n",
"3 20 485\n",
"3 20 486\n",
"3 20 487\n",
"3 20 488\n",
"3 20 489\n",
"3 20 490\n",
"3 20 491\n",
"3 20 492\n",
"3 20 493\n",
"3 20 494\n",
"3 20 495\n",
"3 20 496\n",
"3 20 497\n",
"3 20 498\n",
"3 20 499\n",
"==================== \n",
" n= 5\n",
"5 10 0\n",
"5 10 1\n",
"5 10 2\n",
"5 10 3\n",
"5 10 4\n",
"5 10 5\n",
"5 10 6\n",
"5 10 7\n",
"5 10 8\n",
"5 10 9\n",
"5 10 10\n",
"5 10 11\n",
"5 10 12\n",
"5 10 13\n",
"5 10 14\n",
"5 10 15\n",
"5 10 16\n",
"5 10 17\n",
"5 10 18\n",
"5 10 19\n",
"5 10 20\n",
"5 10 21\n",
"5 10 22\n",
"5 10 23\n",
"5 10 24\n",
"5 10 25\n",
"5 10 26\n",
"5 10 27\n",
"5 10 28\n",
"5 10 29\n",
"5 10 30\n",
"5 10 31\n",
"5 10 32\n",
"5 10 33\n",
"5 10 34\n",
"5 10 35\n",
"5 10 36\n",
"5 10 37\n",
"5 10 38\n",
"5 10 39\n",
"5 10 40\n",
"5 10 41\n",
"5 10 42\n",
"5 10 43\n",
"5 10 44\n",
"5 10 45\n",
"5 10 46\n",
"5 10 47\n",
"5 10 48\n",
"5 10 49\n",
"5 10 50\n",
"5 10 51\n",
"5 10 52\n",
"5 10 53\n",
"5 10 54\n",
"5 10 55\n",
"5 10 56\n",
"5 10 57\n",
"5 10 58\n",
"5 10 59\n",
"5 10 60\n",
"5 10 61\n",
"5 10 62\n",
"5 10 63\n",
"5 10 64\n",
"5 10 65\n",
"5 10 66\n",
"5 10 67\n",
"5 10 68\n",
"5 10 69\n",
"5 10 70\n",
"5 10 71\n",
"5 10 72\n",
"5 10 73\n",
"5 10 74\n",
"5 10 75\n",
"5 10 76\n",
"5 10 77\n",
"5 10 78\n",
"5 10 79\n",
"5 10 80\n",
"5 10 81\n",
"5 10 82\n",
"5 10 83\n",
"5 10 84\n",
"5 10 85\n",
"5 10 86\n",
"5 10 87\n",
"5 10 88\n",
"5 10 89\n",
"5 10 90\n",
"5 10 91\n",
"5 10 92\n",
"5 10 93\n",
"5 10 94\n",
"5 10 95\n",
"5 10 96\n",
"5 10 97\n",
"5 10 98\n",
"5 10 99\n",
"5 10 100\n",
"5 10 101\n",
"5 10 102\n",
"5 10 103\n",
"5 10 104\n",
"5 10 105\n",
"5 10 106\n",
"5 10 107\n",
"5 10 108\n",
"5 10 109\n",
"5 10 110\n",
"5 10 111\n",
"5 10 112\n",
"5 10 113\n",
"5 10 114\n",
"5 10 115\n",
"5 10 116\n",
"5 10 117\n",
"5 10 118\n",
"5 10 119\n",
"5 10 120\n",
"5 10 121\n",
"5 10 122\n",
"5 10 123\n",
"5 10 124\n",
"5 10 125\n",
"5 10 126\n",
"5 10 127\n",
"5 10 128\n",
"5 10 129\n",
"5 10 130\n",
"5 10 131\n",
"5 10 132\n",
"5 10 133\n",
"5 10 134\n",
"5 10 135\n",
"5 10 136\n",
"5 10 137\n",
"5 10 138\n",
"5 10 139\n",
"5 10 140\n",
"5 10 141\n",
"5 10 142\n",
"5 10 143\n",
"5 10 144\n",
"5 10 145\n",
"5 10 146\n",
"5 10 147\n",
"5 10 148\n",
"5 10 149\n",
"5 10 150\n",
"5 10 151\n",
"5 10 152\n",
"5 10 153\n",
"5 10 154\n",
"5 10 155\n",
"5 10 156\n",
"5 10 157\n",
"5 10 158\n",
"5 10 159\n",
"5 10 160\n",
"5 10 161\n",
"5 10 162\n",
"5 10 163\n",
"5 10 164\n",
"5 10 165\n",
"5 10 166\n",
"5 10 167\n",
"5 10 168\n",
"5 10 169\n",
"5 10 170\n",
"5 10 171\n",
"5 10 172\n",
"5 10 173\n",
"5 10 174\n",
"5 10 175\n",
"5 10 176\n",
"5 10 177\n",
"5 10 178\n",
"5 10 179\n",
"5 10 180\n",
"5 10 181\n",
"5 10 182\n",
"5 10 183\n",
"5 10 184\n",
"5 10 185\n",
"5 10 186\n",
"5 10 187\n",
"5 10 188\n",
"5 10 189\n",
"5 10 190\n",
"5 10 191\n",
"5 10 192\n",
"5 10 193\n",
"5 10 194\n",
"5 10 195\n",
"5 10 196\n",
"5 10 197\n",
"5 10 198\n",
"5 10 199\n",
"5 10 200\n",
"5 10 201\n",
"5 10 202\n",
"5 10 203\n",
"5 10 204\n",
"5 10 205\n",
"5 10 206\n",
"5 10 207\n",
"5 10 208\n",
"5 10 209\n",
"5 10 210\n",
"5 10 211\n",
"5 10 212\n",
"5 10 213\n",
"5 10 214\n",
"5 10 215\n",
"5 10 216\n",
"5 10 217\n",
"5 10 218\n",
"5 10 219\n",
"5 10 220\n",
"5 10 221\n",
"5 10 222\n",
"5 10 223\n",
"5 10 224\n",
"5 10 225\n",
"5 10 226\n",
"5 10 227\n",
"5 10 228\n",
"5 10 229\n",
"5 10 230\n",
"5 10 231\n",
"5 10 232\n",
"5 10 233\n",
"5 10 234\n",
"5 10 235\n",
"5 10 236\n",
"5 10 237\n",
"5 10 238\n",
"5 10 239\n",
"5 10 240\n",
"5 10 241\n",
"5 10 242\n",
"5 10 243\n",
"5 10 244\n",
"5 10 245\n",
"5 10 246\n",
"5 10 247\n",
"5 10 248\n",
"5 10 249\n",
"5 10 250\n",
"5 10 251\n",
"5 10 252\n",
"5 10 253\n",
"5 10 254\n",
"5 10 255\n",
"5 10 256\n",
"5 10 257\n",
"5 10 258\n",
"5 10 259\n",
"5 10 260\n",
"5 10 261\n",
"5 10 262\n",
"5 10 263\n",
"5 10 264\n",
"5 10 265\n",
"5 10 266\n",
"5 10 267\n",
"5 10 268\n",
"5 10 269\n",
"5 10 270\n",
"5 10 271\n",
"5 10 272\n",
"5 10 273\n",
"5 10 274\n",
"5 10 275\n",
"5 10 276\n",
"5 10 277\n",
"5 10 278\n",
"5 10 279\n",
"5 10 280\n",
"5 10 281\n",
"5 10 282\n",
"5 10 283\n",
"5 10 284\n",
"5 10 285\n",
"5 10 286\n",
"5 10 287\n",
"5 10 288\n",
"5 10 289\n",
"5 10 290\n",
"5 10 291\n",
"5 10 292\n",
"5 10 293\n",
"5 10 294\n",
"5 10 295\n",
"5 10 296\n",
"5 10 297\n",
"5 10 298\n",
"5 10 299\n",
"5 10 300\n",
"5 10 301\n",
"5 10 302\n",
"5 10 303\n",
"5 10 304\n",
"5 10 305\n",
"5 10 306\n",
"5 10 307\n",
"5 10 308\n",
"5 10 309\n",
"5 10 310\n",
"5 10 311\n",
"5 10 312\n",
"5 10 313\n",
"5 10 314\n",
"5 10 315\n",
"5 10 316\n",
"5 10 317\n",
"5 10 318\n",
"5 10 319\n",
"5 10 320\n",
"5 10 321\n",
"5 10 322\n",
"5 10 323\n",
"5 10 324\n",
"5 10 325\n",
"5 10 326\n",
"5 10 327\n",
"5 10 328\n",
"5 10 329\n",
"5 10 330\n",
"5 10 331\n",
"5 10 332\n",
"5 10 333\n",
"5 10 334\n",
"5 10 335\n",
"5 10 336\n",
"5 10 337\n",
"5 10 338\n",
"5 10 339\n",
"5 10 340\n",
"5 10 341\n",
"5 10 342\n",
"5 10 343\n",
"5 10 344\n",
"5 10 345\n",
"5 10 346\n",
"5 10 347\n",
"5 10 348\n",
"5 10 349\n",
"5 10 350\n",
"5 10 351\n",
"5 10 352\n",
"5 10 353\n",
"5 10 354\n",
"5 10 355\n",
"5 10 356\n",
"5 10 357\n",
"5 10 358\n",
"5 10 359\n",
"5 10 360\n",
"5 10 361\n",
"5 10 362\n",
"5 10 363\n",
"5 10 364\n",
"5 10 365\n",
"5 10 366\n",
"5 10 367\n",
"5 10 368\n",
"5 10 369\n",
"5 10 370\n",
"5 10 371\n",
"5 10 372\n",
"5 10 373\n",
"5 10 374\n",
"5 10 375\n",
"5 10 376\n",
"5 10 377\n",
"5 10 378\n",
"5 10 379\n",
"5 10 380\n",
"5 10 381\n",
"5 10 382\n",
"5 10 383\n",
"5 10 384\n",
"5 10 385\n",
"5 10 386\n",
"5 10 387\n",
"5 10 388\n",
"5 10 389\n",
"5 10 390\n",
"5 10 391\n",
"5 10 392\n",
"5 10 393\n",
"5 10 394\n",
"5 10 395\n",
"5 10 396\n",
"5 10 397\n",
"5 10 398\n",
"5 10 399\n",
"5 10 400\n",
"5 10 401\n",
"5 10 402\n",
"5 10 403\n",
"5 10 404\n",
"5 10 405\n",
"5 10 406\n",
"5 10 407\n",
"5 10 408\n",
"5 10 409\n",
"5 10 410\n",
"5 10 411\n",
"5 10 412\n",
"5 10 413\n",
"5 10 414\n",
"5 10 415\n",
"5 10 416\n",
"5 10 417\n",
"5 10 418\n",
"5 10 419\n",
"5 10 420\n",
"5 10 421\n",
"5 10 422\n",
"5 10 423\n",
"5 10 424\n",
"5 10 425\n",
"5 10 426\n",
"5 10 427\n",
"5 10 428\n",
"5 10 429\n",
"5 10 430\n",
"5 10 431\n",
"5 10 432\n",
"5 10 433\n",
"5 10 434\n",
"5 10 435\n",
"5 10 436\n",
"5 10 437\n",
"5 10 438\n",
"5 10 439\n",
"5 10 440\n",
"5 10 441\n",
"5 10 442\n",
"5 10 443\n",
"5 10 444\n",
"5 10 445\n",
"5 10 446\n",
"5 10 447\n",
"5 10 448\n",
"5 10 449\n",
"5 10 450\n",
"5 10 451\n",
"5 10 452\n",
"5 10 453\n",
"5 10 454\n",
"5 10 455\n",
"5 10 456\n",
"5 10 457\n",
"5 10 458\n",
"5 10 459\n",
"5 10 460\n",
"5 10 461\n",
"5 10 462\n",
"5 10 463\n",
"5 10 464\n",
"5 10 465\n",
"5 10 466\n",
"5 10 467\n",
"5 10 468\n",
"5 10 469\n",
"5 10 470\n",
"5 10 471\n",
"5 10 472\n",
"5 10 473\n",
"5 10 474\n",
"5 10 475\n",
"5 10 476\n",
"5 10 477\n",
"5 10 478\n",
"5 10 479\n",
"5 10 480\n",
"5 10 481\n",
"5 10 482\n",
"5 10 483\n",
"5 10 484\n",
"5 10 485\n",
"5 10 486\n",
"5 10 487\n",
"5 10 488\n",
"5 10 489\n",
"5 10 490\n",
"5 10 491\n",
"5 10 492\n",
"5 10 493\n",
"5 10 494\n",
"5 10 495\n",
"5 10 496\n",
"5 10 497\n",
"5 10 498\n",
"5 10 499\n",
"5 15 0\n",
"5 15 1\n",
"5 15 2\n",
"5 15 3\n",
"5 15 4\n",
"5 15 5\n",
"5 15 6\n",
"5 15 7\n",
"5 15 8\n",
"5 15 9\n",
"5 15 10\n",
"5 15 11\n",
"5 15 12\n",
"5 15 13\n",
"5 15 14\n",
"5 15 15\n",
"5 15 16\n",
"5 15 17\n",
"5 15 18\n",
"5 15 19\n",
"5 15 20\n",
"5 15 21\n",
"5 15 22\n",
"5 15 23\n",
"5 15 24\n",
"5 15 25\n",
"5 15 26\n",
"5 15 27\n",
"5 15 28\n",
"5 15 29\n",
"5 15 30\n",
"5 15 31\n",
"5 15 32\n",
"5 15 33\n",
"5 15 34\n",
"5 15 35\n",
"5 15 36\n",
"5 15 37\n",
"5 15 38\n",
"5 15 39\n",
"5 15 40\n",
"5 15 41\n",
"5 15 42\n",
"5 15 43\n",
"5 15 44\n",
"5 15 45\n",
"5 15 46\n",
"5 15 47\n",
"5 15 48\n",
"5 15 49\n",
"5 15 50\n",
"5 15 51\n",
"5 15 52\n",
"5 15 53\n",
"5 15 54\n",
"5 15 55\n",
"5 15 56\n",
"5 15 57\n",
"5 15 58\n",
"5 15 59\n",
"5 15 60\n",
"5 15 61\n",
"5 15 62\n",
"5 15 63\n",
"5 15 64\n",
"5 15 65\n",
"5 15 66\n",
"5 15 67\n",
"5 15 68\n",
"5 15 69\n",
"5 15 70\n",
"5 15 71\n",
"5 15 72\n",
"5 15 73\n",
"5 15 74\n",
"5 15 75\n",
"5 15 76\n",
"5 15 77\n",
"5 15 78\n",
"5 15 79\n",
"5 15 80\n",
"5 15 81\n",
"5 15 82\n",
"5 15 83\n",
"5 15 84\n",
"5 15 85\n",
"5 15 86\n",
"5 15 87\n",
"5 15 88\n",
"5 15 89\n",
"5 15 90\n",
"5 15 91\n",
"5 15 92\n",
"5 15 93\n",
"5 15 94\n",
"5 15 95\n",
"5 15 96\n",
"5 15 97\n",
"5 15 98\n",
"5 15 99\n",
"5 15 100\n",
"5 15 101\n",
"5 15 102\n",
"5 15 103\n",
"5 15 104\n",
"5 15 105\n",
"5 15 106\n",
"5 15 107\n",
"5 15 108\n",
"5 15 109\n",
"5 15 110\n",
"5 15 111\n",
"5 15 112\n",
"5 15 113\n",
"5 15 114\n",
"5 15 115\n",
"5 15 116\n",
"5 15 117\n",
"5 15 118\n",
"5 15 119\n",
"5 15 120\n",
"5 15 121\n",
"5 15 122\n",
"5 15 123\n",
"5 15 124\n",
"5 15 125\n",
"5 15 126\n",
"5 15 127\n",
"5 15 128\n",
"5 15 129\n",
"5 15 130\n",
"5 15 131\n",
"5 15 132\n",
"5 15 133\n",
"5 15 134\n",
"5 15 135\n",
"5 15 136\n",
"5 15 137\n",
"5 15 138\n",
"5 15 139\n",
"5 15 140\n",
"5 15 141\n",
"5 15 142\n",
"5 15 143\n",
"5 15 144\n",
"5 15 145\n",
"5 15 146\n",
"5 15 147\n",
"5 15 148\n",
"5 15 149\n",
"5 15 150\n",
"5 15 151\n",
"5 15 152\n",
"5 15 153\n",
"5 15 154\n",
"5 15 155\n",
"5 15 156\n",
"5 15 157\n",
"5 15 158\n",
"5 15 159\n",
"5 15 160\n",
"5 15 161\n",
"5 15 162\n",
"5 15 163\n",
"5 15 164\n",
"5 15 165\n",
"5 15 166\n",
"5 15 167\n",
"5 15 168\n",
"5 15 169\n",
"5 15 170\n",
"5 15 171\n",
"5 15 172\n",
"5 15 173\n",
"5 15 174\n",
"5 15 175\n",
"5 15 176\n",
"5 15 177\n",
"5 15 178\n",
"5 15 179\n",
"5 15 180\n",
"5 15 181\n",
"5 15 182\n",
"5 15 183\n",
"5 15 184\n",
"5 15 185\n",
"5 15 186\n",
"5 15 187\n",
"5 15 188\n",
"5 15 189\n",
"5 15 190\n",
"5 15 191\n",
"5 15 192\n",
"5 15 193\n",
"5 15 194\n",
"5 15 195\n",
"5 15 196\n",
"5 15 197\n",
"5 15 198\n",
"5 15 199\n",
"5 15 200\n",
"5 15 201\n",
"5 15 202\n",
"5 15 203\n",
"5 15 204\n",
"5 15 205\n",
"5 15 206\n",
"5 15 207\n",
"5 15 208\n",
"5 15 209\n",
"5 15 210\n",
"5 15 211\n",
"5 15 212\n",
"5 15 213\n",
"5 15 214\n",
"5 15 215\n",
"5 15 216\n",
"5 15 217\n",
"5 15 218\n",
"5 15 219\n",
"5 15 220\n",
"5 15 221\n",
"5 15 222\n",
"5 15 223\n",
"5 15 224\n",
"5 15 225\n",
"5 15 226\n",
"5 15 227\n",
"5 15 228\n",
"5 15 229\n",
"5 15 230\n",
"5 15 231\n",
"5 15 232\n",
"5 15 233\n",
"5 15 234\n",
"5 15 235\n",
"5 15 236\n",
"5 15 237\n",
"5 15 238\n",
"5 15 239\n",
"5 15 240\n",
"5 15 241\n",
"5 15 242\n",
"5 15 243\n",
"5 15 244\n",
"5 15 245\n",
"5 15 246\n",
"5 15 247\n",
"5 15 248\n",
"5 15 249\n",
"5 15 250\n",
"5 15 251\n",
"5 15 252\n",
"5 15 253\n",
"5 15 254\n",
"5 15 255\n",
"5 15 256\n",
"5 15 257\n",
"5 15 258\n",
"5 15 259\n",
"5 15 260\n",
"5 15 261\n",
"5 15 262\n",
"5 15 263\n",
"5 15 264\n",
"5 15 265\n",
"5 15 266\n",
"5 15 267\n",
"5 15 268\n",
"5 15 269\n",
"5 15 270\n",
"5 15 271\n",
"5 15 272\n",
"5 15 273\n",
"5 15 274\n",
"5 15 275\n",
"5 15 276\n",
"5 15 277\n",
"5 15 278\n",
"5 15 279\n",
"5 15 280\n",
"5 15 281\n",
"5 15 282\n",
"5 15 283\n",
"5 15 284\n",
"5 15 285\n",
"5 15 286\n",
"5 15 287\n",
"5 15 288\n",
"5 15 289\n",
"5 15 290\n",
"5 15 291\n",
"5 15 292\n",
"5 15 293\n",
"5 15 294\n",
"5 15 295\n",
"5 15 296\n",
"5 15 297\n",
"5 15 298\n",
"5 15 299\n",
"5 15 300\n",
"5 15 301\n",
"5 15 302\n",
"5 15 303\n",
"5 15 304\n",
"5 15 305\n",
"5 15 306\n",
"5 15 307\n",
"5 15 308\n",
"5 15 309\n",
"5 15 310\n",
"5 15 311\n",
"5 15 312\n",
"5 15 313\n",
"5 15 314\n",
"5 15 315\n",
"5 15 316\n",
"5 15 317\n",
"5 15 318\n",
"5 15 319\n",
"5 15 320\n",
"5 15 321\n",
"5 15 322\n",
"5 15 323\n",
"5 15 324\n",
"5 15 325\n",
"5 15 326\n",
"5 15 327\n",
"5 15 328\n",
"5 15 329\n",
"5 15 330\n",
"5 15 331\n",
"5 15 332\n",
"5 15 333\n",
"5 15 334\n",
"5 15 335\n",
"5 15 336\n",
"5 15 337\n",
"5 15 338\n",
"5 15 339\n",
"5 15 340\n",
"5 15 341\n",
"5 15 342\n",
"5 15 343\n",
"5 15 344\n",
"5 15 345\n",
"5 15 346\n",
"5 15 347\n",
"5 15 348\n",
"5 15 349\n",
"5 15 350\n",
"5 15 351\n",
"5 15 352\n",
"5 15 353\n",
"5 15 354\n",
"5 15 355\n",
"5 15 356\n",
"5 15 357\n",
"5 15 358\n",
"5 15 359\n",
"5 15 360\n",
"5 15 361\n",
"5 15 362\n",
"5 15 363\n",
"5 15 364\n",
"5 15 365\n",
"5 15 366\n",
"5 15 367\n",
"5 15 368\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"5 15 369\n",
"5 15 370\n",
"5 15 371\n",
"5 15 372\n",
"5 15 373\n",
"5 15 374\n",
"5 15 375\n",
"5 15 376\n",
"5 15 377\n",
"5 15 378\n",
"5 15 379\n",
"5 15 380\n",
"5 15 381\n",
"5 15 382\n",
"5 15 383\n",
"5 15 384\n",
"5 15 385\n",
"5 15 386\n",
"5 15 387\n",
"5 15 388\n",
"5 15 389\n",
"5 15 390\n",
"5 15 391\n",
"5 15 392\n",
"5 15 393\n",
"5 15 394\n",
"5 15 395\n",
"5 15 396\n",
"5 15 397\n",
"5 15 398\n",
"5 15 399\n",
"5 15 400\n",
"5 15 401\n",
"5 15 402\n",
"5 15 403\n",
"5 15 404\n",
"5 15 405\n",
"5 15 406\n",
"5 15 407\n",
"5 15 408\n",
"5 15 409\n",
"5 15 410\n",
"5 15 411\n",
"5 15 412\n",
"5 15 413\n",
"5 15 414\n",
"5 15 415\n",
"5 15 416\n",
"5 15 417\n",
"5 15 418\n",
"5 15 419\n",
"5 15 420\n",
"5 15 421\n",
"5 15 422\n",
"5 15 423\n",
"5 15 424\n",
"5 15 425\n",
"5 15 426\n",
"5 15 427\n",
"5 15 428\n",
"5 15 429\n",
"5 15 430\n",
"5 15 431\n",
"5 15 432\n",
"5 15 433\n",
"5 15 434\n",
"5 15 435\n",
"5 15 436\n",
"5 15 437\n",
"5 15 438\n",
"5 15 439\n",
"5 15 440\n",
"5 15 441\n",
"5 15 442\n",
"5 15 443\n",
"5 15 444\n",
"5 15 445\n",
"5 15 446\n",
"5 15 447\n",
"5 15 448\n",
"5 15 449\n",
"5 15 450\n",
"5 15 451\n",
"5 15 452\n",
"5 15 453\n",
"5 15 454\n",
"5 15 455\n",
"5 15 456\n",
"5 15 457\n",
"5 15 458\n",
"5 15 459\n",
"5 15 460\n",
"5 15 461\n",
"5 15 462\n",
"5 15 463\n",
"5 15 464\n",
"5 15 465\n",
"5 15 466\n",
"5 15 467\n",
"5 15 468\n",
"5 15 469\n",
"5 15 470\n",
"5 15 471\n",
"5 15 472\n",
"5 15 473\n",
"5 15 474\n",
"5 15 475\n",
"5 15 476\n",
"5 15 477\n",
"5 15 478\n",
"5 15 479\n",
"5 15 480\n",
"5 15 481\n",
"5 15 482\n",
"5 15 483\n",
"5 15 484\n",
"5 15 485\n",
"5 15 486\n",
"5 15 487\n",
"5 15 488\n",
"5 15 489\n",
"5 15 490\n",
"5 15 491\n",
"5 15 492\n",
"5 15 493\n",
"5 15 494\n",
"5 15 495\n",
"5 15 496\n",
"5 15 497\n",
"5 15 498\n",
"5 15 499\n",
"5 20 0\n",
"5 20 1\n",
"5 20 2\n",
"5 20 3\n",
"5 20 4\n",
"5 20 5\n",
"5 20 6\n",
"5 20 7\n",
"5 20 8\n",
"5 20 9\n",
"5 20 10\n",
"5 20 11\n",
"5 20 12\n",
"5 20 13\n",
"5 20 14\n",
"5 20 15\n",
"5 20 16\n",
"5 20 17\n",
"5 20 18\n",
"5 20 19\n",
"5 20 20\n",
"5 20 21\n",
"5 20 22\n",
"5 20 23\n",
"5 20 24\n",
"5 20 25\n",
"5 20 26\n",
"5 20 27\n",
"5 20 28\n",
"5 20 29\n",
"5 20 30\n",
"5 20 31\n",
"5 20 32\n",
"5 20 33\n",
"5 20 34\n",
"5 20 35\n",
"5 20 36\n",
"5 20 37\n",
"5 20 38\n",
"5 20 39\n",
"5 20 40\n",
"5 20 41\n",
"5 20 42\n",
"5 20 43\n",
"5 20 44\n",
"5 20 45\n",
"5 20 46\n",
"5 20 47\n",
"5 20 48\n",
"5 20 49\n",
"5 20 50\n",
"5 20 51\n",
"5 20 52\n",
"5 20 53\n",
"5 20 54\n",
"5 20 55\n",
"5 20 56\n",
"5 20 57\n",
"5 20 58\n",
"5 20 59\n",
"5 20 60\n",
"5 20 61\n",
"5 20 62\n",
"5 20 63\n",
"5 20 64\n",
"5 20 65\n",
"5 20 66\n",
"5 20 67\n",
"5 20 68\n",
"5 20 69\n",
"5 20 70\n",
"5 20 71\n",
"5 20 72\n",
"5 20 73\n",
"5 20 74\n",
"5 20 75\n",
"5 20 76\n",
"5 20 77\n",
"5 20 78\n",
"5 20 79\n",
"5 20 80\n",
"5 20 81\n",
"5 20 82\n",
"5 20 83\n",
"5 20 84\n",
"5 20 85\n",
"5 20 86\n",
"5 20 87\n",
"5 20 88\n",
"5 20 89\n",
"5 20 90\n",
"5 20 91\n",
"5 20 92\n",
"5 20 93\n",
"5 20 94\n",
"5 20 95\n",
"5 20 96\n",
"5 20 97\n",
"5 20 98\n",
"5 20 99\n",
"5 20 100\n",
"5 20 101\n",
"5 20 102\n",
"5 20 103\n",
"5 20 104\n",
"5 20 105\n",
"5 20 106\n",
"5 20 107\n",
"5 20 108\n",
"5 20 109\n",
"5 20 110\n",
"5 20 111\n",
"5 20 112\n",
"5 20 113\n",
"5 20 114\n",
"5 20 115\n",
"5 20 116\n",
"5 20 117\n",
"5 20 118\n",
"5 20 119\n",
"5 20 120\n",
"5 20 121\n",
"5 20 122\n",
"5 20 123\n",
"5 20 124\n",
"5 20 125\n",
"5 20 126\n",
"5 20 127\n",
"5 20 128\n",
"5 20 129\n",
"5 20 130\n",
"5 20 131\n",
"5 20 132\n",
"5 20 133\n",
"5 20 134\n",
"5 20 135\n",
"5 20 136\n",
"5 20 137\n",
"5 20 138\n",
"5 20 139\n",
"5 20 140\n",
"5 20 141\n",
"5 20 142\n",
"5 20 143\n",
"5 20 144\n",
"5 20 145\n",
"5 20 146\n",
"5 20 147\n",
"5 20 148\n",
"5 20 149\n",
"5 20 150\n",
"5 20 151\n",
"5 20 152\n",
"5 20 153\n",
"5 20 154\n",
"5 20 155\n",
"5 20 156\n",
"5 20 157\n",
"5 20 158\n",
"5 20 159\n",
"5 20 160\n",
"5 20 161\n",
"5 20 162\n",
"5 20 163\n",
"5 20 164\n",
"5 20 165\n",
"5 20 166\n",
"5 20 167\n",
"5 20 168\n",
"5 20 169\n",
"5 20 170\n",
"5 20 171\n",
"5 20 172\n",
"5 20 173\n",
"5 20 174\n",
"5 20 175\n",
"5 20 176\n",
"5 20 177\n",
"5 20 178\n",
"5 20 179\n",
"5 20 180\n",
"5 20 181\n",
"5 20 182\n",
"5 20 183\n",
"5 20 184\n",
"5 20 185\n",
"5 20 186\n",
"5 20 187\n",
"5 20 188\n",
"5 20 189\n",
"5 20 190\n",
"5 20 191\n",
"5 20 192\n",
"5 20 193\n",
"5 20 194\n",
"5 20 195\n",
"5 20 196\n",
"5 20 197\n",
"5 20 198\n",
"5 20 199\n",
"5 20 200\n",
"5 20 201\n",
"5 20 202\n",
"5 20 203\n",
"5 20 204\n",
"5 20 205\n",
"5 20 206\n",
"5 20 207\n",
"5 20 208\n",
"5 20 209\n",
"5 20 210\n",
"5 20 211\n",
"5 20 212\n",
"5 20 213\n",
"5 20 214\n",
"5 20 215\n",
"5 20 216\n",
"5 20 217\n",
"5 20 218\n",
"5 20 219\n",
"5 20 220\n",
"5 20 221\n",
"5 20 222\n",
"5 20 223\n",
"5 20 224\n",
"5 20 225\n",
"5 20 226\n",
"5 20 227\n",
"5 20 228\n",
"5 20 229\n",
"5 20 230\n",
"5 20 231\n",
"5 20 232\n",
"5 20 233\n",
"5 20 234\n",
"5 20 235\n",
"5 20 236\n",
"5 20 237\n",
"5 20 238\n",
"5 20 239\n",
"5 20 240\n",
"5 20 241\n",
"5 20 242\n",
"5 20 243\n",
"5 20 244\n",
"5 20 245\n",
"5 20 246\n",
"5 20 247\n",
"5 20 248\n",
"5 20 249\n",
"5 20 250\n",
"5 20 251\n",
"5 20 252\n",
"5 20 253\n",
"5 20 254\n",
"5 20 255\n",
"5 20 256\n",
"5 20 257\n",
"5 20 258\n",
"5 20 259\n",
"5 20 260\n",
"5 20 261\n",
"5 20 262\n",
"5 20 263\n",
"5 20 264\n",
"5 20 265\n",
"5 20 266\n",
"5 20 267\n",
"5 20 268\n",
"5 20 269\n",
"5 20 270\n",
"5 20 271\n",
"5 20 272\n",
"5 20 273\n",
"5 20 274\n",
"5 20 275\n",
"5 20 276\n",
"5 20 277\n",
"5 20 278\n",
"5 20 279\n",
"5 20 280\n",
"5 20 281\n",
"5 20 282\n",
"5 20 283\n",
"5 20 284\n",
"5 20 285\n",
"5 20 286\n",
"5 20 287\n",
"5 20 288\n",
"5 20 289\n",
"5 20 290\n",
"5 20 291\n",
"5 20 292\n",
"5 20 293\n",
"5 20 294\n",
"5 20 295\n",
"5 20 296\n",
"5 20 297\n",
"5 20 298\n",
"5 20 299\n",
"5 20 300\n",
"5 20 301\n",
"5 20 302\n",
"5 20 303\n",
"5 20 304\n",
"5 20 305\n",
"5 20 306\n",
"5 20 307\n",
"5 20 308\n",
"5 20 309\n",
"5 20 310\n",
"5 20 311\n",
"5 20 312\n",
"5 20 313\n",
"5 20 314\n",
"5 20 315\n",
"5 20 316\n",
"5 20 317\n",
"5 20 318\n",
"5 20 319\n",
"5 20 320\n",
"5 20 321\n",
"5 20 322\n",
"5 20 323\n",
"5 20 324\n",
"5 20 325\n",
"5 20 326\n",
"5 20 327\n",
"5 20 328\n",
"5 20 329\n",
"5 20 330\n",
"5 20 331\n",
"5 20 332\n",
"5 20 333\n",
"5 20 334\n",
"5 20 335\n",
"5 20 336\n",
"5 20 337\n",
"5 20 338\n",
"5 20 339\n",
"5 20 340\n",
"5 20 341\n",
"5 20 342\n",
"5 20 343\n",
"5 20 344\n",
"5 20 345\n",
"5 20 346\n",
"5 20 347\n",
"5 20 348\n",
"5 20 349\n",
"5 20 350\n",
"5 20 351\n",
"5 20 352\n",
"5 20 353\n",
"5 20 354\n",
"5 20 355\n",
"5 20 356\n",
"5 20 357\n",
"5 20 358\n",
"5 20 359\n",
"5 20 360\n",
"5 20 361\n",
"5 20 362\n",
"5 20 363\n",
"5 20 364\n",
"5 20 365\n",
"5 20 366\n",
"5 20 367\n",
"5 20 368\n",
"5 20 369\n",
"5 20 370\n",
"5 20 371\n",
"5 20 372\n",
"5 20 373\n",
"5 20 374\n",
"5 20 375\n",
"5 20 376\n",
"5 20 377\n",
"5 20 378\n",
"5 20 379\n",
"5 20 380\n",
"5 20 381\n",
"5 20 382\n",
"5 20 383\n",
"5 20 384\n",
"5 20 385\n",
"5 20 386\n",
"5 20 387\n",
"5 20 388\n",
"5 20 389\n",
"5 20 390\n",
"5 20 391\n",
"5 20 392\n",
"5 20 393\n",
"5 20 394\n",
"5 20 395\n",
"5 20 396\n",
"5 20 397\n",
"5 20 398\n",
"5 20 399\n",
"5 20 400\n",
"5 20 401\n",
"5 20 402\n",
"5 20 403\n",
"5 20 404\n",
"5 20 405\n",
"5 20 406\n",
"5 20 407\n",
"5 20 408\n",
"5 20 409\n",
"5 20 410\n",
"5 20 411\n",
"5 20 412\n",
"5 20 413\n",
"5 20 414\n",
"5 20 415\n",
"5 20 416\n",
"5 20 417\n",
"5 20 418\n",
"5 20 419\n",
"5 20 420\n",
"5 20 421\n",
"5 20 422\n",
"5 20 423\n",
"5 20 424\n",
"5 20 425\n",
"5 20 426\n",
"5 20 427\n",
"5 20 428\n",
"5 20 429\n",
"5 20 430\n",
"5 20 431\n",
"5 20 432\n",
"5 20 433\n",
"5 20 434\n",
"5 20 435\n",
"5 20 436\n",
"5 20 437\n",
"5 20 438\n",
"5 20 439\n",
"5 20 440\n",
"5 20 441\n",
"5 20 442\n",
"5 20 443\n",
"5 20 444\n",
"5 20 445\n",
"5 20 446\n",
"5 20 447\n",
"5 20 448\n",
"5 20 449\n",
"5 20 450\n",
"5 20 451\n",
"5 20 452\n",
"5 20 453\n",
"5 20 454\n",
"5 20 455\n",
"5 20 456\n",
"5 20 457\n",
"5 20 458\n",
"5 20 459\n",
"5 20 460\n",
"5 20 461\n",
"5 20 462\n",
"5 20 463\n",
"5 20 464\n",
"5 20 465\n",
"5 20 466\n",
"5 20 467\n",
"5 20 468\n",
"5 20 469\n",
"5 20 470\n",
"5 20 471\n",
"5 20 472\n",
"5 20 473\n",
"5 20 474\n",
"5 20 475\n",
"5 20 476\n",
"5 20 477\n",
"5 20 478\n",
"5 20 479\n",
"5 20 480\n",
"5 20 481\n",
"5 20 482\n",
"5 20 483\n",
"5 20 484\n",
"5 20 485\n",
"5 20 486\n",
"5 20 487\n",
"5 20 488\n",
"5 20 489\n",
"5 20 490\n",
"5 20 491\n",
"5 20 492\n",
"5 20 493\n",
"5 20 494\n",
"5 20 495\n",
"5 20 496\n",
"5 20 497\n",
"5 20 498\n",
"5 20 499\n",
"==================== \n",
" n= 10\n",
"10 15 0\n",
"10 15 1\n",
"10 15 2\n",
"10 15 3\n",
"10 15 4\n",
"10 15 5\n",
"10 15 6\n",
"10 15 7\n",
"10 15 8\n",
"10 15 9\n",
"10 15 10\n",
"10 15 11\n",
"10 15 12\n",
"10 15 13\n",
"10 15 14\n",
"10 15 15\n",
"10 15 16\n",
"10 15 17\n",
"10 15 18\n",
"10 15 19\n",
"10 15 20\n",
"10 15 21\n",
"10 15 22\n",
"10 15 23\n",
"10 15 24\n",
"10 15 25\n",
"10 15 26\n",
"10 15 27\n",
"10 15 28\n",
"10 15 29\n",
"10 15 30\n",
"10 15 31\n",
"10 15 32\n",
"10 15 33\n",
"10 15 34\n",
"10 15 35\n",
"10 15 36\n",
"10 15 37\n",
"10 15 38\n",
"10 15 39\n",
"10 15 40\n",
"10 15 41\n",
"10 15 42\n",
"10 15 43\n",
"10 15 44\n",
"10 15 45\n",
"10 15 46\n",
"10 15 47\n",
"10 15 48\n",
"10 15 49\n",
"10 15 50\n",
"10 15 51\n",
"10 15 52\n",
"10 15 53\n",
"10 15 54\n",
"10 15 55\n",
"10 15 56\n",
"10 15 57\n",
"10 15 58\n",
"10 15 59\n",
"10 15 60\n",
"10 15 61\n",
"10 15 62\n",
"10 15 63\n",
"10 15 64\n",
"10 15 65\n",
"10 15 66\n",
"10 15 67\n",
"10 15 68\n",
"10 15 69\n",
"10 15 70\n",
"10 15 71\n",
"10 15 72\n",
"10 15 73\n",
"10 15 74\n",
"10 15 75\n",
"10 15 76\n",
"10 15 77\n",
"10 15 78\n",
"10 15 79\n",
"10 15 80\n",
"10 15 81\n",
"10 15 82\n",
"10 15 83\n",
"10 15 84\n",
"10 15 85\n",
"10 15 86\n",
"10 15 87\n",
"10 15 88\n",
"10 15 89\n",
"10 15 90\n",
"10 15 91\n",
"10 15 92\n",
"10 15 93\n",
"10 15 94\n",
"10 15 95\n",
"10 15 96\n",
"10 15 97\n",
"10 15 98\n",
"10 15 99\n",
"10 15 100\n",
"10 15 101\n",
"10 15 102\n",
"10 15 103\n",
"10 15 104\n",
"10 15 105\n",
"10 15 106\n",
"10 15 107\n",
"10 15 108\n",
"10 15 109\n",
"10 15 110\n",
"10 15 111\n",
"10 15 112\n",
"10 15 113\n",
"10 15 114\n",
"10 15 115\n",
"10 15 116\n",
"10 15 117\n",
"10 15 118\n",
"10 15 119\n",
"10 15 120\n",
"10 15 121\n",
"10 15 122\n",
"10 15 123\n",
"10 15 124\n",
"10 15 125\n",
"10 15 126\n",
"10 15 127\n",
"10 15 128\n",
"10 15 129\n",
"10 15 130\n",
"10 15 131\n",
"10 15 132\n",
"10 15 133\n",
"10 15 134\n",
"10 15 135\n",
"10 15 136\n",
"10 15 137\n",
"10 15 138\n",
"10 15 139\n",
"10 15 140\n",
"10 15 141\n",
"10 15 142\n",
"10 15 143\n",
"10 15 144\n",
"10 15 145\n",
"10 15 146\n",
"10 15 147\n",
"10 15 148\n",
"10 15 149\n",
"10 15 150\n",
"10 15 151\n",
"10 15 152\n",
"10 15 153\n",
"10 15 154\n",
"10 15 155\n",
"10 15 156\n",
"10 15 157\n",
"10 15 158\n",
"10 15 159\n",
"10 15 160\n",
"10 15 161\n",
"10 15 162\n",
"10 15 163\n",
"10 15 164\n",
"10 15 165\n",
"10 15 166\n",
"10 15 167\n",
"10 15 168\n",
"10 15 169\n",
"10 15 170\n",
"10 15 171\n",
"10 15 172\n",
"10 15 173\n",
"10 15 174\n",
"10 15 175\n",
"10 15 176\n",
"10 15 177\n",
"10 15 178\n",
"10 15 179\n",
"10 15 180\n",
"10 15 181\n",
"10 15 182\n",
"10 15 183\n",
"10 15 184\n",
"10 15 185\n",
"10 15 186\n",
"10 15 187\n",
"10 15 188\n",
"10 15 189\n",
"10 15 190\n",
"10 15 191\n",
"10 15 192\n",
"10 15 193\n",
"10 15 194\n",
"10 15 195\n",
"10 15 196\n",
"10 15 197\n",
"10 15 198\n",
"10 15 199\n",
"10 15 200\n",
"10 15 201\n",
"10 15 202\n",
"10 15 203\n",
"10 15 204\n",
"10 15 205\n",
"10 15 206\n",
"10 15 207\n",
"10 15 208\n",
"10 15 209\n",
"10 15 210\n",
"10 15 211\n",
"10 15 212\n",
"10 15 213\n",
"10 15 214\n",
"10 15 215\n",
"10 15 216\n",
"10 15 217\n",
"10 15 218\n",
"10 15 219\n",
"10 15 220\n",
"10 15 221\n",
"10 15 222\n",
"10 15 223\n",
"10 15 224\n",
"10 15 225\n",
"10 15 226\n",
"10 15 227\n",
"10 15 228\n",
"10 15 229\n",
"10 15 230\n",
"10 15 231\n",
"10 15 232\n",
"10 15 233\n",
"10 15 234\n",
"10 15 235\n",
"10 15 236\n",
"10 15 237\n",
"10 15 238\n",
"10 15 239\n",
"10 15 240\n",
"10 15 241\n",
"10 15 242\n",
"10 15 243\n",
"10 15 244\n",
"10 15 245\n",
"10 15 246\n",
"10 15 247\n",
"10 15 248\n",
"10 15 249\n",
"10 15 250\n",
"10 15 251\n",
"10 15 252\n",
"10 15 253\n",
"10 15 254\n",
"10 15 255\n",
"10 15 256\n",
"10 15 257\n",
"10 15 258\n",
"10 15 259\n",
"10 15 260\n",
"10 15 261\n",
"10 15 262\n",
"10 15 263\n",
"10 15 264\n",
"10 15 265\n",
"10 15 266\n",
"10 15 267\n",
"10 15 268\n",
"10 15 269\n",
"10 15 270\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"10 15 271\n",
"10 15 272\n",
"10 15 273\n",
"10 15 274\n",
"10 15 275\n",
"10 15 276\n",
"10 15 277\n",
"10 15 278\n",
"10 15 279\n",
"10 15 280\n",
"10 15 281\n",
"10 15 282\n",
"10 15 283\n",
"10 15 284\n",
"10 15 285\n",
"10 15 286\n",
"10 15 287\n",
"10 15 288\n",
"10 15 289\n",
"10 15 290\n",
"10 15 291\n",
"10 15 292\n",
"10 15 293\n",
"10 15 294\n",
"10 15 295\n",
"10 15 296\n",
"10 15 297\n",
"10 15 298\n",
"10 15 299\n",
"10 15 300\n",
"10 15 301\n",
"10 15 302\n",
"10 15 303\n",
"10 15 304\n",
"10 15 305\n",
"10 15 306\n",
"10 15 307\n",
"10 15 308\n",
"10 15 309\n",
"10 15 310\n",
"10 15 311\n",
"10 15 312\n",
"10 15 313\n",
"10 15 314\n",
"10 15 315\n",
"10 15 316\n",
"10 15 317\n",
"10 15 318\n",
"10 15 319\n",
"10 15 320\n",
"10 15 321\n",
"10 15 322\n",
"10 15 323\n",
"10 15 324\n",
"10 15 325\n",
"10 15 326\n",
"10 15 327\n",
"10 15 328\n",
"10 15 329\n",
"10 15 330\n",
"10 15 331\n",
"10 15 332\n",
"10 15 333\n",
"10 15 334\n",
"10 15 335\n",
"10 15 336\n",
"10 15 337\n",
"10 15 338\n",
"10 15 339\n",
"10 15 340\n",
"10 15 341\n",
"10 15 342\n",
"10 15 343\n",
"10 15 344\n",
"10 15 345\n",
"10 15 346\n",
"10 15 347\n",
"10 15 348\n",
"10 15 349\n",
"10 15 350\n",
"10 15 351\n",
"10 15 352\n",
"10 15 353\n",
"10 15 354\n",
"10 15 355\n",
"10 15 356\n",
"10 15 357\n",
"10 15 358\n",
"10 15 359\n",
"10 15 360\n",
"10 15 361\n",
"10 15 362\n",
"10 15 363\n",
"10 15 364\n",
"10 15 365\n",
"10 15 366\n",
"10 15 367\n",
"10 15 368\n",
"10 15 369\n",
"10 15 370\n",
"10 15 371\n",
"10 15 372\n",
"10 15 373\n",
"10 15 374\n",
"10 15 375\n",
"10 15 376\n",
"10 15 377\n",
"10 15 378\n",
"10 15 379\n",
"10 15 380\n",
"10 15 381\n",
"10 15 382\n",
"10 15 383\n",
"10 15 384\n",
"10 15 385\n",
"10 15 386\n",
"10 15 387\n",
"10 15 388\n",
"10 15 389\n",
"10 15 390\n",
"10 15 391\n",
"10 15 392\n",
"10 15 393\n",
"10 15 394\n",
"10 15 395\n",
"10 15 396\n",
"10 15 397\n",
"10 15 398\n",
"10 15 399\n",
"10 15 400\n",
"10 15 401\n",
"10 15 402\n",
"10 15 403\n",
"10 15 404\n",
"10 15 405\n",
"10 15 406\n",
"10 15 407\n",
"10 15 408\n",
"10 15 409\n",
"10 15 410\n",
"10 15 411\n",
"10 15 412\n",
"10 15 413\n",
"10 15 414\n",
"10 15 415\n",
"10 15 416\n",
"10 15 417\n",
"10 15 418\n",
"10 15 419\n",
"10 15 420\n",
"10 15 421\n",
"10 15 422\n",
"10 15 423\n",
"10 15 424\n",
"10 15 425\n",
"10 15 426\n",
"10 15 427\n",
"10 15 428\n",
"10 15 429\n",
"10 15 430\n",
"10 15 431\n",
"10 15 432\n",
"10 15 433\n",
"10 15 434\n",
"10 15 435\n",
"10 15 436\n",
"10 15 437\n",
"10 15 438\n",
"10 15 439\n",
"10 15 440\n",
"10 15 441\n",
"10 15 442\n",
"10 15 443\n",
"10 15 444\n",
"10 15 445\n",
"10 15 446\n",
"10 15 447\n",
"10 15 448\n",
"10 15 449\n",
"10 15 450\n",
"10 15 451\n",
"10 15 452\n",
"10 15 453\n",
"10 15 454\n",
"10 15 455\n",
"10 15 456\n",
"10 15 457\n",
"10 15 458\n",
"10 15 459\n",
"10 15 460\n",
"10 15 461\n",
"10 15 462\n",
"10 15 463\n",
"10 15 464\n",
"10 15 465\n",
"10 15 466\n",
"10 15 467\n",
"10 15 468\n",
"10 15 469\n",
"10 15 470\n",
"10 15 471\n",
"10 15 472\n",
"10 15 473\n",
"10 15 474\n",
"10 15 475\n",
"10 15 476\n",
"10 15 477\n",
"10 15 478\n",
"10 15 479\n",
"10 15 480\n",
"10 15 481\n",
"10 15 482\n",
"10 15 483\n",
"10 15 484\n",
"10 15 485\n",
"10 15 486\n",
"10 15 487\n",
"10 15 488\n",
"10 15 489\n",
"10 15 490\n",
"10 15 491\n",
"10 15 492\n",
"10 15 493\n",
"10 15 494\n",
"10 15 495\n",
"10 15 496\n",
"10 15 497\n",
"10 15 498\n",
"10 15 499\n",
"10 20 0\n",
"10 20 1\n",
"10 20 2\n",
"10 20 3\n",
"10 20 4\n",
"10 20 5\n",
"10 20 6\n",
"10 20 7\n",
"10 20 8\n",
"10 20 9\n",
"10 20 10\n",
"10 20 11\n",
"10 20 12\n",
"10 20 13\n",
"10 20 14\n",
"10 20 15\n",
"10 20 16\n",
"10 20 17\n",
"10 20 18\n",
"10 20 19\n",
"10 20 20\n",
"10 20 21\n",
"10 20 22\n",
"10 20 23\n",
"10 20 24\n",
"10 20 25\n",
"10 20 26\n",
"10 20 27\n",
"10 20 28\n",
"10 20 29\n",
"10 20 30\n",
"10 20 31\n",
"10 20 32\n",
"10 20 33\n",
"10 20 34\n",
"10 20 35\n",
"10 20 36\n",
"10 20 37\n",
"10 20 38\n",
"10 20 39\n",
"10 20 40\n",
"10 20 41\n",
"10 20 42\n",
"10 20 43\n",
"10 20 44\n",
"10 20 45\n",
"10 20 46\n",
"10 20 47\n",
"10 20 48\n",
"10 20 49\n",
"10 20 50\n",
"10 20 51\n",
"10 20 52\n",
"10 20 53\n",
"10 20 54\n",
"10 20 55\n",
"10 20 56\n",
"10 20 57\n",
"10 20 58\n",
"10 20 59\n",
"10 20 60\n",
"10 20 61\n",
"10 20 62\n",
"10 20 63\n",
"10 20 64\n",
"10 20 65\n",
"10 20 66\n",
"10 20 67\n",
"10 20 68\n",
"10 20 69\n",
"10 20 70\n",
"10 20 71\n",
"10 20 72\n",
"10 20 73\n",
"10 20 74\n",
"10 20 75\n",
"10 20 76\n",
"10 20 77\n",
"10 20 78\n",
"10 20 79\n",
"10 20 80\n",
"10 20 81\n",
"10 20 82\n",
"10 20 83\n",
"10 20 84\n",
"10 20 85\n",
"10 20 86\n",
"10 20 87\n",
"10 20 88\n",
"10 20 89\n",
"10 20 90\n",
"10 20 91\n",
"10 20 92\n",
"10 20 93\n",
"10 20 94\n",
"10 20 95\n",
"10 20 96\n",
"10 20 97\n",
"10 20 98\n",
"10 20 99\n",
"10 20 100\n",
"10 20 101\n",
"10 20 102\n",
"10 20 103\n",
"10 20 104\n",
"10 20 105\n",
"10 20 106\n",
"10 20 107\n",
"10 20 108\n",
"10 20 109\n",
"10 20 110\n",
"10 20 111\n",
"10 20 112\n",
"10 20 113\n",
"10 20 114\n",
"10 20 115\n",
"10 20 116\n",
"10 20 117\n",
"10 20 118\n",
"10 20 119\n",
"10 20 120\n",
"10 20 121\n",
"10 20 122\n",
"10 20 123\n",
"10 20 124\n",
"10 20 125\n",
"10 20 126\n",
"10 20 127\n",
"10 20 128\n",
"10 20 129\n",
"10 20 130\n",
"10 20 131\n",
"10 20 132\n",
"10 20 133\n",
"10 20 134\n",
"10 20 135\n",
"10 20 136\n",
"10 20 137\n",
"10 20 138\n",
"10 20 139\n",
"10 20 140\n",
"10 20 141\n",
"10 20 142\n",
"10 20 143\n",
"10 20 144\n",
"10 20 145\n",
"10 20 146\n",
"10 20 147\n",
"10 20 148\n",
"10 20 149\n",
"10 20 150\n",
"10 20 151\n",
"10 20 152\n",
"10 20 153\n",
"10 20 154\n",
"10 20 155\n",
"10 20 156\n",
"10 20 157\n",
"10 20 158\n",
"10 20 159\n",
"10 20 160\n",
"10 20 161\n",
"10 20 162\n",
"10 20 163\n",
"10 20 164\n",
"10 20 165\n",
"10 20 166\n",
"10 20 167\n",
"10 20 168\n",
"10 20 169\n",
"10 20 170\n",
"10 20 171\n",
"10 20 172\n",
"10 20 173\n",
"10 20 174\n",
"10 20 175\n",
"10 20 176\n",
"10 20 177\n",
"10 20 178\n",
"10 20 179\n",
"10 20 180\n",
"10 20 181\n",
"10 20 182\n",
"10 20 183\n",
"10 20 184\n",
"10 20 185\n",
"10 20 186\n",
"10 20 187\n",
"10 20 188\n",
"10 20 189\n",
"10 20 190\n",
"10 20 191\n",
"10 20 192\n",
"10 20 193\n",
"10 20 194\n",
"10 20 195\n",
"10 20 196\n",
"10 20 197\n",
"10 20 198\n",
"10 20 199\n",
"10 20 200\n",
"10 20 201\n",
"10 20 202\n",
"10 20 203\n",
"10 20 204\n",
"10 20 205\n",
"10 20 206\n",
"10 20 207\n",
"10 20 208\n",
"10 20 209\n",
"10 20 210\n",
"10 20 211\n",
"10 20 212\n",
"10 20 213\n",
"10 20 214\n",
"10 20 215\n",
"10 20 216\n",
"10 20 217\n",
"10 20 218\n",
"10 20 219\n",
"10 20 220\n",
"10 20 221\n",
"10 20 222\n",
"10 20 223\n",
"10 20 224\n",
"10 20 225\n",
"10 20 226\n",
"10 20 227\n",
"10 20 228\n",
"10 20 229\n",
"10 20 230\n",
"10 20 231\n",
"10 20 232\n",
"10 20 233\n",
"10 20 234\n",
"10 20 235\n",
"10 20 236\n",
"10 20 237\n",
"10 20 238\n",
"10 20 239\n",
"10 20 240\n",
"10 20 241\n",
"10 20 242\n",
"10 20 243\n",
"10 20 244\n",
"10 20 245\n",
"10 20 246\n",
"10 20 247\n",
"10 20 248\n",
"10 20 249\n",
"10 20 250\n",
"10 20 251\n",
"10 20 252\n",
"10 20 253\n",
"10 20 254\n",
"10 20 255\n",
"10 20 256\n",
"10 20 257\n",
"10 20 258\n",
"10 20 259\n",
"10 20 260\n",
"10 20 261\n",
"10 20 262\n",
"10 20 263\n",
"10 20 264\n",
"10 20 265\n",
"10 20 266\n",
"10 20 267\n",
"10 20 268\n",
"10 20 269\n",
"10 20 270\n",
"10 20 271\n",
"10 20 272\n",
"10 20 273\n",
"10 20 274\n",
"10 20 275\n",
"10 20 276\n",
"10 20 277\n",
"10 20 278\n",
"10 20 279\n",
"10 20 280\n",
"10 20 281\n",
"10 20 282\n",
"10 20 283\n",
"10 20 284\n",
"10 20 285\n",
"10 20 286\n",
"10 20 287\n",
"10 20 288\n",
"10 20 289\n",
"10 20 290\n",
"10 20 291\n",
"10 20 292\n",
"10 20 293\n",
"10 20 294\n",
"10 20 295\n",
"10 20 296\n",
"10 20 297\n",
"10 20 298\n",
"10 20 299\n",
"10 20 300\n",
"10 20 301\n",
"10 20 302\n",
"10 20 303\n",
"10 20 304\n",
"10 20 305\n",
"10 20 306\n",
"10 20 307\n",
"10 20 308\n",
"10 20 309\n",
"10 20 310\n",
"10 20 311\n",
"10 20 312\n",
"10 20 313\n",
"10 20 314\n",
"10 20 315\n",
"10 20 316\n",
"10 20 317\n",
"10 20 318\n",
"10 20 319\n",
"10 20 320\n",
"10 20 321\n",
"10 20 322\n",
"10 20 323\n",
"10 20 324\n",
"10 20 325\n",
"10 20 326\n",
"10 20 327\n",
"10 20 328\n",
"10 20 329\n",
"10 20 330\n",
"10 20 331\n",
"10 20 332\n",
"10 20 333\n",
"10 20 334\n",
"10 20 335\n",
"10 20 336\n",
"10 20 337\n",
"10 20 338\n",
"10 20 339\n",
"10 20 340\n",
"10 20 341\n",
"10 20 342\n",
"10 20 343\n",
"10 20 344\n",
"10 20 345\n",
"10 20 346\n",
"10 20 347\n",
"10 20 348\n",
"10 20 349\n",
"10 20 350\n",
"10 20 351\n",
"10 20 352\n",
"10 20 353\n",
"10 20 354\n",
"10 20 355\n",
"10 20 356\n",
"10 20 357\n",
"10 20 358\n",
"10 20 359\n",
"10 20 360\n",
"10 20 361\n",
"10 20 362\n",
"10 20 363\n",
"10 20 364\n",
"10 20 365\n",
"10 20 366\n",
"10 20 367\n",
"10 20 368\n",
"10 20 369\n",
"10 20 370\n",
"10 20 371\n",
"10 20 372\n",
"10 20 373\n",
"10 20 374\n",
"10 20 375\n",
"10 20 376\n",
"10 20 377\n",
"10 20 378\n",
"10 20 379\n",
"10 20 380\n",
"10 20 381\n",
"10 20 382\n",
"10 20 383\n",
"10 20 384\n",
"10 20 385\n",
"10 20 386\n",
"10 20 387\n",
"10 20 388\n",
"10 20 389\n",
"10 20 390\n",
"10 20 391\n",
"10 20 392\n",
"10 20 393\n",
"10 20 394\n",
"10 20 395\n",
"10 20 396\n",
"10 20 397\n",
"10 20 398\n",
"10 20 399\n",
"10 20 400\n",
"10 20 401\n",
"10 20 402\n",
"10 20 403\n",
"10 20 404\n",
"10 20 405\n",
"10 20 406\n",
"10 20 407\n",
"10 20 408\n",
"10 20 409\n",
"10 20 410\n",
"10 20 411\n",
"10 20 412\n",
"10 20 413\n",
"10 20 414\n",
"10 20 415\n",
"10 20 416\n",
"10 20 417\n",
"10 20 418\n",
"10 20 419\n",
"10 20 420\n",
"10 20 421\n",
"10 20 422\n",
"10 20 423\n",
"10 20 424\n",
"10 20 425\n",
"10 20 426\n",
"10 20 427\n",
"10 20 428\n",
"10 20 429\n",
"10 20 430\n",
"10 20 431\n",
"10 20 432\n",
"10 20 433\n",
"10 20 434\n",
"10 20 435\n",
"10 20 436\n",
"10 20 437\n",
"10 20 438\n",
"10 20 439\n",
"10 20 440\n",
"10 20 441\n",
"10 20 442\n",
"10 20 443\n",
"10 20 444\n",
"10 20 445\n",
"10 20 446\n",
"10 20 447\n",
"10 20 448\n",
"10 20 449\n",
"10 20 450\n",
"10 20 451\n",
"10 20 452\n",
"10 20 453\n",
"10 20 454\n",
"10 20 455\n",
"10 20 456\n",
"10 20 457\n",
"10 20 458\n",
"10 20 459\n",
"10 20 460\n",
"10 20 461\n",
"10 20 462\n",
"10 20 463\n",
"10 20 464\n",
"10 20 465\n",
"10 20 466\n",
"10 20 467\n",
"10 20 468\n",
"10 20 469\n",
"10 20 470\n",
"10 20 471\n",
"10 20 472\n",
"10 20 473\n",
"10 20 474\n",
"10 20 475\n",
"10 20 476\n",
"10 20 477\n",
"10 20 478\n",
"10 20 479\n",
"10 20 480\n",
"10 20 481\n",
"10 20 482\n",
"10 20 483\n",
"10 20 484\n",
"10 20 485\n",
"10 20 486\n",
"10 20 487\n",
"10 20 488\n",
"10 20 489\n",
"10 20 490\n",
"10 20 491\n",
"10 20 492\n",
"10 20 493\n",
"10 20 494\n",
"10 20 495\n",
"10 20 496\n",
"10 20 497\n",
"10 20 498\n",
"10 20 499\n",
"==================== \n",
" n= 15\n",
"15 20 0\n",
"15 20 1\n",
"15 20 2\n",
"15 20 3\n",
"15 20 4\n",
"15 20 5\n",
"15 20 6\n",
"15 20 7\n",
"15 20 8\n",
"15 20 9\n",
"15 20 10\n",
"15 20 11\n",
"15 20 12\n",
"15 20 13\n",
"15 20 14\n",
"15 20 15\n",
"15 20 16\n",
"15 20 17\n",
"15 20 18\n",
"15 20 19\n",
"15 20 20\n",
"15 20 21\n",
"15 20 22\n",
"15 20 23\n",
"15 20 24\n",
"15 20 25\n",
"15 20 26\n",
"15 20 27\n",
"15 20 28\n",
"15 20 29\n",
"15 20 30\n",
"15 20 31\n",
"15 20 32\n",
"15 20 33\n",
"15 20 34\n",
"15 20 35\n",
"15 20 36\n",
"15 20 37\n",
"15 20 38\n",
"15 20 39\n",
"15 20 40\n",
"15 20 41\n",
"15 20 42\n",
"15 20 43\n",
"15 20 44\n",
"15 20 45\n",
"15 20 46\n",
"15 20 47\n",
"15 20 48\n",
"15 20 49\n",
"15 20 50\n",
"15 20 51\n",
"15 20 52\n",
"15 20 53\n",
"15 20 54\n",
"15 20 55\n",
"15 20 56\n",
"15 20 57\n",
"15 20 58\n",
"15 20 59\n",
"15 20 60\n",
"15 20 61\n",
"15 20 62\n",
"15 20 63\n",
"15 20 64\n",
"15 20 65\n",
"15 20 66\n",
"15 20 67\n",
"15 20 68\n",
"15 20 69\n",
"15 20 70\n",
"15 20 71\n",
"15 20 72\n",
"15 20 73\n",
"15 20 74\n",
"15 20 75\n",
"15 20 76\n",
"15 20 77\n",
"15 20 78\n",
"15 20 79\n",
"15 20 80\n",
"15 20 81\n",
"15 20 82\n",
"15 20 83\n",
"15 20 84\n",
"15 20 85\n",
"15 20 86\n",
"15 20 87\n",
"15 20 88\n",
"15 20 89\n",
"15 20 90\n",
"15 20 91\n",
"15 20 92\n",
"15 20 93\n",
"15 20 94\n",
"15 20 95\n",
"15 20 96\n",
"15 20 97\n",
"15 20 98\n",
"15 20 99\n",
"15 20 100\n",
"15 20 101\n",
"15 20 102\n",
"15 20 103\n",
"15 20 104\n",
"15 20 105\n",
"15 20 106\n",
"15 20 107\n",
"15 20 108\n",
"15 20 109\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"15 20 110\n",
"15 20 111\n",
"15 20 112\n",
"15 20 113\n",
"15 20 114\n",
"15 20 115\n",
"15 20 116\n",
"15 20 117\n",
"15 20 118\n",
"15 20 119\n",
"15 20 120\n",
"15 20 121\n",
"15 20 122\n",
"15 20 123\n",
"15 20 124\n",
"15 20 125\n",
"15 20 126\n",
"15 20 127\n",
"15 20 128\n",
"15 20 129\n",
"15 20 130\n",
"15 20 131\n",
"15 20 132\n",
"15 20 133\n",
"15 20 134\n",
"15 20 135\n",
"15 20 136\n",
"15 20 137\n",
"15 20 138\n",
"15 20 139\n",
"15 20 140\n",
"15 20 141\n",
"15 20 142\n",
"15 20 143\n",
"15 20 144\n",
"15 20 145\n",
"15 20 146\n",
"15 20 147\n",
"15 20 148\n",
"15 20 149\n",
"15 20 150\n",
"15 20 151\n",
"15 20 152\n",
"15 20 153\n",
"15 20 154\n",
"15 20 155\n",
"15 20 156\n",
"15 20 157\n",
"15 20 158\n",
"15 20 159\n",
"15 20 160\n",
"15 20 161\n",
"15 20 162\n",
"15 20 163\n",
"15 20 164\n",
"15 20 165\n",
"15 20 166\n",
"15 20 167\n",
"15 20 168\n",
"15 20 169\n",
"15 20 170\n",
"15 20 171\n",
"15 20 172\n",
"15 20 173\n",
"15 20 174\n",
"15 20 175\n",
"15 20 176\n",
"15 20 177\n",
"15 20 178\n",
"15 20 179\n",
"15 20 180\n",
"15 20 181\n",
"15 20 182\n",
"15 20 183\n",
"15 20 184\n",
"15 20 185\n",
"15 20 186\n",
"15 20 187\n",
"15 20 188\n",
"15 20 189\n",
"15 20 190\n",
"15 20 191\n",
"15 20 192\n",
"15 20 193\n",
"15 20 194\n",
"15 20 195\n",
"15 20 196\n",
"15 20 197\n",
"15 20 198\n",
"15 20 199\n",
"15 20 200\n",
"15 20 201\n",
"15 20 202\n",
"15 20 203\n",
"15 20 204\n",
"15 20 205\n",
"15 20 206\n",
"15 20 207\n",
"15 20 208\n",
"15 20 209\n",
"15 20 210\n",
"15 20 211\n",
"15 20 212\n",
"15 20 213\n",
"15 20 214\n",
"15 20 215\n",
"15 20 216\n",
"15 20 217\n",
"15 20 218\n",
"15 20 219\n",
"15 20 220\n",
"15 20 221\n",
"15 20 222\n",
"15 20 223\n",
"15 20 224\n",
"15 20 225\n",
"15 20 226\n",
"15 20 227\n",
"15 20 228\n",
"15 20 229\n",
"15 20 230\n",
"15 20 231\n",
"15 20 232\n",
"15 20 233\n",
"15 20 234\n",
"15 20 235\n",
"15 20 236\n",
"15 20 237\n",
"15 20 238\n",
"15 20 239\n",
"15 20 240\n",
"15 20 241\n",
"15 20 242\n",
"15 20 243\n",
"15 20 244\n",
"15 20 245\n",
"15 20 246\n",
"15 20 247\n",
"15 20 248\n",
"15 20 249\n",
"15 20 250\n",
"15 20 251\n",
"15 20 252\n",
"15 20 253\n",
"15 20 254\n",
"15 20 255\n",
"15 20 256\n",
"15 20 257\n",
"15 20 258\n",
"15 20 259\n",
"15 20 260\n",
"15 20 261\n",
"15 20 262\n",
"15 20 263\n",
"15 20 264\n",
"15 20 265\n",
"15 20 266\n",
"15 20 267\n",
"15 20 268\n",
"15 20 269\n",
"15 20 270\n",
"15 20 271\n",
"15 20 272\n",
"15 20 273\n",
"15 20 274\n",
"15 20 275\n",
"15 20 276\n",
"15 20 277\n",
"15 20 278\n",
"15 20 279\n",
"15 20 280\n",
"15 20 281\n",
"15 20 282\n",
"15 20 283\n",
"15 20 284\n",
"15 20 285\n",
"15 20 286\n",
"15 20 287\n",
"15 20 288\n",
"15 20 289\n",
"15 20 290\n",
"15 20 291\n",
"15 20 292\n",
"15 20 293\n",
"15 20 294\n",
"15 20 295\n",
"15 20 296\n",
"15 20 297\n",
"15 20 298\n",
"15 20 299\n",
"15 20 300\n",
"15 20 301\n",
"15 20 302\n",
"15 20 303\n",
"15 20 304\n",
"15 20 305\n",
"15 20 306\n",
"15 20 307\n",
"15 20 308\n",
"15 20 309\n",
"15 20 310\n",
"15 20 311\n",
"15 20 312\n",
"15 20 313\n",
"15 20 314\n",
"15 20 315\n",
"15 20 316\n",
"15 20 317\n",
"15 20 318\n",
"15 20 319\n",
"15 20 320\n",
"15 20 321\n",
"15 20 322\n",
"15 20 323\n",
"15 20 324\n",
"15 20 325\n",
"15 20 326\n",
"15 20 327\n",
"15 20 328\n",
"15 20 329\n",
"15 20 330\n",
"15 20 331\n",
"15 20 332\n",
"15 20 333\n",
"15 20 334\n",
"15 20 335\n",
"15 20 336\n",
"15 20 337\n",
"15 20 338\n",
"15 20 339\n",
"15 20 340\n",
"15 20 341\n",
"15 20 342\n",
"15 20 343\n",
"15 20 344\n",
"15 20 345\n",
"15 20 346\n",
"15 20 347\n",
"15 20 348\n",
"15 20 349\n",
"15 20 350\n",
"15 20 351\n",
"15 20 352\n",
"15 20 353\n",
"15 20 354\n",
"15 20 355\n",
"15 20 356\n",
"15 20 357\n",
"15 20 358\n",
"15 20 359\n",
"15 20 360\n",
"15 20 361\n",
"15 20 362\n",
"15 20 363\n",
"15 20 364\n",
"15 20 365\n",
"15 20 366\n",
"15 20 367\n",
"15 20 368\n",
"15 20 369\n",
"15 20 370\n",
"15 20 371\n",
"15 20 372\n",
"15 20 373\n",
"15 20 374\n",
"15 20 375\n",
"15 20 376\n",
"15 20 377\n",
"15 20 378\n",
"15 20 379\n",
"15 20 380\n",
"15 20 381\n",
"15 20 382\n",
"15 20 383\n",
"15 20 384\n",
"15 20 385\n",
"15 20 386\n",
"15 20 387\n",
"15 20 388\n",
"15 20 389\n",
"15 20 390\n",
"15 20 391\n",
"15 20 392\n",
"15 20 393\n",
"15 20 394\n",
"15 20 395\n",
"15 20 396\n",
"15 20 397\n",
"15 20 398\n",
"15 20 399\n",
"15 20 400\n",
"15 20 401\n",
"15 20 402\n",
"15 20 403\n",
"15 20 404\n",
"15 20 405\n",
"15 20 406\n",
"15 20 407\n",
"15 20 408\n",
"15 20 409\n",
"15 20 410\n",
"15 20 411\n",
"15 20 412\n",
"15 20 413\n",
"15 20 414\n",
"15 20 415\n",
"15 20 416\n",
"15 20 417\n",
"15 20 418\n",
"15 20 419\n",
"15 20 420\n",
"15 20 421\n",
"15 20 422\n",
"15 20 423\n",
"15 20 424\n",
"15 20 425\n",
"15 20 426\n",
"15 20 427\n",
"15 20 428\n",
"15 20 429\n",
"15 20 430\n",
"15 20 431\n",
"15 20 432\n",
"15 20 433\n",
"15 20 434\n",
"15 20 435\n",
"15 20 436\n",
"15 20 437\n",
"15 20 438\n",
"15 20 439\n",
"15 20 440\n",
"15 20 441\n",
"15 20 442\n",
"15 20 443\n",
"15 20 444\n",
"15 20 445\n",
"15 20 446\n",
"15 20 447\n",
"15 20 448\n",
"15 20 449\n",
"15 20 450\n",
"15 20 451\n",
"15 20 452\n",
"15 20 453\n",
"15 20 454\n",
"15 20 455\n",
"15 20 456\n",
"15 20 457\n",
"15 20 458\n",
"15 20 459\n",
"15 20 460\n",
"15 20 461\n",
"15 20 462\n",
"15 20 463\n",
"15 20 464\n",
"15 20 465\n",
"15 20 466\n",
"15 20 467\n",
"15 20 468\n",
"15 20 469\n",
"15 20 470\n",
"15 20 471\n",
"15 20 472\n",
"15 20 473\n",
"15 20 474\n",
"15 20 475\n",
"15 20 476\n",
"15 20 477\n",
"15 20 478\n",
"15 20 479\n",
"15 20 480\n",
"15 20 481\n",
"15 20 482\n",
"15 20 483\n",
"15 20 484\n",
"15 20 485\n",
"15 20 486\n",
"15 20 487\n",
"15 20 488\n",
"15 20 489\n",
"15 20 490\n",
"15 20 491\n",
"15 20 492\n",
"15 20 493\n",
"15 20 494\n",
"15 20 495\n",
"15 20 496\n",
"15 20 497\n",
"15 20 498\n",
"15 20 499\n",
"Done\n"
]
}
],
"source": [
"N,n_max=500,20\n",
"D=10 # Percent to report\n",
"k,j=0,0 # Counter\n",
"d,j={},0\n",
"Q_o={ 3:[5,10,15,20], 5:[10,15,20], 10:[15,20], 15:[20] }\n",
"for n in Q_o.keys():\n",
" print(\"=\"*20,\"\\n\",\"n=\",n)\n",
" for q_o in Q_o[n]:\n",
" d[n,q_o]=np.zeros(N)\n",
" for i in range(N):\n",
" j+=1\n",
" print(n,q_o,i)\n",
"# q_i=np.random.randint(n,q_o+1)\n",
" z_i=pp.zonotope(G=np.random.normal(size=(n,n)))\n",
" z_o=pp.zonotope(G=np.random.normal(size=(n,q_o)))\n",
"# x=time.time()\n",
" a_suf=alpha_sufficient(z_i,z_o)\n",
"# print(\"sufficient:\",time.time()-x)\n",
"# x=time.time()\n",
" a_nec=alpha_necessary(z_i,z_o)\n",
"# print(\"necessary:\",time.time()-x)\n",
" d[n,q_o][i]=1-a_suf/a_nec\n",
"# print(\"gap:\",n,q_o,\"=\",d[n,q_o][i])\n",
" j+=1\n",
"print(\"Done\")"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(3, 5) [496, 496, 500, 500] 0.049187775432405045\n",
"(3, 10) [496, 499, 500, 500] 0.04621478578855909\n",
"(3, 15) [496, 499, 500, 500] 0.025488967420588482\n",
"(3, 20) [500, 500, 500, 500] 0.0008762382316735984\n",
"(5, 10) [476, 489, 500, 500] 0.02700849729590582\n",
"(5, 15) [472, 490, 500, 500] 0.04464416234485524\n",
"(5, 20) [480, 493, 500, 500] 0.04962959408652312\n",
"(10, 15) [421, 453, 492, 500] 0.07280827975640214\n",
"(10, 20) [332, 381, 474, 499] 0.10066627720859611\n",
"(15, 20) [370, 420, 489, 500] 0.08882584056299803\n"
]
}
],
"source": [
"T=[0.001,0.01,0.05,0.1]\n",
"for key in d.keys():\n",
" print(key,[np.sum(d[key]<t) for t in T],np.max(d[key]))"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"10000"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"j"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
```
|
github_jupyter
|
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Zonotope Containment"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pypolycontain as pp\n",
"import time\n",
"import pydrake.solvers.mathematicalprogram as MP\n",
"# use Gurobi solver\n",
"import pydrake.solvers.gurobi as Gurobi_drake\n",
"gurobi_solver=Gurobi_drake.GurobiSolver()"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"def alpha_necessary_old(Z_i,Z_o):\n",
" program=MP.MathematicalProgram()\n",
" zeta=program.NewContinuousVariables(Z_o.G.shape[1],2**Z_i.G.shape[1],\"zeta\")\n",
" beta=program.NewContinuousVariables(1,\"beta\")\n",
" V=pp.vcube(Z_i.G.shape[1])\n",
" for i in range(V.shape[0]):\n",
" program.AddLinearEqualityConstraint( Aeq=Z_o.G, beq=np.dot(Z_i.G,V.T)[:,i], vars= zeta[:,i] )\n",
" program.AddLinearConstraint( np.less_equal(zeta,beta*np.ones(zeta.shape),dtype='object').flatten() )\n",
" program.AddLinearConstraint( np.less_equal(-zeta,beta*np.ones(zeta.shape),dtype='object').flatten() )\n",
" program.AddLinearCost(np.eye(1),np.zeros((1)),beta)\n",
" result=gurobi_solver.Solve(program,None,None)\n",
" if result.is_success():\n",
" alpha=1/result.GetSolution(beta)[0]\n",
" return alpha\n",
" else:\n",
" print(\"optimization failed\")\n",
" \n",
"def alpha_necessary_older(Z_i,Z_o):\n",
" alpha_min=np.inf\n",
" V=pp.vcube(Z_i.G.shape[1])\n",
" B=np.dot(Z_i.G,V.T)\n",
" for i in range(V.shape[0]):\n",
" program=MP.MathematicalProgram()\n",
" zeta=program.NewContinuousVariables(Z_o.G.shape[1],\"zeta\")\n",
" beta=program.NewContinuousVariables(1,\"beta\")\n",
" program.AddLinearEqualityConstraint( Aeq=Z_o.G, beq=B[:,i:i+1], vars= zeta )\n",
"# for j in range(zeta.shape[0]):\n",
"# program.AddLinearConstraint( zeta[j]-beta, -np.inf, 0 )\n",
"# program.AddLinearConstraint( -zeta[j]+ beta, -np.inf, 0 )\n",
" program.AddLinearConstraint( np.less_equal(zeta,beta*np.ones(zeta.shape),dtype='object').flatten() )\n",
" program.AddLinearConstraint( np.less_equal(-zeta,beta*np.ones(zeta.shape),dtype='object').flatten() )\n",
" program.AddLinearCost(np.eye(1),np.zeros((1)),beta)\n",
" result=gurobi_solver.Solve(program,None,None)\n",
" if result.is_success():\n",
" alpha=1/result.GetSolution(beta)[0]\n",
" alpha_min=min(alpha,alpha_min)\n",
" else:\n",
" print(\"optimization failed\")\n",
" return alpha_min\n",
"\n",
"def alpha_necessary(Z_i,Z_o):\n",
" alpha_min=np.inf\n",
" V=pp.vcube(Z_i.G.shape[1])\n",
" B=np.dot(Z_i.G,V.T)\n",
" for i in range(V.shape[0]):\n",
" program=MP.MathematicalProgram()\n",
" zeta=program.NewContinuousVariables(Z_o.G.shape[1],\"zeta\")\n",
" alpha=program.NewContinuousVariables(1,\"alpha\")\n",
" Aeq=np.hstack(( Z_o.G, -B[:,i:i+1]))\n",
" program.AddLinearEqualityConstraint( Aeq=np.hstack(( Z_o.G, -B[:,i:i+1])), \\\n",
" beq=np.zeros((Z_o.G.shape[0])),\\\n",
" vars= np.hstack((zeta,alpha)) )\n",
" program.AddBoundingBoxConstraint(-1,1,zeta)\n",
" program.AddLinearCost(-np.eye(1),np.zeros((1)),alpha)\n",
" result=gurobi_solver.Solve(program,None,None)\n",
" if result.is_success():\n",
" alpha=result.GetSolution(alpha)[0]\n",
" alpha_min=min(alpha,alpha_min)\n",
" else:\n",
" print(\"optimization failed\")\n",
" return alpha_min\n",
" \n",
" \n",
"def alpha_sufficient(Z_i,Z_o):\n",
" program=MP.MathematicalProgram()\n",
" beta=program.NewContinuousVariables(1,\"beta\")\n",
" circumbody=pp.to_AH_polytope(Z_o)\n",
" parametric_circumbody=circumbody.copy()\n",
" parametric_circumbody.P.h=circumbody.P.h*beta\n",
" Theta,*_=pp.subset(program,Z_i,parametric_circumbody,k=-1)\n",
" program.AddLinearCost(np.eye(1),np.zeros((1)),beta)\n",
" result=gurobi_solver.Solve(program,None,None)\n",
" if result.is_success():\n",
" alpha=1/result.GetSolution(beta)[0]\n",
" return alpha\n",
" else:\n",
" print(\"optimization failed\")"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"==================== \n",
" n= 3\n",
"3 5 0\n",
"3 5 1\n",
"3 5 2\n",
"3 5 3\n",
"3 5 4\n",
"3 5 5\n",
"3 5 6\n",
"3 5 7\n",
"3 5 8\n",
"3 5 9\n",
"3 5 10\n",
"3 5 11\n",
"3 5 12\n",
"3 5 13\n",
"3 5 14\n",
"3 5 15\n",
"3 5 16\n",
"3 5 17\n",
"3 5 18\n",
"3 5 19\n",
"3 5 20\n",
"3 5 21\n",
"3 5 22\n",
"3 5 23\n",
"3 5 24\n",
"3 5 25\n",
"3 5 26\n",
"3 5 27\n",
"3 5 28\n",
"3 5 29\n",
"3 5 30\n",
"3 5 31\n",
"3 5 32\n",
"3 5 33\n",
"3 5 34\n",
"3 5 35\n",
"3 5 36\n",
"3 5 37\n",
"3 5 38\n",
"3 5 39\n",
"3 5 40\n",
"3 5 41\n",
"3 5 42\n",
"3 5 43\n",
"3 5 44\n",
"3 5 45\n",
"3 5 46\n",
"3 5 47\n",
"3 5 48\n",
"3 5 49\n",
"3 5 50\n",
"3 5 51\n",
"3 5 52\n",
"3 5 53\n",
"3 5 54\n",
"3 5 55\n",
"3 5 56\n",
"3 5 57\n",
"3 5 58\n",
"3 5 59\n",
"3 5 60\n",
"3 5 61\n",
"3 5 62\n",
"3 5 63\n",
"3 5 64\n",
"3 5 65\n",
"3 5 66\n",
"3 5 67\n",
"3 5 68\n",
"3 5 69\n",
"3 5 70\n",
"3 5 71\n",
"3 5 72\n",
"3 5 73\n",
"3 5 74\n",
"3 5 75\n",
"3 5 76\n",
"3 5 77\n",
"3 5 78\n",
"3 5 79\n",
"3 5 80\n",
"3 5 81\n",
"3 5 82\n",
"3 5 83\n",
"3 5 84\n",
"3 5 85\n",
"3 5 86\n",
"3 5 87\n",
"3 5 88\n",
"3 5 89\n",
"3 5 90\n",
"3 5 91\n",
"3 5 92\n",
"3 5 93\n",
"3 5 94\n",
"3 5 95\n",
"3 5 96\n",
"3 5 97\n",
"3 5 98\n",
"3 5 99\n",
"3 5 100\n",
"3 5 101\n",
"3 5 102\n",
"3 5 103\n",
"3 5 104\n",
"3 5 105\n",
"3 5 106\n",
"3 5 107\n",
"3 5 108\n",
"3 5 109\n",
"3 5 110\n",
"3 5 111\n",
"3 5 112\n",
"3 5 113\n",
"3 5 114\n",
"3 5 115\n",
"3 5 116\n",
"3 5 117\n",
"3 5 118\n",
"3 5 119\n",
"3 5 120\n",
"3 5 121\n",
"3 5 122\n",
"3 5 123\n",
"3 5 124\n",
"3 5 125\n",
"3 5 126\n",
"3 5 127\n",
"3 5 128\n",
"3 5 129\n",
"3 5 130\n",
"3 5 131\n",
"3 5 132\n",
"3 5 133\n",
"3 5 134\n",
"3 5 135\n",
"3 5 136\n",
"3 5 137\n",
"3 5 138\n",
"3 5 139\n",
"3 5 140\n",
"3 5 141\n",
"3 5 142\n",
"3 5 143\n",
"3 5 144\n",
"3 5 145\n",
"3 5 146\n",
"3 5 147\n",
"3 5 148\n",
"3 5 149\n",
"3 5 150\n",
"3 5 151\n",
"3 5 152\n",
"3 5 153\n",
"3 5 154\n",
"3 5 155\n",
"3 5 156\n",
"3 5 157\n",
"3 5 158\n",
"3 5 159\n",
"3 5 160\n",
"3 5 161\n",
"3 5 162\n",
"3 5 163\n",
"3 5 164\n",
"3 5 165\n",
"3 5 166\n",
"3 5 167\n",
"3 5 168\n",
"3 5 169\n",
"3 5 170\n",
"3 5 171\n",
"3 5 172\n",
"3 5 173\n",
"3 5 174\n",
"3 5 175\n",
"3 5 176\n",
"3 5 177\n",
"3 5 178\n",
"3 5 179\n",
"3 5 180\n",
"3 5 181\n",
"3 5 182\n",
"3 5 183\n",
"3 5 184\n",
"3 5 185\n",
"3 5 186\n",
"3 5 187\n",
"3 5 188\n",
"3 5 189\n",
"3 5 190\n",
"3 5 191\n",
"3 5 192\n",
"3 5 193\n",
"3 5 194\n",
"3 5 195\n",
"3 5 196\n",
"3 5 197\n",
"3 5 198\n",
"3 5 199\n",
"3 5 200\n",
"3 5 201\n",
"3 5 202\n",
"3 5 203\n",
"3 5 204\n",
"3 5 205\n",
"3 5 206\n",
"3 5 207\n",
"3 5 208\n",
"3 5 209\n",
"3 5 210\n",
"3 5 211\n",
"3 5 212\n",
"3 5 213\n",
"3 5 214\n",
"3 5 215\n",
"3 5 216\n",
"3 5 217\n",
"3 5 218\n",
"3 5 219\n",
"3 5 220\n",
"3 5 221\n",
"3 5 222\n",
"3 5 223\n",
"3 5 224\n",
"3 5 225\n",
"3 5 226\n",
"3 5 227\n",
"3 5 228\n",
"3 5 229\n",
"3 5 230\n",
"3 5 231\n",
"3 5 232\n",
"3 5 233\n",
"3 5 234\n",
"3 5 235\n",
"3 5 236\n",
"3 5 237\n",
"3 5 238\n",
"3 5 239\n",
"3 5 240\n",
"3 5 241\n",
"3 5 242\n",
"3 5 243\n",
"3 5 244\n",
"3 5 245\n",
"3 5 246\n",
"3 5 247\n",
"3 5 248\n",
"3 5 249\n",
"3 5 250\n",
"3 5 251\n",
"3 5 252\n",
"3 5 253\n",
"3 5 254\n",
"3 5 255\n",
"3 5 256\n",
"3 5 257\n",
"3 5 258\n",
"3 5 259\n",
"3 5 260\n",
"3 5 261\n",
"3 5 262\n",
"3 5 263\n",
"3 5 264\n",
"3 5 265\n",
"3 5 266\n",
"3 5 267\n",
"3 5 268\n",
"3 5 269\n",
"3 5 270\n",
"3 5 271\n",
"3 5 272\n",
"3 5 273\n",
"3 5 274\n",
"3 5 275\n",
"3 5 276\n",
"3 5 277\n",
"3 5 278\n",
"3 5 279\n",
"3 5 280\n",
"3 5 281\n",
"3 5 282\n",
"3 5 283\n",
"3 5 284\n",
"3 5 285\n",
"3 5 286\n",
"3 5 287\n",
"3 5 288\n",
"3 5 289\n",
"3 5 290\n",
"3 5 291\n",
"3 5 292\n",
"3 5 293\n",
"3 5 294\n",
"3 5 295\n",
"3 5 296\n",
"3 5 297\n",
"3 5 298\n",
"3 5 299\n",
"3 5 300\n",
"3 5 301\n",
"3 5 302\n",
"3 5 303\n",
"3 5 304\n",
"3 5 305\n",
"3 5 306\n",
"3 5 307\n",
"3 5 308\n",
"3 5 309\n",
"3 5 310\n",
"3 5 311\n",
"3 5 312\n",
"3 5 313\n",
"3 5 314\n",
"3 5 315\n",
"3 5 316\n",
"3 5 317\n",
"3 5 318\n",
"3 5 319\n",
"3 5 320\n",
"3 5 321\n",
"3 5 322\n",
"3 5 323\n",
"3 5 324\n",
"3 5 325\n",
"3 5 326\n",
"3 5 327\n",
"3 5 328\n",
"3 5 329\n",
"3 5 330\n",
"3 5 331\n",
"3 5 332\n",
"3 5 333\n",
"3 5 334\n",
"3 5 335\n",
"3 5 336\n",
"3 5 337\n",
"3 5 338\n",
"3 5 339\n",
"3 5 340\n",
"3 5 341\n",
"3 5 342\n",
"3 5 343\n",
"3 5 344\n",
"3 5 345\n",
"3 5 346\n",
"3 5 347\n",
"3 5 348\n",
"3 5 349\n",
"3 5 350\n",
"3 5 351\n",
"3 5 352\n",
"3 5 353\n",
"3 5 354\n",
"3 5 355\n",
"3 5 356\n",
"3 5 357\n",
"3 5 358\n",
"3 5 359\n",
"3 5 360\n",
"3 5 361\n",
"3 5 362\n",
"3 5 363\n",
"3 5 364\n",
"3 5 365\n",
"3 5 366\n",
"3 5 367\n",
"3 5 368\n",
"3 5 369\n",
"3 5 370\n",
"3 5 371\n",
"3 5 372\n",
"3 5 373\n",
"3 5 374\n",
"3 5 375\n",
"3 5 376\n",
"3 5 377\n",
"3 5 378\n",
"3 5 379\n",
"3 5 380\n",
"3 5 381\n",
"3 5 382\n",
"3 5 383\n",
"3 5 384\n",
"3 5 385\n",
"3 5 386\n",
"3 5 387\n",
"3 5 388\n",
"3 5 389\n",
"3 5 390\n",
"3 5 391\n",
"3 5 392\n",
"3 5 393\n",
"3 5 394\n",
"3 5 395\n",
"3 5 396\n",
"3 5 397\n",
"3 5 398\n",
"3 5 399\n",
"3 5 400\n",
"3 5 401\n",
"3 5 402\n",
"3 5 403\n",
"3 5 404\n",
"3 5 405\n",
"3 5 406\n",
"3 5 407\n",
"3 5 408\n",
"3 5 409\n",
"3 5 410\n",
"3 5 411\n",
"3 5 412\n",
"3 5 413\n",
"3 5 414\n",
"3 5 415\n",
"3 5 416\n",
"3 5 417\n",
"3 5 418\n",
"3 5 419\n",
"3 5 420\n",
"3 5 421\n",
"3 5 422\n",
"3 5 423\n",
"3 5 424\n",
"3 5 425\n",
"3 5 426\n",
"3 5 427\n",
"3 5 428\n",
"3 5 429\n",
"3 5 430\n",
"3 5 431\n",
"3 5 432\n",
"3 5 433\n",
"3 5 434\n",
"3 5 435\n",
"3 5 436\n",
"3 5 437\n",
"3 5 438\n",
"3 5 439\n",
"3 5 440\n",
"3 5 441\n",
"3 5 442\n",
"3 5 443\n",
"3 5 444\n",
"3 5 445\n",
"3 5 446\n",
"3 5 447\n",
"3 5 448\n",
"3 5 449\n",
"3 5 450\n",
"3 5 451\n",
"3 5 452\n",
"3 5 453\n",
"3 5 454\n",
"3 5 455\n",
"3 5 456\n",
"3 5 457\n",
"3 5 458\n",
"3 5 459\n",
"3 5 460\n",
"3 5 461\n",
"3 5 462\n",
"3 5 463\n",
"3 5 464\n",
"3 5 465\n",
"3 5 466\n",
"3 5 467\n",
"3 5 468\n",
"3 5 469\n",
"3 5 470\n",
"3 5 471\n",
"3 5 472\n",
"3 5 473\n",
"3 5 474\n",
"3 5 475\n",
"3 5 476\n",
"3 5 477\n",
"3 5 478\n",
"3 5 479\n",
"3 5 480\n",
"3 5 481\n",
"3 5 482\n",
"3 5 483\n",
"3 5 484\n",
"3 5 485\n",
"3 5 486\n",
"3 5 487\n",
"3 5 488\n",
"3 5 489\n",
"3 5 490\n",
"3 5 491\n",
"3 5 492\n",
"3 5 493\n",
"3 5 494\n",
"3 5 495\n",
"3 5 496\n",
"3 5 497\n",
"3 5 498\n",
"3 5 499\n",
"3 10 0\n",
"3 10 1\n",
"3 10 2\n",
"3 10 3\n",
"3 10 4\n",
"3 10 5\n",
"3 10 6\n",
"3 10 7\n",
"3 10 8\n",
"3 10 9\n",
"3 10 10\n",
"3 10 11\n",
"3 10 12\n",
"3 10 13\n",
"3 10 14\n",
"3 10 15\n",
"3 10 16\n",
"3 10 17\n",
"3 10 18\n",
"3 10 19\n",
"3 10 20\n",
"3 10 21\n",
"3 10 22\n",
"3 10 23\n",
"3 10 24\n",
"3 10 25\n",
"3 10 26\n",
"3 10 27\n",
"3 10 28\n",
"3 10 29\n",
"3 10 30\n",
"3 10 31\n",
"3 10 32\n",
"3 10 33\n",
"3 10 34\n",
"3 10 35\n",
"3 10 36\n",
"3 10 37\n",
"3 10 38\n",
"3 10 39\n",
"3 10 40\n",
"3 10 41\n",
"3 10 42\n",
"3 10 43\n",
"3 10 44\n",
"3 10 45\n",
"3 10 46\n",
"3 10 47\n",
"3 10 48\n",
"3 10 49\n",
"3 10 50\n",
"3 10 51\n",
"3 10 52\n",
"3 10 53\n",
"3 10 54\n",
"3 10 55\n",
"3 10 56\n",
"3 10 57\n",
"3 10 58\n",
"3 10 59\n",
"3 10 60\n",
"3 10 61\n",
"3 10 62\n",
"3 10 63\n",
"3 10 64\n",
"3 10 65\n",
"3 10 66\n",
"3 10 67\n",
"3 10 68\n",
"3 10 69\n",
"3 10 70\n",
"3 10 71\n",
"3 10 72\n",
"3 10 73\n",
"3 10 74\n",
"3 10 75\n",
"3 10 76\n",
"3 10 77\n",
"3 10 78\n",
"3 10 79\n",
"3 10 80\n",
"3 10 81\n",
"3 10 82\n",
"3 10 83\n",
"3 10 84\n",
"3 10 85\n",
"3 10 86\n",
"3 10 87\n",
"3 10 88\n",
"3 10 89\n",
"3 10 90\n",
"3 10 91\n",
"3 10 92\n",
"3 10 93\n",
"3 10 94\n",
"3 10 95\n",
"3 10 96\n",
"3 10 97\n",
"3 10 98\n",
"3 10 99\n",
"3 10 100\n",
"3 10 101\n",
"3 10 102\n",
"3 10 103\n",
"3 10 104\n",
"3 10 105\n",
"3 10 106\n",
"3 10 107\n",
"3 10 108\n",
"3 10 109\n",
"3 10 110\n",
"3 10 111\n",
"3 10 112\n",
"3 10 113\n",
"3 10 114\n",
"3 10 115\n",
"3 10 116\n",
"3 10 117\n",
"3 10 118\n",
"3 10 119\n",
"3 10 120\n",
"3 10 121\n",
"3 10 122\n",
"3 10 123\n",
"3 10 124\n",
"3 10 125\n",
"3 10 126\n",
"3 10 127\n",
"3 10 128\n",
"3 10 129\n",
"3 10 130\n",
"3 10 131\n",
"3 10 132\n",
"3 10 133\n",
"3 10 134\n",
"3 10 135\n",
"3 10 136\n",
"3 10 137\n",
"3 10 138\n",
"3 10 139\n",
"3 10 140\n",
"3 10 141\n",
"3 10 142\n",
"3 10 143\n",
"3 10 144\n",
"3 10 145\n",
"3 10 146\n",
"3 10 147\n",
"3 10 148\n",
"3 10 149\n",
"3 10 150\n",
"3 10 151\n",
"3 10 152\n",
"3 10 153\n",
"3 10 154\n",
"3 10 155\n",
"3 10 156\n",
"3 10 157\n",
"3 10 158\n",
"3 10 159\n",
"3 10 160\n",
"3 10 161\n",
"3 10 162\n",
"3 10 163\n",
"3 10 164\n",
"3 10 165\n",
"3 10 166\n",
"3 10 167\n",
"3 10 168\n",
"3 10 169\n",
"3 10 170\n",
"3 10 171\n",
"3 10 172\n",
"3 10 173\n",
"3 10 174\n",
"3 10 175\n",
"3 10 176\n",
"3 10 177\n",
"3 10 178\n",
"3 10 179\n",
"3 10 180\n",
"3 10 181\n",
"3 10 182\n",
"3 10 183\n",
"3 10 184\n",
"3 10 185\n",
"3 10 186\n",
"3 10 187\n",
"3 10 188\n",
"3 10 189\n",
"3 10 190\n",
"3 10 191\n",
"3 10 192\n",
"3 10 193\n",
"3 10 194\n",
"3 10 195\n",
"3 10 196\n",
"3 10 197\n",
"3 10 198\n",
"3 10 199\n",
"3 10 200\n",
"3 10 201\n",
"3 10 202\n",
"3 10 203\n",
"3 10 204\n",
"3 10 205\n",
"3 10 206\n",
"3 10 207\n",
"3 10 208\n",
"3 10 209\n",
"3 10 210\n",
"3 10 211\n",
"3 10 212\n",
"3 10 213\n",
"3 10 214\n",
"3 10 215\n",
"3 10 216\n",
"3 10 217\n",
"3 10 218\n",
"3 10 219\n",
"3 10 220\n",
"3 10 221\n",
"3 10 222\n",
"3 10 223\n",
"3 10 224\n",
"3 10 225\n",
"3 10 226\n",
"3 10 227\n",
"3 10 228\n",
"3 10 229\n",
"3 10 230\n",
"3 10 231\n",
"3 10 232\n",
"3 10 233\n",
"3 10 234\n",
"3 10 235\n",
"3 10 236\n",
"3 10 237\n",
"3 10 238\n",
"3 10 239\n",
"3 10 240\n",
"3 10 241\n",
"3 10 242\n",
"3 10 243\n",
"3 10 244\n",
"3 10 245\n",
"3 10 246\n",
"3 10 247\n",
"3 10 248\n",
"3 10 249\n",
"3 10 250\n",
"3 10 251\n",
"3 10 252\n",
"3 10 253\n",
"3 10 254\n",
"3 10 255\n",
"3 10 256\n",
"3 10 257\n",
"3 10 258\n",
"3 10 259\n",
"3 10 260\n",
"3 10 261\n",
"3 10 262\n",
"3 10 263\n",
"3 10 264\n",
"3 10 265\n",
"3 10 266\n",
"3 10 267\n",
"3 10 268\n",
"3 10 269\n",
"3 10 270\n",
"3 10 271\n",
"3 10 272\n",
"3 10 273\n",
"3 10 274\n",
"3 10 275\n",
"3 10 276\n",
"3 10 277\n",
"3 10 278\n",
"3 10 279\n",
"3 10 280\n",
"3 10 281\n",
"3 10 282\n",
"3 10 283\n",
"3 10 284\n",
"3 10 285\n",
"3 10 286\n",
"3 10 287\n",
"3 10 288\n",
"3 10 289\n",
"3 10 290\n",
"3 10 291\n",
"3 10 292\n",
"3 10 293\n",
"3 10 294\n",
"3 10 295\n",
"3 10 296\n",
"3 10 297\n",
"3 10 298\n",
"3 10 299\n",
"3 10 300\n",
"3 10 301\n",
"3 10 302\n",
"3 10 303\n",
"3 10 304\n",
"3 10 305\n",
"3 10 306\n",
"3 10 307\n",
"3 10 308\n",
"3 10 309\n",
"3 10 310\n",
"3 10 311\n",
"3 10 312\n",
"3 10 313\n",
"3 10 314\n",
"3 10 315\n",
"3 10 316\n",
"3 10 317\n",
"3 10 318\n",
"3 10 319\n",
"3 10 320\n",
"3 10 321\n",
"3 10 322\n",
"3 10 323\n",
"3 10 324\n",
"3 10 325\n",
"3 10 326\n",
"3 10 327\n",
"3 10 328\n",
"3 10 329\n",
"3 10 330\n",
"3 10 331\n",
"3 10 332\n",
"3 10 333\n",
"3 10 334\n",
"3 10 335\n",
"3 10 336\n",
"3 10 337\n",
"3 10 338\n",
"3 10 339\n",
"3 10 340\n",
"3 10 341\n",
"3 10 342\n",
"3 10 343\n",
"3 10 344\n",
"3 10 345\n",
"3 10 346\n",
"3 10 347\n",
"3 10 348\n",
"3 10 349\n",
"3 10 350\n",
"3 10 351\n",
"3 10 352\n",
"3 10 353\n",
"3 10 354\n",
"3 10 355\n",
"3 10 356\n",
"3 10 357\n",
"3 10 358\n",
"3 10 359\n",
"3 10 360\n",
"3 10 361\n",
"3 10 362\n",
"3 10 363\n",
"3 10 364\n",
"3 10 365\n",
"3 10 366\n",
"3 10 367\n",
"3 10 368\n",
"3 10 369\n",
"3 10 370\n",
"3 10 371\n",
"3 10 372\n",
"3 10 373\n",
"3 10 374\n",
"3 10 375\n",
"3 10 376\n",
"3 10 377\n",
"3 10 378\n",
"3 10 379\n",
"3 10 380\n",
"3 10 381\n",
"3 10 382\n",
"3 10 383\n",
"3 10 384\n",
"3 10 385\n",
"3 10 386\n",
"3 10 387\n",
"3 10 388\n",
"3 10 389\n",
"3 10 390\n",
"3 10 391\n",
"3 10 392\n",
"3 10 393\n",
"3 10 394\n",
"3 10 395\n",
"3 10 396\n",
"3 10 397\n",
"3 10 398\n",
"3 10 399\n",
"3 10 400\n",
"3 10 401\n",
"3 10 402\n",
"3 10 403\n",
"3 10 404\n",
"3 10 405\n",
"3 10 406\n",
"3 10 407\n",
"3 10 408\n",
"3 10 409\n",
"3 10 410\n",
"3 10 411\n",
"3 10 412\n",
"3 10 413\n",
"3 10 414\n",
"3 10 415\n",
"3 10 416\n",
"3 10 417\n",
"3 10 418\n",
"3 10 419\n",
"3 10 420\n",
"3 10 421\n",
"3 10 422\n",
"3 10 423\n",
"3 10 424\n",
"3 10 425\n",
"3 10 426\n",
"3 10 427\n",
"3 10 428\n",
"3 10 429\n",
"3 10 430\n",
"3 10 431\n",
"3 10 432\n",
"3 10 433\n",
"3 10 434\n",
"3 10 435\n",
"3 10 436\n",
"3 10 437\n",
"3 10 438\n",
"3 10 439\n",
"3 10 440\n",
"3 10 441\n",
"3 10 442\n",
"3 10 443\n",
"3 10 444\n",
"3 10 445\n",
"3 10 446\n",
"3 10 447\n",
"3 10 448\n",
"3 10 449\n",
"3 10 450\n",
"3 10 451\n",
"3 10 452\n",
"3 10 453\n",
"3 10 454\n",
"3 10 455\n",
"3 10 456\n",
"3 10 457\n",
"3 10 458\n",
"3 10 459\n",
"3 10 460\n",
"3 10 461\n",
"3 10 462\n",
"3 10 463\n",
"3 10 464\n",
"3 10 465\n",
"3 10 466\n",
"3 10 467\n",
"3 10 468\n",
"3 10 469\n",
"3 10 470\n",
"3 10 471\n",
"3 10 472\n",
"3 10 473\n",
"3 10 474\n",
"3 10 475\n",
"3 10 476\n",
"3 10 477\n",
"3 10 478\n",
"3 10 479\n",
"3 10 480\n",
"3 10 481\n",
"3 10 482\n",
"3 10 483\n",
"3 10 484\n",
"3 10 485\n",
"3 10 486\n",
"3 10 487\n",
"3 10 488\n",
"3 10 489\n",
"3 10 490\n",
"3 10 491\n",
"3 10 492\n",
"3 10 493\n",
"3 10 494\n",
"3 10 495\n",
"3 10 496\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"3 10 497\n",
"3 10 498\n",
"3 10 499\n",
"3 15 0\n",
"3 15 1\n",
"3 15 2\n",
"3 15 3\n",
"3 15 4\n",
"3 15 5\n",
"3 15 6\n",
"3 15 7\n",
"3 15 8\n",
"3 15 9\n",
"3 15 10\n",
"3 15 11\n",
"3 15 12\n",
"3 15 13\n",
"3 15 14\n",
"3 15 15\n",
"3 15 16\n",
"3 15 17\n",
"3 15 18\n",
"3 15 19\n",
"3 15 20\n",
"3 15 21\n",
"3 15 22\n",
"3 15 23\n",
"3 15 24\n",
"3 15 25\n",
"3 15 26\n",
"3 15 27\n",
"3 15 28\n",
"3 15 29\n",
"3 15 30\n",
"3 15 31\n",
"3 15 32\n",
"3 15 33\n",
"3 15 34\n",
"3 15 35\n",
"3 15 36\n",
"3 15 37\n",
"3 15 38\n",
"3 15 39\n",
"3 15 40\n",
"3 15 41\n",
"3 15 42\n",
"3 15 43\n",
"3 15 44\n",
"3 15 45\n",
"3 15 46\n",
"3 15 47\n",
"3 15 48\n",
"3 15 49\n",
"3 15 50\n",
"3 15 51\n",
"3 15 52\n",
"3 15 53\n",
"3 15 54\n",
"3 15 55\n",
"3 15 56\n",
"3 15 57\n",
"3 15 58\n",
"3 15 59\n",
"3 15 60\n",
"3 15 61\n",
"3 15 62\n",
"3 15 63\n",
"3 15 64\n",
"3 15 65\n",
"3 15 66\n",
"3 15 67\n",
"3 15 68\n",
"3 15 69\n",
"3 15 70\n",
"3 15 71\n",
"3 15 72\n",
"3 15 73\n",
"3 15 74\n",
"3 15 75\n",
"3 15 76\n",
"3 15 77\n",
"3 15 78\n",
"3 15 79\n",
"3 15 80\n",
"3 15 81\n",
"3 15 82\n",
"3 15 83\n",
"3 15 84\n",
"3 15 85\n",
"3 15 86\n",
"3 15 87\n",
"3 15 88\n",
"3 15 89\n",
"3 15 90\n",
"3 15 91\n",
"3 15 92\n",
"3 15 93\n",
"3 15 94\n",
"3 15 95\n",
"3 15 96\n",
"3 15 97\n",
"3 15 98\n",
"3 15 99\n",
"3 15 100\n",
"3 15 101\n",
"3 15 102\n",
"3 15 103\n",
"3 15 104\n",
"3 15 105\n",
"3 15 106\n",
"3 15 107\n",
"3 15 108\n",
"3 15 109\n",
"3 15 110\n",
"3 15 111\n",
"3 15 112\n",
"3 15 113\n",
"3 15 114\n",
"3 15 115\n",
"3 15 116\n",
"3 15 117\n",
"3 15 118\n",
"3 15 119\n",
"3 15 120\n",
"3 15 121\n",
"3 15 122\n",
"3 15 123\n",
"3 15 124\n",
"3 15 125\n",
"3 15 126\n",
"3 15 127\n",
"3 15 128\n",
"3 15 129\n",
"3 15 130\n",
"3 15 131\n",
"3 15 132\n",
"3 15 133\n",
"3 15 134\n",
"3 15 135\n",
"3 15 136\n",
"3 15 137\n",
"3 15 138\n",
"3 15 139\n",
"3 15 140\n",
"3 15 141\n",
"3 15 142\n",
"3 15 143\n",
"3 15 144\n",
"3 15 145\n",
"3 15 146\n",
"3 15 147\n",
"3 15 148\n",
"3 15 149\n",
"3 15 150\n",
"3 15 151\n",
"3 15 152\n",
"3 15 153\n",
"3 15 154\n",
"3 15 155\n",
"3 15 156\n",
"3 15 157\n",
"3 15 158\n",
"3 15 159\n",
"3 15 160\n",
"3 15 161\n",
"3 15 162\n",
"3 15 163\n",
"3 15 164\n",
"3 15 165\n",
"3 15 166\n",
"3 15 167\n",
"3 15 168\n",
"3 15 169\n",
"3 15 170\n",
"3 15 171\n",
"3 15 172\n",
"3 15 173\n",
"3 15 174\n",
"3 15 175\n",
"3 15 176\n",
"3 15 177\n",
"3 15 178\n",
"3 15 179\n",
"3 15 180\n",
"3 15 181\n",
"3 15 182\n",
"3 15 183\n",
"3 15 184\n",
"3 15 185\n",
"3 15 186\n",
"3 15 187\n",
"3 15 188\n",
"3 15 189\n",
"3 15 190\n",
"3 15 191\n",
"3 15 192\n",
"3 15 193\n",
"3 15 194\n",
"3 15 195\n",
"3 15 196\n",
"3 15 197\n",
"3 15 198\n",
"3 15 199\n",
"3 15 200\n",
"3 15 201\n",
"3 15 202\n",
"3 15 203\n",
"3 15 204\n",
"3 15 205\n",
"3 15 206\n",
"3 15 207\n",
"3 15 208\n",
"3 15 209\n",
"3 15 210\n",
"3 15 211\n",
"3 15 212\n",
"3 15 213\n",
"3 15 214\n",
"3 15 215\n",
"3 15 216\n",
"3 15 217\n",
"3 15 218\n",
"3 15 219\n",
"3 15 220\n",
"3 15 221\n",
"3 15 222\n",
"3 15 223\n",
"3 15 224\n",
"3 15 225\n",
"3 15 226\n",
"3 15 227\n",
"3 15 228\n",
"3 15 229\n",
"3 15 230\n",
"3 15 231\n",
"3 15 232\n",
"3 15 233\n",
"3 15 234\n",
"3 15 235\n",
"3 15 236\n",
"3 15 237\n",
"3 15 238\n",
"3 15 239\n",
"3 15 240\n",
"3 15 241\n",
"3 15 242\n",
"3 15 243\n",
"3 15 244\n",
"3 15 245\n",
"3 15 246\n",
"3 15 247\n",
"3 15 248\n",
"3 15 249\n",
"3 15 250\n",
"3 15 251\n",
"3 15 252\n",
"3 15 253\n",
"3 15 254\n",
"3 15 255\n",
"3 15 256\n",
"3 15 257\n",
"3 15 258\n",
"3 15 259\n",
"3 15 260\n",
"3 15 261\n",
"3 15 262\n",
"3 15 263\n",
"3 15 264\n",
"3 15 265\n",
"3 15 266\n",
"3 15 267\n",
"3 15 268\n",
"3 15 269\n",
"3 15 270\n",
"3 15 271\n",
"3 15 272\n",
"3 15 273\n",
"3 15 274\n",
"3 15 275\n",
"3 15 276\n",
"3 15 277\n",
"3 15 278\n",
"3 15 279\n",
"3 15 280\n",
"3 15 281\n",
"3 15 282\n",
"3 15 283\n",
"3 15 284\n",
"3 15 285\n",
"3 15 286\n",
"3 15 287\n",
"3 15 288\n",
"3 15 289\n",
"3 15 290\n",
"3 15 291\n",
"3 15 292\n",
"3 15 293\n",
"3 15 294\n",
"3 15 295\n",
"3 15 296\n",
"3 15 297\n",
"3 15 298\n",
"3 15 299\n",
"3 15 300\n",
"3 15 301\n",
"3 15 302\n",
"3 15 303\n",
"3 15 304\n",
"3 15 305\n",
"3 15 306\n",
"3 15 307\n",
"3 15 308\n",
"3 15 309\n",
"3 15 310\n",
"3 15 311\n",
"3 15 312\n",
"3 15 313\n",
"3 15 314\n",
"3 15 315\n",
"3 15 316\n",
"3 15 317\n",
"3 15 318\n",
"3 15 319\n",
"3 15 320\n",
"3 15 321\n",
"3 15 322\n",
"3 15 323\n",
"3 15 324\n",
"3 15 325\n",
"3 15 326\n",
"3 15 327\n",
"3 15 328\n",
"3 15 329\n",
"3 15 330\n",
"3 15 331\n",
"3 15 332\n",
"3 15 333\n",
"3 15 334\n",
"3 15 335\n",
"3 15 336\n",
"3 15 337\n",
"3 15 338\n",
"3 15 339\n",
"3 15 340\n",
"3 15 341\n",
"3 15 342\n",
"3 15 343\n",
"3 15 344\n",
"3 15 345\n",
"3 15 346\n",
"3 15 347\n",
"3 15 348\n",
"3 15 349\n",
"3 15 350\n",
"3 15 351\n",
"3 15 352\n",
"3 15 353\n",
"3 15 354\n",
"3 15 355\n",
"3 15 356\n",
"3 15 357\n",
"3 15 358\n",
"3 15 359\n",
"3 15 360\n",
"3 15 361\n",
"3 15 362\n",
"3 15 363\n",
"3 15 364\n",
"3 15 365\n",
"3 15 366\n",
"3 15 367\n",
"3 15 368\n",
"3 15 369\n",
"3 15 370\n",
"3 15 371\n",
"3 15 372\n",
"3 15 373\n",
"3 15 374\n",
"3 15 375\n",
"3 15 376\n",
"3 15 377\n",
"3 15 378\n",
"3 15 379\n",
"3 15 380\n",
"3 15 381\n",
"3 15 382\n",
"3 15 383\n",
"3 15 384\n",
"3 15 385\n",
"3 15 386\n",
"3 15 387\n",
"3 15 388\n",
"3 15 389\n",
"3 15 390\n",
"3 15 391\n",
"3 15 392\n",
"3 15 393\n",
"3 15 394\n",
"3 15 395\n",
"3 15 396\n",
"3 15 397\n",
"3 15 398\n",
"3 15 399\n",
"3 15 400\n",
"3 15 401\n",
"3 15 402\n",
"3 15 403\n",
"3 15 404\n",
"3 15 405\n",
"3 15 406\n",
"3 15 407\n",
"3 15 408\n",
"3 15 409\n",
"3 15 410\n",
"3 15 411\n",
"3 15 412\n",
"3 15 413\n",
"3 15 414\n",
"3 15 415\n",
"3 15 416\n",
"3 15 417\n",
"3 15 418\n",
"3 15 419\n",
"3 15 420\n",
"3 15 421\n",
"3 15 422\n",
"3 15 423\n",
"3 15 424\n",
"3 15 425\n",
"3 15 426\n",
"3 15 427\n",
"3 15 428\n",
"3 15 429\n",
"3 15 430\n",
"3 15 431\n",
"3 15 432\n",
"3 15 433\n",
"3 15 434\n",
"3 15 435\n",
"3 15 436\n",
"3 15 437\n",
"3 15 438\n",
"3 15 439\n",
"3 15 440\n",
"3 15 441\n",
"3 15 442\n",
"3 15 443\n",
"3 15 444\n",
"3 15 445\n",
"3 15 446\n",
"3 15 447\n",
"3 15 448\n",
"3 15 449\n",
"3 15 450\n",
"3 15 451\n",
"3 15 452\n",
"3 15 453\n",
"3 15 454\n",
"3 15 455\n",
"3 15 456\n",
"3 15 457\n",
"3 15 458\n",
"3 15 459\n",
"3 15 460\n",
"3 15 461\n",
"3 15 462\n",
"3 15 463\n",
"3 15 464\n",
"3 15 465\n",
"3 15 466\n",
"3 15 467\n",
"3 15 468\n",
"3 15 469\n",
"3 15 470\n",
"3 15 471\n",
"3 15 472\n",
"3 15 473\n",
"3 15 474\n",
"3 15 475\n",
"3 15 476\n",
"3 15 477\n",
"3 15 478\n",
"3 15 479\n",
"3 15 480\n",
"3 15 481\n",
"3 15 482\n",
"3 15 483\n",
"3 15 484\n",
"3 15 485\n",
"3 15 486\n",
"3 15 487\n",
"3 15 488\n",
"3 15 489\n",
"3 15 490\n",
"3 15 491\n",
"3 15 492\n",
"3 15 493\n",
"3 15 494\n",
"3 15 495\n",
"3 15 496\n",
"3 15 497\n",
"3 15 498\n",
"3 15 499\n",
"3 20 0\n",
"3 20 1\n",
"3 20 2\n",
"3 20 3\n",
"3 20 4\n",
"3 20 5\n",
"3 20 6\n",
"3 20 7\n",
"3 20 8\n",
"3 20 9\n",
"3 20 10\n",
"3 20 11\n",
"3 20 12\n",
"3 20 13\n",
"3 20 14\n",
"3 20 15\n",
"3 20 16\n",
"3 20 17\n",
"3 20 18\n",
"3 20 19\n",
"3 20 20\n",
"3 20 21\n",
"3 20 22\n",
"3 20 23\n",
"3 20 24\n",
"3 20 25\n",
"3 20 26\n",
"3 20 27\n",
"3 20 28\n",
"3 20 29\n",
"3 20 30\n",
"3 20 31\n",
"3 20 32\n",
"3 20 33\n",
"3 20 34\n",
"3 20 35\n",
"3 20 36\n",
"3 20 37\n",
"3 20 38\n",
"3 20 39\n",
"3 20 40\n",
"3 20 41\n",
"3 20 42\n",
"3 20 43\n",
"3 20 44\n",
"3 20 45\n",
"3 20 46\n",
"3 20 47\n",
"3 20 48\n",
"3 20 49\n",
"3 20 50\n",
"3 20 51\n",
"3 20 52\n",
"3 20 53\n",
"3 20 54\n",
"3 20 55\n",
"3 20 56\n",
"3 20 57\n",
"3 20 58\n",
"3 20 59\n",
"3 20 60\n",
"3 20 61\n",
"3 20 62\n",
"3 20 63\n",
"3 20 64\n",
"3 20 65\n",
"3 20 66\n",
"3 20 67\n",
"3 20 68\n",
"3 20 69\n",
"3 20 70\n",
"3 20 71\n",
"3 20 72\n",
"3 20 73\n",
"3 20 74\n",
"3 20 75\n",
"3 20 76\n",
"3 20 77\n",
"3 20 78\n",
"3 20 79\n",
"3 20 80\n",
"3 20 81\n",
"3 20 82\n",
"3 20 83\n",
"3 20 84\n",
"3 20 85\n",
"3 20 86\n",
"3 20 87\n",
"3 20 88\n",
"3 20 89\n",
"3 20 90\n",
"3 20 91\n",
"3 20 92\n",
"3 20 93\n",
"3 20 94\n",
"3 20 95\n",
"3 20 96\n",
"3 20 97\n",
"3 20 98\n",
"3 20 99\n",
"3 20 100\n",
"3 20 101\n",
"3 20 102\n",
"3 20 103\n",
"3 20 104\n",
"3 20 105\n",
"3 20 106\n",
"3 20 107\n",
"3 20 108\n",
"3 20 109\n",
"3 20 110\n",
"3 20 111\n",
"3 20 112\n",
"3 20 113\n",
"3 20 114\n",
"3 20 115\n",
"3 20 116\n",
"3 20 117\n",
"3 20 118\n",
"3 20 119\n",
"3 20 120\n",
"3 20 121\n",
"3 20 122\n",
"3 20 123\n",
"3 20 124\n",
"3 20 125\n",
"3 20 126\n",
"3 20 127\n",
"3 20 128\n",
"3 20 129\n",
"3 20 130\n",
"3 20 131\n",
"3 20 132\n",
"3 20 133\n",
"3 20 134\n",
"3 20 135\n",
"3 20 136\n",
"3 20 137\n",
"3 20 138\n",
"3 20 139\n",
"3 20 140\n",
"3 20 141\n",
"3 20 142\n",
"3 20 143\n",
"3 20 144\n",
"3 20 145\n",
"3 20 146\n",
"3 20 147\n",
"3 20 148\n",
"3 20 149\n",
"3 20 150\n",
"3 20 151\n",
"3 20 152\n",
"3 20 153\n",
"3 20 154\n",
"3 20 155\n",
"3 20 156\n",
"3 20 157\n",
"3 20 158\n",
"3 20 159\n",
"3 20 160\n",
"3 20 161\n",
"3 20 162\n",
"3 20 163\n",
"3 20 164\n",
"3 20 165\n",
"3 20 166\n",
"3 20 167\n",
"3 20 168\n",
"3 20 169\n",
"3 20 170\n",
"3 20 171\n",
"3 20 172\n",
"3 20 173\n",
"3 20 174\n",
"3 20 175\n",
"3 20 176\n",
"3 20 177\n",
"3 20 178\n",
"3 20 179\n",
"3 20 180\n",
"3 20 181\n",
"3 20 182\n",
"3 20 183\n",
"3 20 184\n",
"3 20 185\n",
"3 20 186\n",
"3 20 187\n",
"3 20 188\n",
"3 20 189\n",
"3 20 190\n",
"3 20 191\n",
"3 20 192\n",
"3 20 193\n",
"3 20 194\n",
"3 20 195\n",
"3 20 196\n",
"3 20 197\n",
"3 20 198\n",
"3 20 199\n",
"3 20 200\n",
"3 20 201\n",
"3 20 202\n",
"3 20 203\n",
"3 20 204\n",
"3 20 205\n",
"3 20 206\n",
"3 20 207\n",
"3 20 208\n",
"3 20 209\n",
"3 20 210\n",
"3 20 211\n",
"3 20 212\n",
"3 20 213\n",
"3 20 214\n",
"3 20 215\n",
"3 20 216\n",
"3 20 217\n",
"3 20 218\n",
"3 20 219\n",
"3 20 220\n",
"3 20 221\n",
"3 20 222\n",
"3 20 223\n",
"3 20 224\n",
"3 20 225\n",
"3 20 226\n",
"3 20 227\n",
"3 20 228\n",
"3 20 229\n",
"3 20 230\n",
"3 20 231\n",
"3 20 232\n",
"3 20 233\n",
"3 20 234\n",
"3 20 235\n",
"3 20 236\n",
"3 20 237\n",
"3 20 238\n",
"3 20 239\n",
"3 20 240\n",
"3 20 241\n",
"3 20 242\n",
"3 20 243\n",
"3 20 244\n",
"3 20 245\n",
"3 20 246\n",
"3 20 247\n",
"3 20 248\n",
"3 20 249\n",
"3 20 250\n",
"3 20 251\n",
"3 20 252\n",
"3 20 253\n",
"3 20 254\n",
"3 20 255\n",
"3 20 256\n",
"3 20 257\n",
"3 20 258\n",
"3 20 259\n",
"3 20 260\n",
"3 20 261\n",
"3 20 262\n",
"3 20 263\n",
"3 20 264\n",
"3 20 265\n",
"3 20 266\n",
"3 20 267\n",
"3 20 268\n",
"3 20 269\n",
"3 20 270\n",
"3 20 271\n",
"3 20 272\n",
"3 20 273\n",
"3 20 274\n",
"3 20 275\n",
"3 20 276\n",
"3 20 277\n",
"3 20 278\n",
"3 20 279\n",
"3 20 280\n",
"3 20 281\n",
"3 20 282\n",
"3 20 283\n",
"3 20 284\n",
"3 20 285\n",
"3 20 286\n",
"3 20 287\n",
"3 20 288\n",
"3 20 289\n",
"3 20 290\n",
"3 20 291\n",
"3 20 292\n",
"3 20 293\n",
"3 20 294\n",
"3 20 295\n",
"3 20 296\n",
"3 20 297\n",
"3 20 298\n",
"3 20 299\n",
"3 20 300\n",
"3 20 301\n",
"3 20 302\n",
"3 20 303\n",
"3 20 304\n",
"3 20 305\n",
"3 20 306\n",
"3 20 307\n",
"3 20 308\n",
"3 20 309\n",
"3 20 310\n",
"3 20 311\n",
"3 20 312\n",
"3 20 313\n",
"3 20 314\n",
"3 20 315\n",
"3 20 316\n",
"3 20 317\n",
"3 20 318\n",
"3 20 319\n",
"3 20 320\n",
"3 20 321\n",
"3 20 322\n",
"3 20 323\n",
"3 20 324\n",
"3 20 325\n",
"3 20 326\n",
"3 20 327\n",
"3 20 328\n",
"3 20 329\n",
"3 20 330\n",
"3 20 331\n",
"3 20 332\n",
"3 20 333\n",
"3 20 334\n",
"3 20 335\n",
"3 20 336\n",
"3 20 337\n",
"3 20 338\n",
"3 20 339\n",
"3 20 340\n",
"3 20 341\n",
"3 20 342\n",
"3 20 343\n",
"3 20 344\n",
"3 20 345\n",
"3 20 346\n",
"3 20 347\n",
"3 20 348\n",
"3 20 349\n",
"3 20 350\n",
"3 20 351\n",
"3 20 352\n",
"3 20 353\n",
"3 20 354\n",
"3 20 355\n",
"3 20 356\n",
"3 20 357\n",
"3 20 358\n",
"3 20 359\n",
"3 20 360\n",
"3 20 361\n",
"3 20 362\n",
"3 20 363\n",
"3 20 364\n",
"3 20 365\n",
"3 20 366\n",
"3 20 367\n",
"3 20 368\n",
"3 20 369\n",
"3 20 370\n",
"3 20 371\n",
"3 20 372\n",
"3 20 373\n",
"3 20 374\n",
"3 20 375\n",
"3 20 376\n",
"3 20 377\n",
"3 20 378\n",
"3 20 379\n",
"3 20 380\n",
"3 20 381\n",
"3 20 382\n",
"3 20 383\n",
"3 20 384\n",
"3 20 385\n",
"3 20 386\n",
"3 20 387\n",
"3 20 388\n",
"3 20 389\n",
"3 20 390\n",
"3 20 391\n",
"3 20 392\n",
"3 20 393\n",
"3 20 394\n",
"3 20 395\n",
"3 20 396\n",
"3 20 397\n",
"3 20 398\n",
"3 20 399\n",
"3 20 400\n",
"3 20 401\n",
"3 20 402\n",
"3 20 403\n",
"3 20 404\n",
"3 20 405\n",
"3 20 406\n",
"3 20 407\n",
"3 20 408\n",
"3 20 409\n",
"3 20 410\n",
"3 20 411\n",
"3 20 412\n",
"3 20 413\n",
"3 20 414\n",
"3 20 415\n",
"3 20 416\n",
"3 20 417\n",
"3 20 418\n",
"3 20 419\n",
"3 20 420\n",
"3 20 421\n",
"3 20 422\n",
"3 20 423\n",
"3 20 424\n",
"3 20 425\n",
"3 20 426\n",
"3 20 427\n",
"3 20 428\n",
"3 20 429\n",
"3 20 430\n",
"3 20 431\n",
"3 20 432\n",
"3 20 433\n",
"3 20 434\n",
"3 20 435\n",
"3 20 436\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"3 20 437\n",
"3 20 438\n",
"3 20 439\n",
"3 20 440\n",
"3 20 441\n",
"3 20 442\n",
"3 20 443\n",
"3 20 444\n",
"3 20 445\n",
"3 20 446\n",
"3 20 447\n",
"3 20 448\n",
"3 20 449\n",
"3 20 450\n",
"3 20 451\n",
"3 20 452\n",
"3 20 453\n",
"3 20 454\n",
"3 20 455\n",
"3 20 456\n",
"3 20 457\n",
"3 20 458\n",
"3 20 459\n",
"3 20 460\n",
"3 20 461\n",
"3 20 462\n",
"3 20 463\n",
"3 20 464\n",
"3 20 465\n",
"3 20 466\n",
"3 20 467\n",
"3 20 468\n",
"3 20 469\n",
"3 20 470\n",
"3 20 471\n",
"3 20 472\n",
"3 20 473\n",
"3 20 474\n",
"3 20 475\n",
"3 20 476\n",
"3 20 477\n",
"3 20 478\n",
"3 20 479\n",
"3 20 480\n",
"3 20 481\n",
"3 20 482\n",
"3 20 483\n",
"3 20 484\n",
"3 20 485\n",
"3 20 486\n",
"3 20 487\n",
"3 20 488\n",
"3 20 489\n",
"3 20 490\n",
"3 20 491\n",
"3 20 492\n",
"3 20 493\n",
"3 20 494\n",
"3 20 495\n",
"3 20 496\n",
"3 20 497\n",
"3 20 498\n",
"3 20 499\n",
"==================== \n",
" n= 5\n",
"5 10 0\n",
"5 10 1\n",
"5 10 2\n",
"5 10 3\n",
"5 10 4\n",
"5 10 5\n",
"5 10 6\n",
"5 10 7\n",
"5 10 8\n",
"5 10 9\n",
"5 10 10\n",
"5 10 11\n",
"5 10 12\n",
"5 10 13\n",
"5 10 14\n",
"5 10 15\n",
"5 10 16\n",
"5 10 17\n",
"5 10 18\n",
"5 10 19\n",
"5 10 20\n",
"5 10 21\n",
"5 10 22\n",
"5 10 23\n",
"5 10 24\n",
"5 10 25\n",
"5 10 26\n",
"5 10 27\n",
"5 10 28\n",
"5 10 29\n",
"5 10 30\n",
"5 10 31\n",
"5 10 32\n",
"5 10 33\n",
"5 10 34\n",
"5 10 35\n",
"5 10 36\n",
"5 10 37\n",
"5 10 38\n",
"5 10 39\n",
"5 10 40\n",
"5 10 41\n",
"5 10 42\n",
"5 10 43\n",
"5 10 44\n",
"5 10 45\n",
"5 10 46\n",
"5 10 47\n",
"5 10 48\n",
"5 10 49\n",
"5 10 50\n",
"5 10 51\n",
"5 10 52\n",
"5 10 53\n",
"5 10 54\n",
"5 10 55\n",
"5 10 56\n",
"5 10 57\n",
"5 10 58\n",
"5 10 59\n",
"5 10 60\n",
"5 10 61\n",
"5 10 62\n",
"5 10 63\n",
"5 10 64\n",
"5 10 65\n",
"5 10 66\n",
"5 10 67\n",
"5 10 68\n",
"5 10 69\n",
"5 10 70\n",
"5 10 71\n",
"5 10 72\n",
"5 10 73\n",
"5 10 74\n",
"5 10 75\n",
"5 10 76\n",
"5 10 77\n",
"5 10 78\n",
"5 10 79\n",
"5 10 80\n",
"5 10 81\n",
"5 10 82\n",
"5 10 83\n",
"5 10 84\n",
"5 10 85\n",
"5 10 86\n",
"5 10 87\n",
"5 10 88\n",
"5 10 89\n",
"5 10 90\n",
"5 10 91\n",
"5 10 92\n",
"5 10 93\n",
"5 10 94\n",
"5 10 95\n",
"5 10 96\n",
"5 10 97\n",
"5 10 98\n",
"5 10 99\n",
"5 10 100\n",
"5 10 101\n",
"5 10 102\n",
"5 10 103\n",
"5 10 104\n",
"5 10 105\n",
"5 10 106\n",
"5 10 107\n",
"5 10 108\n",
"5 10 109\n",
"5 10 110\n",
"5 10 111\n",
"5 10 112\n",
"5 10 113\n",
"5 10 114\n",
"5 10 115\n",
"5 10 116\n",
"5 10 117\n",
"5 10 118\n",
"5 10 119\n",
"5 10 120\n",
"5 10 121\n",
"5 10 122\n",
"5 10 123\n",
"5 10 124\n",
"5 10 125\n",
"5 10 126\n",
"5 10 127\n",
"5 10 128\n",
"5 10 129\n",
"5 10 130\n",
"5 10 131\n",
"5 10 132\n",
"5 10 133\n",
"5 10 134\n",
"5 10 135\n",
"5 10 136\n",
"5 10 137\n",
"5 10 138\n",
"5 10 139\n",
"5 10 140\n",
"5 10 141\n",
"5 10 142\n",
"5 10 143\n",
"5 10 144\n",
"5 10 145\n",
"5 10 146\n",
"5 10 147\n",
"5 10 148\n",
"5 10 149\n",
"5 10 150\n",
"5 10 151\n",
"5 10 152\n",
"5 10 153\n",
"5 10 154\n",
"5 10 155\n",
"5 10 156\n",
"5 10 157\n",
"5 10 158\n",
"5 10 159\n",
"5 10 160\n",
"5 10 161\n",
"5 10 162\n",
"5 10 163\n",
"5 10 164\n",
"5 10 165\n",
"5 10 166\n",
"5 10 167\n",
"5 10 168\n",
"5 10 169\n",
"5 10 170\n",
"5 10 171\n",
"5 10 172\n",
"5 10 173\n",
"5 10 174\n",
"5 10 175\n",
"5 10 176\n",
"5 10 177\n",
"5 10 178\n",
"5 10 179\n",
"5 10 180\n",
"5 10 181\n",
"5 10 182\n",
"5 10 183\n",
"5 10 184\n",
"5 10 185\n",
"5 10 186\n",
"5 10 187\n",
"5 10 188\n",
"5 10 189\n",
"5 10 190\n",
"5 10 191\n",
"5 10 192\n",
"5 10 193\n",
"5 10 194\n",
"5 10 195\n",
"5 10 196\n",
"5 10 197\n",
"5 10 198\n",
"5 10 199\n",
"5 10 200\n",
"5 10 201\n",
"5 10 202\n",
"5 10 203\n",
"5 10 204\n",
"5 10 205\n",
"5 10 206\n",
"5 10 207\n",
"5 10 208\n",
"5 10 209\n",
"5 10 210\n",
"5 10 211\n",
"5 10 212\n",
"5 10 213\n",
"5 10 214\n",
"5 10 215\n",
"5 10 216\n",
"5 10 217\n",
"5 10 218\n",
"5 10 219\n",
"5 10 220\n",
"5 10 221\n",
"5 10 222\n",
"5 10 223\n",
"5 10 224\n",
"5 10 225\n",
"5 10 226\n",
"5 10 227\n",
"5 10 228\n",
"5 10 229\n",
"5 10 230\n",
"5 10 231\n",
"5 10 232\n",
"5 10 233\n",
"5 10 234\n",
"5 10 235\n",
"5 10 236\n",
"5 10 237\n",
"5 10 238\n",
"5 10 239\n",
"5 10 240\n",
"5 10 241\n",
"5 10 242\n",
"5 10 243\n",
"5 10 244\n",
"5 10 245\n",
"5 10 246\n",
"5 10 247\n",
"5 10 248\n",
"5 10 249\n",
"5 10 250\n",
"5 10 251\n",
"5 10 252\n",
"5 10 253\n",
"5 10 254\n",
"5 10 255\n",
"5 10 256\n",
"5 10 257\n",
"5 10 258\n",
"5 10 259\n",
"5 10 260\n",
"5 10 261\n",
"5 10 262\n",
"5 10 263\n",
"5 10 264\n",
"5 10 265\n",
"5 10 266\n",
"5 10 267\n",
"5 10 268\n",
"5 10 269\n",
"5 10 270\n",
"5 10 271\n",
"5 10 272\n",
"5 10 273\n",
"5 10 274\n",
"5 10 275\n",
"5 10 276\n",
"5 10 277\n",
"5 10 278\n",
"5 10 279\n",
"5 10 280\n",
"5 10 281\n",
"5 10 282\n",
"5 10 283\n",
"5 10 284\n",
"5 10 285\n",
"5 10 286\n",
"5 10 287\n",
"5 10 288\n",
"5 10 289\n",
"5 10 290\n",
"5 10 291\n",
"5 10 292\n",
"5 10 293\n",
"5 10 294\n",
"5 10 295\n",
"5 10 296\n",
"5 10 297\n",
"5 10 298\n",
"5 10 299\n",
"5 10 300\n",
"5 10 301\n",
"5 10 302\n",
"5 10 303\n",
"5 10 304\n",
"5 10 305\n",
"5 10 306\n",
"5 10 307\n",
"5 10 308\n",
"5 10 309\n",
"5 10 310\n",
"5 10 311\n",
"5 10 312\n",
"5 10 313\n",
"5 10 314\n",
"5 10 315\n",
"5 10 316\n",
"5 10 317\n",
"5 10 318\n",
"5 10 319\n",
"5 10 320\n",
"5 10 321\n",
"5 10 322\n",
"5 10 323\n",
"5 10 324\n",
"5 10 325\n",
"5 10 326\n",
"5 10 327\n",
"5 10 328\n",
"5 10 329\n",
"5 10 330\n",
"5 10 331\n",
"5 10 332\n",
"5 10 333\n",
"5 10 334\n",
"5 10 335\n",
"5 10 336\n",
"5 10 337\n",
"5 10 338\n",
"5 10 339\n",
"5 10 340\n",
"5 10 341\n",
"5 10 342\n",
"5 10 343\n",
"5 10 344\n",
"5 10 345\n",
"5 10 346\n",
"5 10 347\n",
"5 10 348\n",
"5 10 349\n",
"5 10 350\n",
"5 10 351\n",
"5 10 352\n",
"5 10 353\n",
"5 10 354\n",
"5 10 355\n",
"5 10 356\n",
"5 10 357\n",
"5 10 358\n",
"5 10 359\n",
"5 10 360\n",
"5 10 361\n",
"5 10 362\n",
"5 10 363\n",
"5 10 364\n",
"5 10 365\n",
"5 10 366\n",
"5 10 367\n",
"5 10 368\n",
"5 10 369\n",
"5 10 370\n",
"5 10 371\n",
"5 10 372\n",
"5 10 373\n",
"5 10 374\n",
"5 10 375\n",
"5 10 376\n",
"5 10 377\n",
"5 10 378\n",
"5 10 379\n",
"5 10 380\n",
"5 10 381\n",
"5 10 382\n",
"5 10 383\n",
"5 10 384\n",
"5 10 385\n",
"5 10 386\n",
"5 10 387\n",
"5 10 388\n",
"5 10 389\n",
"5 10 390\n",
"5 10 391\n",
"5 10 392\n",
"5 10 393\n",
"5 10 394\n",
"5 10 395\n",
"5 10 396\n",
"5 10 397\n",
"5 10 398\n",
"5 10 399\n",
"5 10 400\n",
"5 10 401\n",
"5 10 402\n",
"5 10 403\n",
"5 10 404\n",
"5 10 405\n",
"5 10 406\n",
"5 10 407\n",
"5 10 408\n",
"5 10 409\n",
"5 10 410\n",
"5 10 411\n",
"5 10 412\n",
"5 10 413\n",
"5 10 414\n",
"5 10 415\n",
"5 10 416\n",
"5 10 417\n",
"5 10 418\n",
"5 10 419\n",
"5 10 420\n",
"5 10 421\n",
"5 10 422\n",
"5 10 423\n",
"5 10 424\n",
"5 10 425\n",
"5 10 426\n",
"5 10 427\n",
"5 10 428\n",
"5 10 429\n",
"5 10 430\n",
"5 10 431\n",
"5 10 432\n",
"5 10 433\n",
"5 10 434\n",
"5 10 435\n",
"5 10 436\n",
"5 10 437\n",
"5 10 438\n",
"5 10 439\n",
"5 10 440\n",
"5 10 441\n",
"5 10 442\n",
"5 10 443\n",
"5 10 444\n",
"5 10 445\n",
"5 10 446\n",
"5 10 447\n",
"5 10 448\n",
"5 10 449\n",
"5 10 450\n",
"5 10 451\n",
"5 10 452\n",
"5 10 453\n",
"5 10 454\n",
"5 10 455\n",
"5 10 456\n",
"5 10 457\n",
"5 10 458\n",
"5 10 459\n",
"5 10 460\n",
"5 10 461\n",
"5 10 462\n",
"5 10 463\n",
"5 10 464\n",
"5 10 465\n",
"5 10 466\n",
"5 10 467\n",
"5 10 468\n",
"5 10 469\n",
"5 10 470\n",
"5 10 471\n",
"5 10 472\n",
"5 10 473\n",
"5 10 474\n",
"5 10 475\n",
"5 10 476\n",
"5 10 477\n",
"5 10 478\n",
"5 10 479\n",
"5 10 480\n",
"5 10 481\n",
"5 10 482\n",
"5 10 483\n",
"5 10 484\n",
"5 10 485\n",
"5 10 486\n",
"5 10 487\n",
"5 10 488\n",
"5 10 489\n",
"5 10 490\n",
"5 10 491\n",
"5 10 492\n",
"5 10 493\n",
"5 10 494\n",
"5 10 495\n",
"5 10 496\n",
"5 10 497\n",
"5 10 498\n",
"5 10 499\n",
"5 15 0\n",
"5 15 1\n",
"5 15 2\n",
"5 15 3\n",
"5 15 4\n",
"5 15 5\n",
"5 15 6\n",
"5 15 7\n",
"5 15 8\n",
"5 15 9\n",
"5 15 10\n",
"5 15 11\n",
"5 15 12\n",
"5 15 13\n",
"5 15 14\n",
"5 15 15\n",
"5 15 16\n",
"5 15 17\n",
"5 15 18\n",
"5 15 19\n",
"5 15 20\n",
"5 15 21\n",
"5 15 22\n",
"5 15 23\n",
"5 15 24\n",
"5 15 25\n",
"5 15 26\n",
"5 15 27\n",
"5 15 28\n",
"5 15 29\n",
"5 15 30\n",
"5 15 31\n",
"5 15 32\n",
"5 15 33\n",
"5 15 34\n",
"5 15 35\n",
"5 15 36\n",
"5 15 37\n",
"5 15 38\n",
"5 15 39\n",
"5 15 40\n",
"5 15 41\n",
"5 15 42\n",
"5 15 43\n",
"5 15 44\n",
"5 15 45\n",
"5 15 46\n",
"5 15 47\n",
"5 15 48\n",
"5 15 49\n",
"5 15 50\n",
"5 15 51\n",
"5 15 52\n",
"5 15 53\n",
"5 15 54\n",
"5 15 55\n",
"5 15 56\n",
"5 15 57\n",
"5 15 58\n",
"5 15 59\n",
"5 15 60\n",
"5 15 61\n",
"5 15 62\n",
"5 15 63\n",
"5 15 64\n",
"5 15 65\n",
"5 15 66\n",
"5 15 67\n",
"5 15 68\n",
"5 15 69\n",
"5 15 70\n",
"5 15 71\n",
"5 15 72\n",
"5 15 73\n",
"5 15 74\n",
"5 15 75\n",
"5 15 76\n",
"5 15 77\n",
"5 15 78\n",
"5 15 79\n",
"5 15 80\n",
"5 15 81\n",
"5 15 82\n",
"5 15 83\n",
"5 15 84\n",
"5 15 85\n",
"5 15 86\n",
"5 15 87\n",
"5 15 88\n",
"5 15 89\n",
"5 15 90\n",
"5 15 91\n",
"5 15 92\n",
"5 15 93\n",
"5 15 94\n",
"5 15 95\n",
"5 15 96\n",
"5 15 97\n",
"5 15 98\n",
"5 15 99\n",
"5 15 100\n",
"5 15 101\n",
"5 15 102\n",
"5 15 103\n",
"5 15 104\n",
"5 15 105\n",
"5 15 106\n",
"5 15 107\n",
"5 15 108\n",
"5 15 109\n",
"5 15 110\n",
"5 15 111\n",
"5 15 112\n",
"5 15 113\n",
"5 15 114\n",
"5 15 115\n",
"5 15 116\n",
"5 15 117\n",
"5 15 118\n",
"5 15 119\n",
"5 15 120\n",
"5 15 121\n",
"5 15 122\n",
"5 15 123\n",
"5 15 124\n",
"5 15 125\n",
"5 15 126\n",
"5 15 127\n",
"5 15 128\n",
"5 15 129\n",
"5 15 130\n",
"5 15 131\n",
"5 15 132\n",
"5 15 133\n",
"5 15 134\n",
"5 15 135\n",
"5 15 136\n",
"5 15 137\n",
"5 15 138\n",
"5 15 139\n",
"5 15 140\n",
"5 15 141\n",
"5 15 142\n",
"5 15 143\n",
"5 15 144\n",
"5 15 145\n",
"5 15 146\n",
"5 15 147\n",
"5 15 148\n",
"5 15 149\n",
"5 15 150\n",
"5 15 151\n",
"5 15 152\n",
"5 15 153\n",
"5 15 154\n",
"5 15 155\n",
"5 15 156\n",
"5 15 157\n",
"5 15 158\n",
"5 15 159\n",
"5 15 160\n",
"5 15 161\n",
"5 15 162\n",
"5 15 163\n",
"5 15 164\n",
"5 15 165\n",
"5 15 166\n",
"5 15 167\n",
"5 15 168\n",
"5 15 169\n",
"5 15 170\n",
"5 15 171\n",
"5 15 172\n",
"5 15 173\n",
"5 15 174\n",
"5 15 175\n",
"5 15 176\n",
"5 15 177\n",
"5 15 178\n",
"5 15 179\n",
"5 15 180\n",
"5 15 181\n",
"5 15 182\n",
"5 15 183\n",
"5 15 184\n",
"5 15 185\n",
"5 15 186\n",
"5 15 187\n",
"5 15 188\n",
"5 15 189\n",
"5 15 190\n",
"5 15 191\n",
"5 15 192\n",
"5 15 193\n",
"5 15 194\n",
"5 15 195\n",
"5 15 196\n",
"5 15 197\n",
"5 15 198\n",
"5 15 199\n",
"5 15 200\n",
"5 15 201\n",
"5 15 202\n",
"5 15 203\n",
"5 15 204\n",
"5 15 205\n",
"5 15 206\n",
"5 15 207\n",
"5 15 208\n",
"5 15 209\n",
"5 15 210\n",
"5 15 211\n",
"5 15 212\n",
"5 15 213\n",
"5 15 214\n",
"5 15 215\n",
"5 15 216\n",
"5 15 217\n",
"5 15 218\n",
"5 15 219\n",
"5 15 220\n",
"5 15 221\n",
"5 15 222\n",
"5 15 223\n",
"5 15 224\n",
"5 15 225\n",
"5 15 226\n",
"5 15 227\n",
"5 15 228\n",
"5 15 229\n",
"5 15 230\n",
"5 15 231\n",
"5 15 232\n",
"5 15 233\n",
"5 15 234\n",
"5 15 235\n",
"5 15 236\n",
"5 15 237\n",
"5 15 238\n",
"5 15 239\n",
"5 15 240\n",
"5 15 241\n",
"5 15 242\n",
"5 15 243\n",
"5 15 244\n",
"5 15 245\n",
"5 15 246\n",
"5 15 247\n",
"5 15 248\n",
"5 15 249\n",
"5 15 250\n",
"5 15 251\n",
"5 15 252\n",
"5 15 253\n",
"5 15 254\n",
"5 15 255\n",
"5 15 256\n",
"5 15 257\n",
"5 15 258\n",
"5 15 259\n",
"5 15 260\n",
"5 15 261\n",
"5 15 262\n",
"5 15 263\n",
"5 15 264\n",
"5 15 265\n",
"5 15 266\n",
"5 15 267\n",
"5 15 268\n",
"5 15 269\n",
"5 15 270\n",
"5 15 271\n",
"5 15 272\n",
"5 15 273\n",
"5 15 274\n",
"5 15 275\n",
"5 15 276\n",
"5 15 277\n",
"5 15 278\n",
"5 15 279\n",
"5 15 280\n",
"5 15 281\n",
"5 15 282\n",
"5 15 283\n",
"5 15 284\n",
"5 15 285\n",
"5 15 286\n",
"5 15 287\n",
"5 15 288\n",
"5 15 289\n",
"5 15 290\n",
"5 15 291\n",
"5 15 292\n",
"5 15 293\n",
"5 15 294\n",
"5 15 295\n",
"5 15 296\n",
"5 15 297\n",
"5 15 298\n",
"5 15 299\n",
"5 15 300\n",
"5 15 301\n",
"5 15 302\n",
"5 15 303\n",
"5 15 304\n",
"5 15 305\n",
"5 15 306\n",
"5 15 307\n",
"5 15 308\n",
"5 15 309\n",
"5 15 310\n",
"5 15 311\n",
"5 15 312\n",
"5 15 313\n",
"5 15 314\n",
"5 15 315\n",
"5 15 316\n",
"5 15 317\n",
"5 15 318\n",
"5 15 319\n",
"5 15 320\n",
"5 15 321\n",
"5 15 322\n",
"5 15 323\n",
"5 15 324\n",
"5 15 325\n",
"5 15 326\n",
"5 15 327\n",
"5 15 328\n",
"5 15 329\n",
"5 15 330\n",
"5 15 331\n",
"5 15 332\n",
"5 15 333\n",
"5 15 334\n",
"5 15 335\n",
"5 15 336\n",
"5 15 337\n",
"5 15 338\n",
"5 15 339\n",
"5 15 340\n",
"5 15 341\n",
"5 15 342\n",
"5 15 343\n",
"5 15 344\n",
"5 15 345\n",
"5 15 346\n",
"5 15 347\n",
"5 15 348\n",
"5 15 349\n",
"5 15 350\n",
"5 15 351\n",
"5 15 352\n",
"5 15 353\n",
"5 15 354\n",
"5 15 355\n",
"5 15 356\n",
"5 15 357\n",
"5 15 358\n",
"5 15 359\n",
"5 15 360\n",
"5 15 361\n",
"5 15 362\n",
"5 15 363\n",
"5 15 364\n",
"5 15 365\n",
"5 15 366\n",
"5 15 367\n",
"5 15 368\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"5 15 369\n",
"5 15 370\n",
"5 15 371\n",
"5 15 372\n",
"5 15 373\n",
"5 15 374\n",
"5 15 375\n",
"5 15 376\n",
"5 15 377\n",
"5 15 378\n",
"5 15 379\n",
"5 15 380\n",
"5 15 381\n",
"5 15 382\n",
"5 15 383\n",
"5 15 384\n",
"5 15 385\n",
"5 15 386\n",
"5 15 387\n",
"5 15 388\n",
"5 15 389\n",
"5 15 390\n",
"5 15 391\n",
"5 15 392\n",
"5 15 393\n",
"5 15 394\n",
"5 15 395\n",
"5 15 396\n",
"5 15 397\n",
"5 15 398\n",
"5 15 399\n",
"5 15 400\n",
"5 15 401\n",
"5 15 402\n",
"5 15 403\n",
"5 15 404\n",
"5 15 405\n",
"5 15 406\n",
"5 15 407\n",
"5 15 408\n",
"5 15 409\n",
"5 15 410\n",
"5 15 411\n",
"5 15 412\n",
"5 15 413\n",
"5 15 414\n",
"5 15 415\n",
"5 15 416\n",
"5 15 417\n",
"5 15 418\n",
"5 15 419\n",
"5 15 420\n",
"5 15 421\n",
"5 15 422\n",
"5 15 423\n",
"5 15 424\n",
"5 15 425\n",
"5 15 426\n",
"5 15 427\n",
"5 15 428\n",
"5 15 429\n",
"5 15 430\n",
"5 15 431\n",
"5 15 432\n",
"5 15 433\n",
"5 15 434\n",
"5 15 435\n",
"5 15 436\n",
"5 15 437\n",
"5 15 438\n",
"5 15 439\n",
"5 15 440\n",
"5 15 441\n",
"5 15 442\n",
"5 15 443\n",
"5 15 444\n",
"5 15 445\n",
"5 15 446\n",
"5 15 447\n",
"5 15 448\n",
"5 15 449\n",
"5 15 450\n",
"5 15 451\n",
"5 15 452\n",
"5 15 453\n",
"5 15 454\n",
"5 15 455\n",
"5 15 456\n",
"5 15 457\n",
"5 15 458\n",
"5 15 459\n",
"5 15 460\n",
"5 15 461\n",
"5 15 462\n",
"5 15 463\n",
"5 15 464\n",
"5 15 465\n",
"5 15 466\n",
"5 15 467\n",
"5 15 468\n",
"5 15 469\n",
"5 15 470\n",
"5 15 471\n",
"5 15 472\n",
"5 15 473\n",
"5 15 474\n",
"5 15 475\n",
"5 15 476\n",
"5 15 477\n",
"5 15 478\n",
"5 15 479\n",
"5 15 480\n",
"5 15 481\n",
"5 15 482\n",
"5 15 483\n",
"5 15 484\n",
"5 15 485\n",
"5 15 486\n",
"5 15 487\n",
"5 15 488\n",
"5 15 489\n",
"5 15 490\n",
"5 15 491\n",
"5 15 492\n",
"5 15 493\n",
"5 15 494\n",
"5 15 495\n",
"5 15 496\n",
"5 15 497\n",
"5 15 498\n",
"5 15 499\n",
"5 20 0\n",
"5 20 1\n",
"5 20 2\n",
"5 20 3\n",
"5 20 4\n",
"5 20 5\n",
"5 20 6\n",
"5 20 7\n",
"5 20 8\n",
"5 20 9\n",
"5 20 10\n",
"5 20 11\n",
"5 20 12\n",
"5 20 13\n",
"5 20 14\n",
"5 20 15\n",
"5 20 16\n",
"5 20 17\n",
"5 20 18\n",
"5 20 19\n",
"5 20 20\n",
"5 20 21\n",
"5 20 22\n",
"5 20 23\n",
"5 20 24\n",
"5 20 25\n",
"5 20 26\n",
"5 20 27\n",
"5 20 28\n",
"5 20 29\n",
"5 20 30\n",
"5 20 31\n",
"5 20 32\n",
"5 20 33\n",
"5 20 34\n",
"5 20 35\n",
"5 20 36\n",
"5 20 37\n",
"5 20 38\n",
"5 20 39\n",
"5 20 40\n",
"5 20 41\n",
"5 20 42\n",
"5 20 43\n",
"5 20 44\n",
"5 20 45\n",
"5 20 46\n",
"5 20 47\n",
"5 20 48\n",
"5 20 49\n",
"5 20 50\n",
"5 20 51\n",
"5 20 52\n",
"5 20 53\n",
"5 20 54\n",
"5 20 55\n",
"5 20 56\n",
"5 20 57\n",
"5 20 58\n",
"5 20 59\n",
"5 20 60\n",
"5 20 61\n",
"5 20 62\n",
"5 20 63\n",
"5 20 64\n",
"5 20 65\n",
"5 20 66\n",
"5 20 67\n",
"5 20 68\n",
"5 20 69\n",
"5 20 70\n",
"5 20 71\n",
"5 20 72\n",
"5 20 73\n",
"5 20 74\n",
"5 20 75\n",
"5 20 76\n",
"5 20 77\n",
"5 20 78\n",
"5 20 79\n",
"5 20 80\n",
"5 20 81\n",
"5 20 82\n",
"5 20 83\n",
"5 20 84\n",
"5 20 85\n",
"5 20 86\n",
"5 20 87\n",
"5 20 88\n",
"5 20 89\n",
"5 20 90\n",
"5 20 91\n",
"5 20 92\n",
"5 20 93\n",
"5 20 94\n",
"5 20 95\n",
"5 20 96\n",
"5 20 97\n",
"5 20 98\n",
"5 20 99\n",
"5 20 100\n",
"5 20 101\n",
"5 20 102\n",
"5 20 103\n",
"5 20 104\n",
"5 20 105\n",
"5 20 106\n",
"5 20 107\n",
"5 20 108\n",
"5 20 109\n",
"5 20 110\n",
"5 20 111\n",
"5 20 112\n",
"5 20 113\n",
"5 20 114\n",
"5 20 115\n",
"5 20 116\n",
"5 20 117\n",
"5 20 118\n",
"5 20 119\n",
"5 20 120\n",
"5 20 121\n",
"5 20 122\n",
"5 20 123\n",
"5 20 124\n",
"5 20 125\n",
"5 20 126\n",
"5 20 127\n",
"5 20 128\n",
"5 20 129\n",
"5 20 130\n",
"5 20 131\n",
"5 20 132\n",
"5 20 133\n",
"5 20 134\n",
"5 20 135\n",
"5 20 136\n",
"5 20 137\n",
"5 20 138\n",
"5 20 139\n",
"5 20 140\n",
"5 20 141\n",
"5 20 142\n",
"5 20 143\n",
"5 20 144\n",
"5 20 145\n",
"5 20 146\n",
"5 20 147\n",
"5 20 148\n",
"5 20 149\n",
"5 20 150\n",
"5 20 151\n",
"5 20 152\n",
"5 20 153\n",
"5 20 154\n",
"5 20 155\n",
"5 20 156\n",
"5 20 157\n",
"5 20 158\n",
"5 20 159\n",
"5 20 160\n",
"5 20 161\n",
"5 20 162\n",
"5 20 163\n",
"5 20 164\n",
"5 20 165\n",
"5 20 166\n",
"5 20 167\n",
"5 20 168\n",
"5 20 169\n",
"5 20 170\n",
"5 20 171\n",
"5 20 172\n",
"5 20 173\n",
"5 20 174\n",
"5 20 175\n",
"5 20 176\n",
"5 20 177\n",
"5 20 178\n",
"5 20 179\n",
"5 20 180\n",
"5 20 181\n",
"5 20 182\n",
"5 20 183\n",
"5 20 184\n",
"5 20 185\n",
"5 20 186\n",
"5 20 187\n",
"5 20 188\n",
"5 20 189\n",
"5 20 190\n",
"5 20 191\n",
"5 20 192\n",
"5 20 193\n",
"5 20 194\n",
"5 20 195\n",
"5 20 196\n",
"5 20 197\n",
"5 20 198\n",
"5 20 199\n",
"5 20 200\n",
"5 20 201\n",
"5 20 202\n",
"5 20 203\n",
"5 20 204\n",
"5 20 205\n",
"5 20 206\n",
"5 20 207\n",
"5 20 208\n",
"5 20 209\n",
"5 20 210\n",
"5 20 211\n",
"5 20 212\n",
"5 20 213\n",
"5 20 214\n",
"5 20 215\n",
"5 20 216\n",
"5 20 217\n",
"5 20 218\n",
"5 20 219\n",
"5 20 220\n",
"5 20 221\n",
"5 20 222\n",
"5 20 223\n",
"5 20 224\n",
"5 20 225\n",
"5 20 226\n",
"5 20 227\n",
"5 20 228\n",
"5 20 229\n",
"5 20 230\n",
"5 20 231\n",
"5 20 232\n",
"5 20 233\n",
"5 20 234\n",
"5 20 235\n",
"5 20 236\n",
"5 20 237\n",
"5 20 238\n",
"5 20 239\n",
"5 20 240\n",
"5 20 241\n",
"5 20 242\n",
"5 20 243\n",
"5 20 244\n",
"5 20 245\n",
"5 20 246\n",
"5 20 247\n",
"5 20 248\n",
"5 20 249\n",
"5 20 250\n",
"5 20 251\n",
"5 20 252\n",
"5 20 253\n",
"5 20 254\n",
"5 20 255\n",
"5 20 256\n",
"5 20 257\n",
"5 20 258\n",
"5 20 259\n",
"5 20 260\n",
"5 20 261\n",
"5 20 262\n",
"5 20 263\n",
"5 20 264\n",
"5 20 265\n",
"5 20 266\n",
"5 20 267\n",
"5 20 268\n",
"5 20 269\n",
"5 20 270\n",
"5 20 271\n",
"5 20 272\n",
"5 20 273\n",
"5 20 274\n",
"5 20 275\n",
"5 20 276\n",
"5 20 277\n",
"5 20 278\n",
"5 20 279\n",
"5 20 280\n",
"5 20 281\n",
"5 20 282\n",
"5 20 283\n",
"5 20 284\n",
"5 20 285\n",
"5 20 286\n",
"5 20 287\n",
"5 20 288\n",
"5 20 289\n",
"5 20 290\n",
"5 20 291\n",
"5 20 292\n",
"5 20 293\n",
"5 20 294\n",
"5 20 295\n",
"5 20 296\n",
"5 20 297\n",
"5 20 298\n",
"5 20 299\n",
"5 20 300\n",
"5 20 301\n",
"5 20 302\n",
"5 20 303\n",
"5 20 304\n",
"5 20 305\n",
"5 20 306\n",
"5 20 307\n",
"5 20 308\n",
"5 20 309\n",
"5 20 310\n",
"5 20 311\n",
"5 20 312\n",
"5 20 313\n",
"5 20 314\n",
"5 20 315\n",
"5 20 316\n",
"5 20 317\n",
"5 20 318\n",
"5 20 319\n",
"5 20 320\n",
"5 20 321\n",
"5 20 322\n",
"5 20 323\n",
"5 20 324\n",
"5 20 325\n",
"5 20 326\n",
"5 20 327\n",
"5 20 328\n",
"5 20 329\n",
"5 20 330\n",
"5 20 331\n",
"5 20 332\n",
"5 20 333\n",
"5 20 334\n",
"5 20 335\n",
"5 20 336\n",
"5 20 337\n",
"5 20 338\n",
"5 20 339\n",
"5 20 340\n",
"5 20 341\n",
"5 20 342\n",
"5 20 343\n",
"5 20 344\n",
"5 20 345\n",
"5 20 346\n",
"5 20 347\n",
"5 20 348\n",
"5 20 349\n",
"5 20 350\n",
"5 20 351\n",
"5 20 352\n",
"5 20 353\n",
"5 20 354\n",
"5 20 355\n",
"5 20 356\n",
"5 20 357\n",
"5 20 358\n",
"5 20 359\n",
"5 20 360\n",
"5 20 361\n",
"5 20 362\n",
"5 20 363\n",
"5 20 364\n",
"5 20 365\n",
"5 20 366\n",
"5 20 367\n",
"5 20 368\n",
"5 20 369\n",
"5 20 370\n",
"5 20 371\n",
"5 20 372\n",
"5 20 373\n",
"5 20 374\n",
"5 20 375\n",
"5 20 376\n",
"5 20 377\n",
"5 20 378\n",
"5 20 379\n",
"5 20 380\n",
"5 20 381\n",
"5 20 382\n",
"5 20 383\n",
"5 20 384\n",
"5 20 385\n",
"5 20 386\n",
"5 20 387\n",
"5 20 388\n",
"5 20 389\n",
"5 20 390\n",
"5 20 391\n",
"5 20 392\n",
"5 20 393\n",
"5 20 394\n",
"5 20 395\n",
"5 20 396\n",
"5 20 397\n",
"5 20 398\n",
"5 20 399\n",
"5 20 400\n",
"5 20 401\n",
"5 20 402\n",
"5 20 403\n",
"5 20 404\n",
"5 20 405\n",
"5 20 406\n",
"5 20 407\n",
"5 20 408\n",
"5 20 409\n",
"5 20 410\n",
"5 20 411\n",
"5 20 412\n",
"5 20 413\n",
"5 20 414\n",
"5 20 415\n",
"5 20 416\n",
"5 20 417\n",
"5 20 418\n",
"5 20 419\n",
"5 20 420\n",
"5 20 421\n",
"5 20 422\n",
"5 20 423\n",
"5 20 424\n",
"5 20 425\n",
"5 20 426\n",
"5 20 427\n",
"5 20 428\n",
"5 20 429\n",
"5 20 430\n",
"5 20 431\n",
"5 20 432\n",
"5 20 433\n",
"5 20 434\n",
"5 20 435\n",
"5 20 436\n",
"5 20 437\n",
"5 20 438\n",
"5 20 439\n",
"5 20 440\n",
"5 20 441\n",
"5 20 442\n",
"5 20 443\n",
"5 20 444\n",
"5 20 445\n",
"5 20 446\n",
"5 20 447\n",
"5 20 448\n",
"5 20 449\n",
"5 20 450\n",
"5 20 451\n",
"5 20 452\n",
"5 20 453\n",
"5 20 454\n",
"5 20 455\n",
"5 20 456\n",
"5 20 457\n",
"5 20 458\n",
"5 20 459\n",
"5 20 460\n",
"5 20 461\n",
"5 20 462\n",
"5 20 463\n",
"5 20 464\n",
"5 20 465\n",
"5 20 466\n",
"5 20 467\n",
"5 20 468\n",
"5 20 469\n",
"5 20 470\n",
"5 20 471\n",
"5 20 472\n",
"5 20 473\n",
"5 20 474\n",
"5 20 475\n",
"5 20 476\n",
"5 20 477\n",
"5 20 478\n",
"5 20 479\n",
"5 20 480\n",
"5 20 481\n",
"5 20 482\n",
"5 20 483\n",
"5 20 484\n",
"5 20 485\n",
"5 20 486\n",
"5 20 487\n",
"5 20 488\n",
"5 20 489\n",
"5 20 490\n",
"5 20 491\n",
"5 20 492\n",
"5 20 493\n",
"5 20 494\n",
"5 20 495\n",
"5 20 496\n",
"5 20 497\n",
"5 20 498\n",
"5 20 499\n",
"==================== \n",
" n= 10\n",
"10 15 0\n",
"10 15 1\n",
"10 15 2\n",
"10 15 3\n",
"10 15 4\n",
"10 15 5\n",
"10 15 6\n",
"10 15 7\n",
"10 15 8\n",
"10 15 9\n",
"10 15 10\n",
"10 15 11\n",
"10 15 12\n",
"10 15 13\n",
"10 15 14\n",
"10 15 15\n",
"10 15 16\n",
"10 15 17\n",
"10 15 18\n",
"10 15 19\n",
"10 15 20\n",
"10 15 21\n",
"10 15 22\n",
"10 15 23\n",
"10 15 24\n",
"10 15 25\n",
"10 15 26\n",
"10 15 27\n",
"10 15 28\n",
"10 15 29\n",
"10 15 30\n",
"10 15 31\n",
"10 15 32\n",
"10 15 33\n",
"10 15 34\n",
"10 15 35\n",
"10 15 36\n",
"10 15 37\n",
"10 15 38\n",
"10 15 39\n",
"10 15 40\n",
"10 15 41\n",
"10 15 42\n",
"10 15 43\n",
"10 15 44\n",
"10 15 45\n",
"10 15 46\n",
"10 15 47\n",
"10 15 48\n",
"10 15 49\n",
"10 15 50\n",
"10 15 51\n",
"10 15 52\n",
"10 15 53\n",
"10 15 54\n",
"10 15 55\n",
"10 15 56\n",
"10 15 57\n",
"10 15 58\n",
"10 15 59\n",
"10 15 60\n",
"10 15 61\n",
"10 15 62\n",
"10 15 63\n",
"10 15 64\n",
"10 15 65\n",
"10 15 66\n",
"10 15 67\n",
"10 15 68\n",
"10 15 69\n",
"10 15 70\n",
"10 15 71\n",
"10 15 72\n",
"10 15 73\n",
"10 15 74\n",
"10 15 75\n",
"10 15 76\n",
"10 15 77\n",
"10 15 78\n",
"10 15 79\n",
"10 15 80\n",
"10 15 81\n",
"10 15 82\n",
"10 15 83\n",
"10 15 84\n",
"10 15 85\n",
"10 15 86\n",
"10 15 87\n",
"10 15 88\n",
"10 15 89\n",
"10 15 90\n",
"10 15 91\n",
"10 15 92\n",
"10 15 93\n",
"10 15 94\n",
"10 15 95\n",
"10 15 96\n",
"10 15 97\n",
"10 15 98\n",
"10 15 99\n",
"10 15 100\n",
"10 15 101\n",
"10 15 102\n",
"10 15 103\n",
"10 15 104\n",
"10 15 105\n",
"10 15 106\n",
"10 15 107\n",
"10 15 108\n",
"10 15 109\n",
"10 15 110\n",
"10 15 111\n",
"10 15 112\n",
"10 15 113\n",
"10 15 114\n",
"10 15 115\n",
"10 15 116\n",
"10 15 117\n",
"10 15 118\n",
"10 15 119\n",
"10 15 120\n",
"10 15 121\n",
"10 15 122\n",
"10 15 123\n",
"10 15 124\n",
"10 15 125\n",
"10 15 126\n",
"10 15 127\n",
"10 15 128\n",
"10 15 129\n",
"10 15 130\n",
"10 15 131\n",
"10 15 132\n",
"10 15 133\n",
"10 15 134\n",
"10 15 135\n",
"10 15 136\n",
"10 15 137\n",
"10 15 138\n",
"10 15 139\n",
"10 15 140\n",
"10 15 141\n",
"10 15 142\n",
"10 15 143\n",
"10 15 144\n",
"10 15 145\n",
"10 15 146\n",
"10 15 147\n",
"10 15 148\n",
"10 15 149\n",
"10 15 150\n",
"10 15 151\n",
"10 15 152\n",
"10 15 153\n",
"10 15 154\n",
"10 15 155\n",
"10 15 156\n",
"10 15 157\n",
"10 15 158\n",
"10 15 159\n",
"10 15 160\n",
"10 15 161\n",
"10 15 162\n",
"10 15 163\n",
"10 15 164\n",
"10 15 165\n",
"10 15 166\n",
"10 15 167\n",
"10 15 168\n",
"10 15 169\n",
"10 15 170\n",
"10 15 171\n",
"10 15 172\n",
"10 15 173\n",
"10 15 174\n",
"10 15 175\n",
"10 15 176\n",
"10 15 177\n",
"10 15 178\n",
"10 15 179\n",
"10 15 180\n",
"10 15 181\n",
"10 15 182\n",
"10 15 183\n",
"10 15 184\n",
"10 15 185\n",
"10 15 186\n",
"10 15 187\n",
"10 15 188\n",
"10 15 189\n",
"10 15 190\n",
"10 15 191\n",
"10 15 192\n",
"10 15 193\n",
"10 15 194\n",
"10 15 195\n",
"10 15 196\n",
"10 15 197\n",
"10 15 198\n",
"10 15 199\n",
"10 15 200\n",
"10 15 201\n",
"10 15 202\n",
"10 15 203\n",
"10 15 204\n",
"10 15 205\n",
"10 15 206\n",
"10 15 207\n",
"10 15 208\n",
"10 15 209\n",
"10 15 210\n",
"10 15 211\n",
"10 15 212\n",
"10 15 213\n",
"10 15 214\n",
"10 15 215\n",
"10 15 216\n",
"10 15 217\n",
"10 15 218\n",
"10 15 219\n",
"10 15 220\n",
"10 15 221\n",
"10 15 222\n",
"10 15 223\n",
"10 15 224\n",
"10 15 225\n",
"10 15 226\n",
"10 15 227\n",
"10 15 228\n",
"10 15 229\n",
"10 15 230\n",
"10 15 231\n",
"10 15 232\n",
"10 15 233\n",
"10 15 234\n",
"10 15 235\n",
"10 15 236\n",
"10 15 237\n",
"10 15 238\n",
"10 15 239\n",
"10 15 240\n",
"10 15 241\n",
"10 15 242\n",
"10 15 243\n",
"10 15 244\n",
"10 15 245\n",
"10 15 246\n",
"10 15 247\n",
"10 15 248\n",
"10 15 249\n",
"10 15 250\n",
"10 15 251\n",
"10 15 252\n",
"10 15 253\n",
"10 15 254\n",
"10 15 255\n",
"10 15 256\n",
"10 15 257\n",
"10 15 258\n",
"10 15 259\n",
"10 15 260\n",
"10 15 261\n",
"10 15 262\n",
"10 15 263\n",
"10 15 264\n",
"10 15 265\n",
"10 15 266\n",
"10 15 267\n",
"10 15 268\n",
"10 15 269\n",
"10 15 270\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"10 15 271\n",
"10 15 272\n",
"10 15 273\n",
"10 15 274\n",
"10 15 275\n",
"10 15 276\n",
"10 15 277\n",
"10 15 278\n",
"10 15 279\n",
"10 15 280\n",
"10 15 281\n",
"10 15 282\n",
"10 15 283\n",
"10 15 284\n",
"10 15 285\n",
"10 15 286\n",
"10 15 287\n",
"10 15 288\n",
"10 15 289\n",
"10 15 290\n",
"10 15 291\n",
"10 15 292\n",
"10 15 293\n",
"10 15 294\n",
"10 15 295\n",
"10 15 296\n",
"10 15 297\n",
"10 15 298\n",
"10 15 299\n",
"10 15 300\n",
"10 15 301\n",
"10 15 302\n",
"10 15 303\n",
"10 15 304\n",
"10 15 305\n",
"10 15 306\n",
"10 15 307\n",
"10 15 308\n",
"10 15 309\n",
"10 15 310\n",
"10 15 311\n",
"10 15 312\n",
"10 15 313\n",
"10 15 314\n",
"10 15 315\n",
"10 15 316\n",
"10 15 317\n",
"10 15 318\n",
"10 15 319\n",
"10 15 320\n",
"10 15 321\n",
"10 15 322\n",
"10 15 323\n",
"10 15 324\n",
"10 15 325\n",
"10 15 326\n",
"10 15 327\n",
"10 15 328\n",
"10 15 329\n",
"10 15 330\n",
"10 15 331\n",
"10 15 332\n",
"10 15 333\n",
"10 15 334\n",
"10 15 335\n",
"10 15 336\n",
"10 15 337\n",
"10 15 338\n",
"10 15 339\n",
"10 15 340\n",
"10 15 341\n",
"10 15 342\n",
"10 15 343\n",
"10 15 344\n",
"10 15 345\n",
"10 15 346\n",
"10 15 347\n",
"10 15 348\n",
"10 15 349\n",
"10 15 350\n",
"10 15 351\n",
"10 15 352\n",
"10 15 353\n",
"10 15 354\n",
"10 15 355\n",
"10 15 356\n",
"10 15 357\n",
"10 15 358\n",
"10 15 359\n",
"10 15 360\n",
"10 15 361\n",
"10 15 362\n",
"10 15 363\n",
"10 15 364\n",
"10 15 365\n",
"10 15 366\n",
"10 15 367\n",
"10 15 368\n",
"10 15 369\n",
"10 15 370\n",
"10 15 371\n",
"10 15 372\n",
"10 15 373\n",
"10 15 374\n",
"10 15 375\n",
"10 15 376\n",
"10 15 377\n",
"10 15 378\n",
"10 15 379\n",
"10 15 380\n",
"10 15 381\n",
"10 15 382\n",
"10 15 383\n",
"10 15 384\n",
"10 15 385\n",
"10 15 386\n",
"10 15 387\n",
"10 15 388\n",
"10 15 389\n",
"10 15 390\n",
"10 15 391\n",
"10 15 392\n",
"10 15 393\n",
"10 15 394\n",
"10 15 395\n",
"10 15 396\n",
"10 15 397\n",
"10 15 398\n",
"10 15 399\n",
"10 15 400\n",
"10 15 401\n",
"10 15 402\n",
"10 15 403\n",
"10 15 404\n",
"10 15 405\n",
"10 15 406\n",
"10 15 407\n",
"10 15 408\n",
"10 15 409\n",
"10 15 410\n",
"10 15 411\n",
"10 15 412\n",
"10 15 413\n",
"10 15 414\n",
"10 15 415\n",
"10 15 416\n",
"10 15 417\n",
"10 15 418\n",
"10 15 419\n",
"10 15 420\n",
"10 15 421\n",
"10 15 422\n",
"10 15 423\n",
"10 15 424\n",
"10 15 425\n",
"10 15 426\n",
"10 15 427\n",
"10 15 428\n",
"10 15 429\n",
"10 15 430\n",
"10 15 431\n",
"10 15 432\n",
"10 15 433\n",
"10 15 434\n",
"10 15 435\n",
"10 15 436\n",
"10 15 437\n",
"10 15 438\n",
"10 15 439\n",
"10 15 440\n",
"10 15 441\n",
"10 15 442\n",
"10 15 443\n",
"10 15 444\n",
"10 15 445\n",
"10 15 446\n",
"10 15 447\n",
"10 15 448\n",
"10 15 449\n",
"10 15 450\n",
"10 15 451\n",
"10 15 452\n",
"10 15 453\n",
"10 15 454\n",
"10 15 455\n",
"10 15 456\n",
"10 15 457\n",
"10 15 458\n",
"10 15 459\n",
"10 15 460\n",
"10 15 461\n",
"10 15 462\n",
"10 15 463\n",
"10 15 464\n",
"10 15 465\n",
"10 15 466\n",
"10 15 467\n",
"10 15 468\n",
"10 15 469\n",
"10 15 470\n",
"10 15 471\n",
"10 15 472\n",
"10 15 473\n",
"10 15 474\n",
"10 15 475\n",
"10 15 476\n",
"10 15 477\n",
"10 15 478\n",
"10 15 479\n",
"10 15 480\n",
"10 15 481\n",
"10 15 482\n",
"10 15 483\n",
"10 15 484\n",
"10 15 485\n",
"10 15 486\n",
"10 15 487\n",
"10 15 488\n",
"10 15 489\n",
"10 15 490\n",
"10 15 491\n",
"10 15 492\n",
"10 15 493\n",
"10 15 494\n",
"10 15 495\n",
"10 15 496\n",
"10 15 497\n",
"10 15 498\n",
"10 15 499\n",
"10 20 0\n",
"10 20 1\n",
"10 20 2\n",
"10 20 3\n",
"10 20 4\n",
"10 20 5\n",
"10 20 6\n",
"10 20 7\n",
"10 20 8\n",
"10 20 9\n",
"10 20 10\n",
"10 20 11\n",
"10 20 12\n",
"10 20 13\n",
"10 20 14\n",
"10 20 15\n",
"10 20 16\n",
"10 20 17\n",
"10 20 18\n",
"10 20 19\n",
"10 20 20\n",
"10 20 21\n",
"10 20 22\n",
"10 20 23\n",
"10 20 24\n",
"10 20 25\n",
"10 20 26\n",
"10 20 27\n",
"10 20 28\n",
"10 20 29\n",
"10 20 30\n",
"10 20 31\n",
"10 20 32\n",
"10 20 33\n",
"10 20 34\n",
"10 20 35\n",
"10 20 36\n",
"10 20 37\n",
"10 20 38\n",
"10 20 39\n",
"10 20 40\n",
"10 20 41\n",
"10 20 42\n",
"10 20 43\n",
"10 20 44\n",
"10 20 45\n",
"10 20 46\n",
"10 20 47\n",
"10 20 48\n",
"10 20 49\n",
"10 20 50\n",
"10 20 51\n",
"10 20 52\n",
"10 20 53\n",
"10 20 54\n",
"10 20 55\n",
"10 20 56\n",
"10 20 57\n",
"10 20 58\n",
"10 20 59\n",
"10 20 60\n",
"10 20 61\n",
"10 20 62\n",
"10 20 63\n",
"10 20 64\n",
"10 20 65\n",
"10 20 66\n",
"10 20 67\n",
"10 20 68\n",
"10 20 69\n",
"10 20 70\n",
"10 20 71\n",
"10 20 72\n",
"10 20 73\n",
"10 20 74\n",
"10 20 75\n",
"10 20 76\n",
"10 20 77\n",
"10 20 78\n",
"10 20 79\n",
"10 20 80\n",
"10 20 81\n",
"10 20 82\n",
"10 20 83\n",
"10 20 84\n",
"10 20 85\n",
"10 20 86\n",
"10 20 87\n",
"10 20 88\n",
"10 20 89\n",
"10 20 90\n",
"10 20 91\n",
"10 20 92\n",
"10 20 93\n",
"10 20 94\n",
"10 20 95\n",
"10 20 96\n",
"10 20 97\n",
"10 20 98\n",
"10 20 99\n",
"10 20 100\n",
"10 20 101\n",
"10 20 102\n",
"10 20 103\n",
"10 20 104\n",
"10 20 105\n",
"10 20 106\n",
"10 20 107\n",
"10 20 108\n",
"10 20 109\n",
"10 20 110\n",
"10 20 111\n",
"10 20 112\n",
"10 20 113\n",
"10 20 114\n",
"10 20 115\n",
"10 20 116\n",
"10 20 117\n",
"10 20 118\n",
"10 20 119\n",
"10 20 120\n",
"10 20 121\n",
"10 20 122\n",
"10 20 123\n",
"10 20 124\n",
"10 20 125\n",
"10 20 126\n",
"10 20 127\n",
"10 20 128\n",
"10 20 129\n",
"10 20 130\n",
"10 20 131\n",
"10 20 132\n",
"10 20 133\n",
"10 20 134\n",
"10 20 135\n",
"10 20 136\n",
"10 20 137\n",
"10 20 138\n",
"10 20 139\n",
"10 20 140\n",
"10 20 141\n",
"10 20 142\n",
"10 20 143\n",
"10 20 144\n",
"10 20 145\n",
"10 20 146\n",
"10 20 147\n",
"10 20 148\n",
"10 20 149\n",
"10 20 150\n",
"10 20 151\n",
"10 20 152\n",
"10 20 153\n",
"10 20 154\n",
"10 20 155\n",
"10 20 156\n",
"10 20 157\n",
"10 20 158\n",
"10 20 159\n",
"10 20 160\n",
"10 20 161\n",
"10 20 162\n",
"10 20 163\n",
"10 20 164\n",
"10 20 165\n",
"10 20 166\n",
"10 20 167\n",
"10 20 168\n",
"10 20 169\n",
"10 20 170\n",
"10 20 171\n",
"10 20 172\n",
"10 20 173\n",
"10 20 174\n",
"10 20 175\n",
"10 20 176\n",
"10 20 177\n",
"10 20 178\n",
"10 20 179\n",
"10 20 180\n",
"10 20 181\n",
"10 20 182\n",
"10 20 183\n",
"10 20 184\n",
"10 20 185\n",
"10 20 186\n",
"10 20 187\n",
"10 20 188\n",
"10 20 189\n",
"10 20 190\n",
"10 20 191\n",
"10 20 192\n",
"10 20 193\n",
"10 20 194\n",
"10 20 195\n",
"10 20 196\n",
"10 20 197\n",
"10 20 198\n",
"10 20 199\n",
"10 20 200\n",
"10 20 201\n",
"10 20 202\n",
"10 20 203\n",
"10 20 204\n",
"10 20 205\n",
"10 20 206\n",
"10 20 207\n",
"10 20 208\n",
"10 20 209\n",
"10 20 210\n",
"10 20 211\n",
"10 20 212\n",
"10 20 213\n",
"10 20 214\n",
"10 20 215\n",
"10 20 216\n",
"10 20 217\n",
"10 20 218\n",
"10 20 219\n",
"10 20 220\n",
"10 20 221\n",
"10 20 222\n",
"10 20 223\n",
"10 20 224\n",
"10 20 225\n",
"10 20 226\n",
"10 20 227\n",
"10 20 228\n",
"10 20 229\n",
"10 20 230\n",
"10 20 231\n",
"10 20 232\n",
"10 20 233\n",
"10 20 234\n",
"10 20 235\n",
"10 20 236\n",
"10 20 237\n",
"10 20 238\n",
"10 20 239\n",
"10 20 240\n",
"10 20 241\n",
"10 20 242\n",
"10 20 243\n",
"10 20 244\n",
"10 20 245\n",
"10 20 246\n",
"10 20 247\n",
"10 20 248\n",
"10 20 249\n",
"10 20 250\n",
"10 20 251\n",
"10 20 252\n",
"10 20 253\n",
"10 20 254\n",
"10 20 255\n",
"10 20 256\n",
"10 20 257\n",
"10 20 258\n",
"10 20 259\n",
"10 20 260\n",
"10 20 261\n",
"10 20 262\n",
"10 20 263\n",
"10 20 264\n",
"10 20 265\n",
"10 20 266\n",
"10 20 267\n",
"10 20 268\n",
"10 20 269\n",
"10 20 270\n",
"10 20 271\n",
"10 20 272\n",
"10 20 273\n",
"10 20 274\n",
"10 20 275\n",
"10 20 276\n",
"10 20 277\n",
"10 20 278\n",
"10 20 279\n",
"10 20 280\n",
"10 20 281\n",
"10 20 282\n",
"10 20 283\n",
"10 20 284\n",
"10 20 285\n",
"10 20 286\n",
"10 20 287\n",
"10 20 288\n",
"10 20 289\n",
"10 20 290\n",
"10 20 291\n",
"10 20 292\n",
"10 20 293\n",
"10 20 294\n",
"10 20 295\n",
"10 20 296\n",
"10 20 297\n",
"10 20 298\n",
"10 20 299\n",
"10 20 300\n",
"10 20 301\n",
"10 20 302\n",
"10 20 303\n",
"10 20 304\n",
"10 20 305\n",
"10 20 306\n",
"10 20 307\n",
"10 20 308\n",
"10 20 309\n",
"10 20 310\n",
"10 20 311\n",
"10 20 312\n",
"10 20 313\n",
"10 20 314\n",
"10 20 315\n",
"10 20 316\n",
"10 20 317\n",
"10 20 318\n",
"10 20 319\n",
"10 20 320\n",
"10 20 321\n",
"10 20 322\n",
"10 20 323\n",
"10 20 324\n",
"10 20 325\n",
"10 20 326\n",
"10 20 327\n",
"10 20 328\n",
"10 20 329\n",
"10 20 330\n",
"10 20 331\n",
"10 20 332\n",
"10 20 333\n",
"10 20 334\n",
"10 20 335\n",
"10 20 336\n",
"10 20 337\n",
"10 20 338\n",
"10 20 339\n",
"10 20 340\n",
"10 20 341\n",
"10 20 342\n",
"10 20 343\n",
"10 20 344\n",
"10 20 345\n",
"10 20 346\n",
"10 20 347\n",
"10 20 348\n",
"10 20 349\n",
"10 20 350\n",
"10 20 351\n",
"10 20 352\n",
"10 20 353\n",
"10 20 354\n",
"10 20 355\n",
"10 20 356\n",
"10 20 357\n",
"10 20 358\n",
"10 20 359\n",
"10 20 360\n",
"10 20 361\n",
"10 20 362\n",
"10 20 363\n",
"10 20 364\n",
"10 20 365\n",
"10 20 366\n",
"10 20 367\n",
"10 20 368\n",
"10 20 369\n",
"10 20 370\n",
"10 20 371\n",
"10 20 372\n",
"10 20 373\n",
"10 20 374\n",
"10 20 375\n",
"10 20 376\n",
"10 20 377\n",
"10 20 378\n",
"10 20 379\n",
"10 20 380\n",
"10 20 381\n",
"10 20 382\n",
"10 20 383\n",
"10 20 384\n",
"10 20 385\n",
"10 20 386\n",
"10 20 387\n",
"10 20 388\n",
"10 20 389\n",
"10 20 390\n",
"10 20 391\n",
"10 20 392\n",
"10 20 393\n",
"10 20 394\n",
"10 20 395\n",
"10 20 396\n",
"10 20 397\n",
"10 20 398\n",
"10 20 399\n",
"10 20 400\n",
"10 20 401\n",
"10 20 402\n",
"10 20 403\n",
"10 20 404\n",
"10 20 405\n",
"10 20 406\n",
"10 20 407\n",
"10 20 408\n",
"10 20 409\n",
"10 20 410\n",
"10 20 411\n",
"10 20 412\n",
"10 20 413\n",
"10 20 414\n",
"10 20 415\n",
"10 20 416\n",
"10 20 417\n",
"10 20 418\n",
"10 20 419\n",
"10 20 420\n",
"10 20 421\n",
"10 20 422\n",
"10 20 423\n",
"10 20 424\n",
"10 20 425\n",
"10 20 426\n",
"10 20 427\n",
"10 20 428\n",
"10 20 429\n",
"10 20 430\n",
"10 20 431\n",
"10 20 432\n",
"10 20 433\n",
"10 20 434\n",
"10 20 435\n",
"10 20 436\n",
"10 20 437\n",
"10 20 438\n",
"10 20 439\n",
"10 20 440\n",
"10 20 441\n",
"10 20 442\n",
"10 20 443\n",
"10 20 444\n",
"10 20 445\n",
"10 20 446\n",
"10 20 447\n",
"10 20 448\n",
"10 20 449\n",
"10 20 450\n",
"10 20 451\n",
"10 20 452\n",
"10 20 453\n",
"10 20 454\n",
"10 20 455\n",
"10 20 456\n",
"10 20 457\n",
"10 20 458\n",
"10 20 459\n",
"10 20 460\n",
"10 20 461\n",
"10 20 462\n",
"10 20 463\n",
"10 20 464\n",
"10 20 465\n",
"10 20 466\n",
"10 20 467\n",
"10 20 468\n",
"10 20 469\n",
"10 20 470\n",
"10 20 471\n",
"10 20 472\n",
"10 20 473\n",
"10 20 474\n",
"10 20 475\n",
"10 20 476\n",
"10 20 477\n",
"10 20 478\n",
"10 20 479\n",
"10 20 480\n",
"10 20 481\n",
"10 20 482\n",
"10 20 483\n",
"10 20 484\n",
"10 20 485\n",
"10 20 486\n",
"10 20 487\n",
"10 20 488\n",
"10 20 489\n",
"10 20 490\n",
"10 20 491\n",
"10 20 492\n",
"10 20 493\n",
"10 20 494\n",
"10 20 495\n",
"10 20 496\n",
"10 20 497\n",
"10 20 498\n",
"10 20 499\n",
"==================== \n",
" n= 15\n",
"15 20 0\n",
"15 20 1\n",
"15 20 2\n",
"15 20 3\n",
"15 20 4\n",
"15 20 5\n",
"15 20 6\n",
"15 20 7\n",
"15 20 8\n",
"15 20 9\n",
"15 20 10\n",
"15 20 11\n",
"15 20 12\n",
"15 20 13\n",
"15 20 14\n",
"15 20 15\n",
"15 20 16\n",
"15 20 17\n",
"15 20 18\n",
"15 20 19\n",
"15 20 20\n",
"15 20 21\n",
"15 20 22\n",
"15 20 23\n",
"15 20 24\n",
"15 20 25\n",
"15 20 26\n",
"15 20 27\n",
"15 20 28\n",
"15 20 29\n",
"15 20 30\n",
"15 20 31\n",
"15 20 32\n",
"15 20 33\n",
"15 20 34\n",
"15 20 35\n",
"15 20 36\n",
"15 20 37\n",
"15 20 38\n",
"15 20 39\n",
"15 20 40\n",
"15 20 41\n",
"15 20 42\n",
"15 20 43\n",
"15 20 44\n",
"15 20 45\n",
"15 20 46\n",
"15 20 47\n",
"15 20 48\n",
"15 20 49\n",
"15 20 50\n",
"15 20 51\n",
"15 20 52\n",
"15 20 53\n",
"15 20 54\n",
"15 20 55\n",
"15 20 56\n",
"15 20 57\n",
"15 20 58\n",
"15 20 59\n",
"15 20 60\n",
"15 20 61\n",
"15 20 62\n",
"15 20 63\n",
"15 20 64\n",
"15 20 65\n",
"15 20 66\n",
"15 20 67\n",
"15 20 68\n",
"15 20 69\n",
"15 20 70\n",
"15 20 71\n",
"15 20 72\n",
"15 20 73\n",
"15 20 74\n",
"15 20 75\n",
"15 20 76\n",
"15 20 77\n",
"15 20 78\n",
"15 20 79\n",
"15 20 80\n",
"15 20 81\n",
"15 20 82\n",
"15 20 83\n",
"15 20 84\n",
"15 20 85\n",
"15 20 86\n",
"15 20 87\n",
"15 20 88\n",
"15 20 89\n",
"15 20 90\n",
"15 20 91\n",
"15 20 92\n",
"15 20 93\n",
"15 20 94\n",
"15 20 95\n",
"15 20 96\n",
"15 20 97\n",
"15 20 98\n",
"15 20 99\n",
"15 20 100\n",
"15 20 101\n",
"15 20 102\n",
"15 20 103\n",
"15 20 104\n",
"15 20 105\n",
"15 20 106\n",
"15 20 107\n",
"15 20 108\n",
"15 20 109\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"15 20 110\n",
"15 20 111\n",
"15 20 112\n",
"15 20 113\n",
"15 20 114\n",
"15 20 115\n",
"15 20 116\n",
"15 20 117\n",
"15 20 118\n",
"15 20 119\n",
"15 20 120\n",
"15 20 121\n",
"15 20 122\n",
"15 20 123\n",
"15 20 124\n",
"15 20 125\n",
"15 20 126\n",
"15 20 127\n",
"15 20 128\n",
"15 20 129\n",
"15 20 130\n",
"15 20 131\n",
"15 20 132\n",
"15 20 133\n",
"15 20 134\n",
"15 20 135\n",
"15 20 136\n",
"15 20 137\n",
"15 20 138\n",
"15 20 139\n",
"15 20 140\n",
"15 20 141\n",
"15 20 142\n",
"15 20 143\n",
"15 20 144\n",
"15 20 145\n",
"15 20 146\n",
"15 20 147\n",
"15 20 148\n",
"15 20 149\n",
"15 20 150\n",
"15 20 151\n",
"15 20 152\n",
"15 20 153\n",
"15 20 154\n",
"15 20 155\n",
"15 20 156\n",
"15 20 157\n",
"15 20 158\n",
"15 20 159\n",
"15 20 160\n",
"15 20 161\n",
"15 20 162\n",
"15 20 163\n",
"15 20 164\n",
"15 20 165\n",
"15 20 166\n",
"15 20 167\n",
"15 20 168\n",
"15 20 169\n",
"15 20 170\n",
"15 20 171\n",
"15 20 172\n",
"15 20 173\n",
"15 20 174\n",
"15 20 175\n",
"15 20 176\n",
"15 20 177\n",
"15 20 178\n",
"15 20 179\n",
"15 20 180\n",
"15 20 181\n",
"15 20 182\n",
"15 20 183\n",
"15 20 184\n",
"15 20 185\n",
"15 20 186\n",
"15 20 187\n",
"15 20 188\n",
"15 20 189\n",
"15 20 190\n",
"15 20 191\n",
"15 20 192\n",
"15 20 193\n",
"15 20 194\n",
"15 20 195\n",
"15 20 196\n",
"15 20 197\n",
"15 20 198\n",
"15 20 199\n",
"15 20 200\n",
"15 20 201\n",
"15 20 202\n",
"15 20 203\n",
"15 20 204\n",
"15 20 205\n",
"15 20 206\n",
"15 20 207\n",
"15 20 208\n",
"15 20 209\n",
"15 20 210\n",
"15 20 211\n",
"15 20 212\n",
"15 20 213\n",
"15 20 214\n",
"15 20 215\n",
"15 20 216\n",
"15 20 217\n",
"15 20 218\n",
"15 20 219\n",
"15 20 220\n",
"15 20 221\n",
"15 20 222\n",
"15 20 223\n",
"15 20 224\n",
"15 20 225\n",
"15 20 226\n",
"15 20 227\n",
"15 20 228\n",
"15 20 229\n",
"15 20 230\n",
"15 20 231\n",
"15 20 232\n",
"15 20 233\n",
"15 20 234\n",
"15 20 235\n",
"15 20 236\n",
"15 20 237\n",
"15 20 238\n",
"15 20 239\n",
"15 20 240\n",
"15 20 241\n",
"15 20 242\n",
"15 20 243\n",
"15 20 244\n",
"15 20 245\n",
"15 20 246\n",
"15 20 247\n",
"15 20 248\n",
"15 20 249\n",
"15 20 250\n",
"15 20 251\n",
"15 20 252\n",
"15 20 253\n",
"15 20 254\n",
"15 20 255\n",
"15 20 256\n",
"15 20 257\n",
"15 20 258\n",
"15 20 259\n",
"15 20 260\n",
"15 20 261\n",
"15 20 262\n",
"15 20 263\n",
"15 20 264\n",
"15 20 265\n",
"15 20 266\n",
"15 20 267\n",
"15 20 268\n",
"15 20 269\n",
"15 20 270\n",
"15 20 271\n",
"15 20 272\n",
"15 20 273\n",
"15 20 274\n",
"15 20 275\n",
"15 20 276\n",
"15 20 277\n",
"15 20 278\n",
"15 20 279\n",
"15 20 280\n",
"15 20 281\n",
"15 20 282\n",
"15 20 283\n",
"15 20 284\n",
"15 20 285\n",
"15 20 286\n",
"15 20 287\n",
"15 20 288\n",
"15 20 289\n",
"15 20 290\n",
"15 20 291\n",
"15 20 292\n",
"15 20 293\n",
"15 20 294\n",
"15 20 295\n",
"15 20 296\n",
"15 20 297\n",
"15 20 298\n",
"15 20 299\n",
"15 20 300\n",
"15 20 301\n",
"15 20 302\n",
"15 20 303\n",
"15 20 304\n",
"15 20 305\n",
"15 20 306\n",
"15 20 307\n",
"15 20 308\n",
"15 20 309\n",
"15 20 310\n",
"15 20 311\n",
"15 20 312\n",
"15 20 313\n",
"15 20 314\n",
"15 20 315\n",
"15 20 316\n",
"15 20 317\n",
"15 20 318\n",
"15 20 319\n",
"15 20 320\n",
"15 20 321\n",
"15 20 322\n",
"15 20 323\n",
"15 20 324\n",
"15 20 325\n",
"15 20 326\n",
"15 20 327\n",
"15 20 328\n",
"15 20 329\n",
"15 20 330\n",
"15 20 331\n",
"15 20 332\n",
"15 20 333\n",
"15 20 334\n",
"15 20 335\n",
"15 20 336\n",
"15 20 337\n",
"15 20 338\n",
"15 20 339\n",
"15 20 340\n",
"15 20 341\n",
"15 20 342\n",
"15 20 343\n",
"15 20 344\n",
"15 20 345\n",
"15 20 346\n",
"15 20 347\n",
"15 20 348\n",
"15 20 349\n",
"15 20 350\n",
"15 20 351\n",
"15 20 352\n",
"15 20 353\n",
"15 20 354\n",
"15 20 355\n",
"15 20 356\n",
"15 20 357\n",
"15 20 358\n",
"15 20 359\n",
"15 20 360\n",
"15 20 361\n",
"15 20 362\n",
"15 20 363\n",
"15 20 364\n",
"15 20 365\n",
"15 20 366\n",
"15 20 367\n",
"15 20 368\n",
"15 20 369\n",
"15 20 370\n",
"15 20 371\n",
"15 20 372\n",
"15 20 373\n",
"15 20 374\n",
"15 20 375\n",
"15 20 376\n",
"15 20 377\n",
"15 20 378\n",
"15 20 379\n",
"15 20 380\n",
"15 20 381\n",
"15 20 382\n",
"15 20 383\n",
"15 20 384\n",
"15 20 385\n",
"15 20 386\n",
"15 20 387\n",
"15 20 388\n",
"15 20 389\n",
"15 20 390\n",
"15 20 391\n",
"15 20 392\n",
"15 20 393\n",
"15 20 394\n",
"15 20 395\n",
"15 20 396\n",
"15 20 397\n",
"15 20 398\n",
"15 20 399\n",
"15 20 400\n",
"15 20 401\n",
"15 20 402\n",
"15 20 403\n",
"15 20 404\n",
"15 20 405\n",
"15 20 406\n",
"15 20 407\n",
"15 20 408\n",
"15 20 409\n",
"15 20 410\n",
"15 20 411\n",
"15 20 412\n",
"15 20 413\n",
"15 20 414\n",
"15 20 415\n",
"15 20 416\n",
"15 20 417\n",
"15 20 418\n",
"15 20 419\n",
"15 20 420\n",
"15 20 421\n",
"15 20 422\n",
"15 20 423\n",
"15 20 424\n",
"15 20 425\n",
"15 20 426\n",
"15 20 427\n",
"15 20 428\n",
"15 20 429\n",
"15 20 430\n",
"15 20 431\n",
"15 20 432\n",
"15 20 433\n",
"15 20 434\n",
"15 20 435\n",
"15 20 436\n",
"15 20 437\n",
"15 20 438\n",
"15 20 439\n",
"15 20 440\n",
"15 20 441\n",
"15 20 442\n",
"15 20 443\n",
"15 20 444\n",
"15 20 445\n",
"15 20 446\n",
"15 20 447\n",
"15 20 448\n",
"15 20 449\n",
"15 20 450\n",
"15 20 451\n",
"15 20 452\n",
"15 20 453\n",
"15 20 454\n",
"15 20 455\n",
"15 20 456\n",
"15 20 457\n",
"15 20 458\n",
"15 20 459\n",
"15 20 460\n",
"15 20 461\n",
"15 20 462\n",
"15 20 463\n",
"15 20 464\n",
"15 20 465\n",
"15 20 466\n",
"15 20 467\n",
"15 20 468\n",
"15 20 469\n",
"15 20 470\n",
"15 20 471\n",
"15 20 472\n",
"15 20 473\n",
"15 20 474\n",
"15 20 475\n",
"15 20 476\n",
"15 20 477\n",
"15 20 478\n",
"15 20 479\n",
"15 20 480\n",
"15 20 481\n",
"15 20 482\n",
"15 20 483\n",
"15 20 484\n",
"15 20 485\n",
"15 20 486\n",
"15 20 487\n",
"15 20 488\n",
"15 20 489\n",
"15 20 490\n",
"15 20 491\n",
"15 20 492\n",
"15 20 493\n",
"15 20 494\n",
"15 20 495\n",
"15 20 496\n",
"15 20 497\n",
"15 20 498\n",
"15 20 499\n",
"Done\n"
]
}
],
"source": [
"N,n_max=500,20\n",
"D=10 # Percent to report\n",
"k,j=0,0 # Counter\n",
"d,j={},0\n",
"Q_o={ 3:[5,10,15,20], 5:[10,15,20], 10:[15,20], 15:[20] }\n",
"for n in Q_o.keys():\n",
" print(\"=\"*20,\"\\n\",\"n=\",n)\n",
" for q_o in Q_o[n]:\n",
" d[n,q_o]=np.zeros(N)\n",
" for i in range(N):\n",
" j+=1\n",
" print(n,q_o,i)\n",
"# q_i=np.random.randint(n,q_o+1)\n",
" z_i=pp.zonotope(G=np.random.normal(size=(n,n)))\n",
" z_o=pp.zonotope(G=np.random.normal(size=(n,q_o)))\n",
"# x=time.time()\n",
" a_suf=alpha_sufficient(z_i,z_o)\n",
"# print(\"sufficient:\",time.time()-x)\n",
"# x=time.time()\n",
" a_nec=alpha_necessary(z_i,z_o)\n",
"# print(\"necessary:\",time.time()-x)\n",
" d[n,q_o][i]=1-a_suf/a_nec\n",
"# print(\"gap:\",n,q_o,\"=\",d[n,q_o][i])\n",
" j+=1\n",
"print(\"Done\")"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"(3, 5) [496, 496, 500, 500] 0.049187775432405045\n",
"(3, 10) [496, 499, 500, 500] 0.04621478578855909\n",
"(3, 15) [496, 499, 500, 500] 0.025488967420588482\n",
"(3, 20) [500, 500, 500, 500] 0.0008762382316735984\n",
"(5, 10) [476, 489, 500, 500] 0.02700849729590582\n",
"(5, 15) [472, 490, 500, 500] 0.04464416234485524\n",
"(5, 20) [480, 493, 500, 500] 0.04962959408652312\n",
"(10, 15) [421, 453, 492, 500] 0.07280827975640214\n",
"(10, 20) [332, 381, 474, 499] 0.10066627720859611\n",
"(15, 20) [370, 420, 489, 500] 0.08882584056299803\n"
]
}
],
"source": [
"T=[0.001,0.01,0.05,0.1]\n",
"for key in d.keys():\n",
" print(key,[np.sum(d[key]<t) for t in T],np.max(d[key]))"
]
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"10000"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"j"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
| 0.475362 | 0.478407 |
```
from sklearn.model_selection import train_test_split
import os,cv2
import numpy as np
import matplotlib.pyplot as plt
from torch.nn import *
import torch,torchvision
from tqdm import tqdm
device = 'cuda'
PROJECT_NAME = 'Satellite-Image-Classification'
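# load_data() expects ./data/<class_name>/<image files>: each folder name gets an integer label, and every image is resized to 112x112 and scaled to [0, 1]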
def load_data():
idx = -1
labels = {}
data = []
for folder in tqdm(os.listdir('./data/')):
idx += 1
labels[folder] = idx
for file in os.listdir(f'./data/{folder}'):
file = f'./data/{folder}/{file}'
img = cv2.imread(file)
img = cv2.resize(img,(112,112))
data.append([img/255.0,labels[folder]])
np.random.shuffle(data)
X = []
y = []
for d in data:
X.append(d[0])
y.append(d[1])
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.0625)
return data,X,y,idx,labels,X_train,X_test,y_train,y_test
data,X,y,idx,labels,X_train,X_test,y_train,y_test = load_data()
X_train = torch.from_numpy(np.array(X_train)).permute(0,3,1,2).contiguous().to(device).float() # HWC -> CHW
X_test = torch.from_numpy(np.array(X_test)).permute(0,3,1,2).contiguous().to(device).float() # HWC -> CHW
y_train = torch.from_numpy(np.array(y_train)).to(device)
y_test = torch.from_numpy(np.array(y_test)).to(device)
labels_r = {}
for l_key,l_val in zip(labels.keys(),labels.values()):
labels_r[l_val] = l_key
labels
len(X_train)
len(y_train)
# plt.figure(figsize=(12,6))
# plt.title(f'{labels_r[int(y_test[2])]}')
# plt.imshow(X_test[2].view(112,112,3))
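# evaluation helpers: both push the whole split through the model in a single forward pass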
def get_loss(model,X,y,criterion):
preds = model(X)
loss = criterion(preds,y)
return loss.item()
def get_accuracy(model,X,y):
    total = 0
    correct = 0
preds = model(X)
for y_batch,pred in zip(y,preds):
pred = torch.argmax(pred)
if y_batch == pred:
correct += 1
total += 1
acc = round(correct/total,3)*100
return acc
def predict(model):
model.eval()
preds = []
for img in os.listdir('./test_data/'):
img_name = img
img = cv2.imread(f'./test_data/{img}')
img = cv2.resize(img,(112,112))
img = img / 255.0
        pred = model(torch.from_numpy(np.array(img)).permute(2,0,1).unsqueeze(0).float().to(device)) # HWC -> 1xCxHxW
pred = torch.argmax(pred)
plt.figure(figsize=(12,7))
plt.imshow(img)
plt.title(f'{labels_r[int(pred)]}')
plt.savefig(f'./preds/{img_name}')
plt.close()
model.train()
class Model(Module):
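    # Five Conv2d -> BatchNorm2d -> MaxPool2d -> ReLU blocks shrink 112x112 inputs to 96 channels of 1x1 (55 -> 26 -> 12 -> 5 -> 1), followed by a BatchNorm1d MLP head that outputs 4 class logits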
def __init__(self):
super().__init__()
self.activation = ReLU()
self.max_pool2d = MaxPool2d((2,2),(2,2))
self.conv1 = Conv2d(3,6,(3,3))
self.conv1batchnorm = BatchNorm2d(6)
self.conv2 = Conv2d(6,12,(3,3))
self.conv2batchnorm = BatchNorm2d(12)
self.conv3 = Conv2d(12,24,(3,3))
self.conv3batchnorm = BatchNorm2d(24)
self.conv4 = Conv2d(24,48,(3,3))
self.conv4batchnorm = BatchNorm2d(48)
self.conv5 = Conv2d(48,96,(3,3))
self.conv5batchnorm = BatchNorm2d(96)
self.linear1 = Linear(96*1*1,128)
self.linear1batchnorm = BatchNorm1d(128)
self.linear2 = Linear(128,256)
self.linear2batchnorm = BatchNorm1d(256)
self.linear3 = Linear(256,512)
self.linear3batchnorm = BatchNorm1d(512)
self.linear4 = Linear(512,1024)
self.linear4batchnorm = BatchNorm1d(1024)
self.linear5 = Linear(1024,512)
self.linear5batchnorm = BatchNorm1d(512)
self.output = Linear(512,4)
def forward(self,X):
preds = X
preds = self.activation(self.max_pool2d(self.conv1batchnorm(self.conv1(preds))))
preds = self.activation(self.max_pool2d(self.conv2batchnorm(self.conv2(preds))))
preds = self.activation(self.max_pool2d(self.conv3batchnorm(self.conv3(preds))))
preds = self.activation(self.max_pool2d(self.conv4batchnorm(self.conv4(preds))))
preds = self.activation(self.max_pool2d(self.conv5batchnorm(self.conv5(preds))))
preds = preds.view(-1,96*1*1)
preds = self.activation(self.linear1batchnorm(self.linear1(preds)))
preds = self.activation(self.linear2batchnorm(self.linear2(preds)))
preds = self.activation(self.linear3batchnorm(self.linear3(preds)))
preds = self.activation(self.linear4batchnorm(self.linear4(preds)))
preds = self.activation(self.linear5batchnorm(self.linear5(preds)))
preds = self.output(preds)
return preds
model = Model().to(device)
criterion = CrossEntropyLoss()
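# CrossEntropyLoss consumes raw logits, so the model's output layer applies no softmax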
from torch.optim import *
optimizer = Adam(model.parameters(),lr=0.001)
epochs = 100
batch_size = 32
import wandb
wandb.init(project=PROJECT_NAME,name='baseline')
wandb.watch(model)
for _ in tqdm(range(epochs)):
    for i in range(0,len(X_train),batch_size): # advance by batch_size so the [i:i+batch_size] slices cover the training set without overlap
X_batch = X_train[i:i+batch_size].float().to(device).view(-1,3,112,112)
y_batch = y_train[i:i+batch_size].to(device)
model.to(device)
preds = model(X_batch)
loss = criterion(preds,y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
wandb.log({'Loss':get_loss(model,X_train,y_train,criterion)})
wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
wandb.log({'Acc':get_accuracy(model,X_train,y_train)})
wandb.log({'Val Acc':get_accuracy(model,X_test,y_test)})
predict(model)
for file in os.listdir('./preds/'):
wandb.log({f'Img/{file}':wandb.Image(cv2.imread(f'./preds/{file}'))})
wandb.watch(model)
wandb.finish()
```
|
github_jupyter
|
from sklearn.model_selection import train_test_split
import os,cv2
import numpy as np
import matplotlib.pyplot as plt
from torch.nn import *
import torch,torchvision
from tqdm import tqdm
device = 'cuda'
PROJECT_NAME = 'Satellite-Image-Classification'
def load_data():
idx = -1
labels = {}
data = []
for folder in tqdm(os.listdir('./data/')):
idx += 1
labels[folder] = idx
for file in os.listdir(f'./data/{folder}'):
file = f'./data/{folder}/{file}'
img = cv2.imread(file)
img = cv2.resize(img,(112,112))
data.append([img/255.0,labels[folder]])
np.random.shuffle(data)
X = []
y = []
for d in data:
X.append(d[0])
y.append(d[1])
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.0625)
return data,X,y,idx,labels,X_train,X_test,y_train,y_test
data,X,y,idx,labels,X_train,X_test,y_train,y_test = load_data()
X_train = torch.from_numpy(np.array(X_train)).permute(0,3,1,2).contiguous().to(device).float() # HWC -> CHW
X_test = torch.from_numpy(np.array(X_test)).permute(0,3,1,2).contiguous().to(device).float() # HWC -> CHW
y_train = torch.from_numpy(np.array(y_train)).to(device)
y_test = torch.from_numpy(np.array(y_test)).to(device)
labels_r = {}
for l_key,l_val in zip(labels.keys(),labels.values()):
labels_r[l_val] = l_key
labels
len(X_train)
len(y_train)
# plt.figure(figsize=(12,6))
# plt.title(f'{labels_r[int(y_test[2])]}')
# plt.imshow(X_test[2].view(112,112,3))
def get_loss(model,X,y,criterion):
preds = model(X)
loss = criterion(preds,y)
return loss.item()
def get_accuracy(model,X,y):
    total = 0
    correct = 0
preds = model(X)
for y_batch,pred in zip(y,preds):
pred = torch.argmax(pred)
if y_batch == pred:
correct += 1
total += 1
acc = round(correct/total,3)*100
return acc
def predict(model):
model.eval()
preds = []
for img in os.listdir('./test_data/'):
img_name = img
img = cv2.imread(f'./test_data/{img}')
img = cv2.resize(img,(112,112))
img = img / 255.0
        pred = model(torch.from_numpy(np.array(img)).permute(2,0,1).unsqueeze(0).float().to(device)) # HWC -> 1xCxHxW
pred = torch.argmax(pred)
plt.figure(figsize=(12,7))
plt.imshow(img)
plt.title(f'{labels_r[int(pred)]}')
plt.savefig(f'./preds/{img_name}')
plt.close()
model.train()
class Model(Module):
def __init__(self):
super().__init__()
self.activation = ReLU()
self.max_pool2d = MaxPool2d((2,2),(2,2))
self.conv1 = Conv2d(3,6,(3,3))
self.conv1batchnorm = BatchNorm2d(6)
self.conv2 = Conv2d(6,12,(3,3))
self.conv2batchnorm = BatchNorm2d(12)
self.conv3 = Conv2d(12,24,(3,3))
self.conv3batchnorm = BatchNorm2d(24)
self.conv4 = Conv2d(24,48,(3,3))
self.conv4batchnorm = BatchNorm2d(48)
self.conv5 = Conv2d(48,96,(3,3))
self.conv5batchnorm = BatchNorm2d(96)
self.linear1 = Linear(96*1*1,128)
self.linear1batchnorm = BatchNorm1d(128)
self.linear2 = Linear(128,256)
self.linear2batchnorm = BatchNorm1d(256)
self.linear3 = Linear(256,512)
self.linear3batchnorm = BatchNorm1d(512)
self.linear4 = Linear(512,1024)
self.linear4batchnorm = BatchNorm1d(1024)
self.linear5 = Linear(1024,512)
self.linear5batchnorm = BatchNorm1d(512)
self.output = Linear(512,4)
def forward(self,X):
preds = X
preds = self.activation(self.max_pool2d(self.conv1batchnorm(self.conv1(preds))))
preds = self.activation(self.max_pool2d(self.conv2batchnorm(self.conv2(preds))))
preds = self.activation(self.max_pool2d(self.conv3batchnorm(self.conv3(preds))))
preds = self.activation(self.max_pool2d(self.conv4batchnorm(self.conv4(preds))))
preds = self.activation(self.max_pool2d(self.conv5batchnorm(self.conv5(preds))))
preds = preds.view(-1,96*1*1)
preds = self.activation(self.linear1batchnorm(self.linear1(preds)))
preds = self.activation(self.linear2batchnorm(self.linear2(preds)))
preds = self.activation(self.linear3batchnorm(self.linear3(preds)))
preds = self.activation(self.linear4batchnorm(self.linear4(preds)))
preds = self.activation(self.linear5batchnorm(self.linear5(preds)))
preds = self.output(preds)
return preds
model = Model().to(device)
criterion = CrossEntropyLoss()
from torch.optim import *
optimizer = Adam(model.parameters(),lr=0.001)
epochs = 100
batch_size = 32
import wandb
wandb.init(project=PROJECT_NAME,name='baseline')
wandb.watch(model)
for _ in tqdm(range(epochs)):
    for i in range(0,len(X_train),batch_size): # advance by batch_size so the [i:i+batch_size] slices cover the training set without overlap
X_batch = X_train[i:i+batch_size].float().to(device).view(-1,3,112,112)
y_batch = y_train[i:i+batch_size].to(device)
model.to(device)
preds = model(X_batch)
loss = criterion(preds,y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
wandb.log({'Loss':get_loss(model,X_train,y_train,criterion)})
wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
wandb.log({'Acc':get_accuracy(model,X_train,y_train)})
wandb.log({'Val Acc':get_accuracy(model,X_test,y_test)})
predict(model)
for file in os.listdir('./preds/'):
wandb.log({f'Img/{file}':wandb.Image(cv2.imread(f'./preds/{file}'))})
wandb.watch(model)
wandb.finish()
| 0.740737 | 0.46873 |
<a href="https://colab.research.google.com/github/claytonchagas/intpy_prod/blob/main/5_3_automatic_evaluation_quicksort_fixed_recursive_ast_only_DB.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!sudo apt-get update
!sudo apt-get install python3.9
!python3.9 -V
!which python3.9
```
#**i. Colab hardware and software specs:**
- n1-highmem-2 instance
- 2vCPU @ 2.3GHz
- 13GB RAM
- 100GB Free Space
- idle cut-off 90 minutes
- maximum lifetime 12 hours
```
# Colab hardware info (processor and memory):
# !cat /proc/cpuinfo
# !cat /proc/memoinfo
# !lscpu
!lscpu | egrep 'Model name|Socket|Thread|NUMA|CPU\(s\)'
print("---------------------------------")
!free -m
# Colab OS structure and version
!ls -a
print("---------------------------------")
!ls -l /
print("---------------------------------")
!lsb_release -a
```
#**ii. Cloning IntPy repository:**
- https://github.com/claytonchagas/intpy_dev.git
```
!git clone https://github.com/claytonchagas/intpy_dev.git
!ls -a
print("---------------------------------")
%cd intpy_dev/
!git checkout c27b261
!ls -a
print("---------------------------------")
!git branch
print("---------------------------------")
#!git log --pretty=oneline --abbrev-commit
#!git log --all --decorate --oneline --graph
```
#**iii. Quicksort random's evolutions and cutoff by approach**
- This evaluation is not applicable here, since the collection of samples is fixed.
#**iv. Quicksort random from 1 to 100.000, 10.000 samples, 5.000 samples and 2.500 samples, three mixed trials**
- This evaluation is not applicable here, since the collection of samples is fixed.
#**1. Fast execution, all versions (v0.1.x and from v0.2.1.x to v0.2.7.x)**
##**1.1 Fast execution: only intra-cache**
###**1.1.1 Fast execution: only intra-cache => experiment's executions**
```
!rm -rf .intpy;\
rm -rf stats_intra.dat;\
echo "IntPy only intra-cache";\
experimento=quicksort_recursive_fixed.py;\
echo "Experiment: $experimento";\
for i in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v027x";\
do rm -rf output_intra_$i.dat;\
rm -rf .intpy;\
echo "---------------------------------";\
echo "IntPy version $i";\
for j in {1..5};\
do echo "Execution $j";\
rm -rf .intpy;\
if [ "$i" = "--no-cache" ]; then python3.9 $experimento $i >> output_intra_$i.dat;\
else python3.9 $experimento -v $i >> output_intra_$i.dat;\
fi;\
echo "Done execution $j $i";\
done;\
echo "Done IntPy version $i";\
done;\
echo "---------------------------------";\
echo "---------------------------------";\
echo "Statistics evaluation:";\
for k in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v027x";\
do echo "Statistics version $k" >> stats_intra.dat;\
echo "Statistics version $k";\
python3.9 stats_colab.py output_intra_$k.dat;\
python3.9 stats_colab.py output_intra_$k.dat >> stats_intra.dat;\
echo "---------------------------------";\
done;\
```
###**1.1.2 Fast execution: only intra-cache => charts generation**
```
%matplotlib inline
import matplotlib.pyplot as plt
versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:pink']
filev = "f_intra_"
data = "data_intra_"
dataf = "dataf_intra_"
for i, j in zip(versions, colors):
filev_version = filev+i
data_version = data+i
dataf_version = dataf+i
file_intra = open("output_intra_"+i+".dat", "r")
data_intra = []
dataf_intra = []
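    # each run appends 4 lines to output_intra_<version>.dat; the [3::4] slice keeps one value per run (assumed to be the elapsed time in seconds)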
for x in file_intra.readlines()[3::4]:
data_intra.append(float(x))
file_intra.close()
for y in data_intra:
dataf_intra.append(round(y, 5))
print(i+": ",dataf_intra)
running1_1 = ['1st', '2nd', '3rd', '4th', '5th']
plt.figure(figsize = (10, 5))
plt.bar(running1_1, dataf_intra, color =j, width = 0.4)
plt.grid(axis='y')
for index, datas in enumerate(dataf_intra):
plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold')
plt.xlabel("Running only with intra cache "+i, fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Chart "+i+" intra - Quicksort random (1..100.000, 10.000 spls) - with intra cache, no inter cache - IntPy "+i+" version", fontweight='bold')
plt.savefig("chart_intra_"+i+".png")
plt.close()
#plt.show()
import matplotlib.pyplot as plt
file_intra = open("stats_intra.dat", "r")
data_intra = []
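# stats_colab.py writes an 8-line block per version; [5::8] takes one line per block and x[8::] drops its text prefix (assumed to leave the median runtime in seconds)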
for x in file_intra.readlines()[5::8]:
data_intra.append(round(float(x[8::]), 5))
file_intra.close()
print(data_intra)
versions = ["--no-cache", "0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.7.x"]
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:pink']
plt.figure(figsize = (10, 5))
plt.bar(versions, data_intra, color = colors, width = 0.7)
plt.grid(axis='y')
for index, datas in enumerate(data_intra):
plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold')
plt.xlabel("Median for 5 executions in each version, intra cache", fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Quicksort random (1..100.000, 10.000 spls), cache intra-running, all versions", fontweight='bold')
plt.savefig('compare_median_intra.png')
plt.close()
#plt.show()
```
##**1.2 Fast execution: full cache -> intra and inter-cache**
###**1.2.1 Fast execution: full cache -> intra and inter-cache => experiment's executions**
```
!rm -rf .intpy;\
rm -rf stats_full.dat;\
echo "IntPy full cache -> intra and inter-cache";\
experimento=quicksort_recursive_fixed.py;\
echo "Experiment: $experimento";\
for i in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v027x";\
do rm -rf output_full_$i.dat;\
rm -rf .intpy;\
echo "---------------------------------";\
echo "IntPy version $i";\
for j in {1..5};\
do echo "Execution $j";\
if [ "$i" = "--no-cache" ]; then python3.9 $experimento $i >> output_full_$i.dat;\
else python3.9 $experimento -v $i >> output_full_$i.dat;\
fi;\
echo "Done execution $j $i";\
done;\
echo "Done IntPy version $i";\
done;\
echo "---------------------------------";\
echo "---------------------------------";\
echo "Statistics evaluation:";\
for k in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v027x";\
do echo "Statistics version $k" >> stats_full.dat;\
echo "Statistics version $k";\
python3.9 stats_colab.py output_full_$k.dat;\
python3.9 stats_colab.py output_full_$k.dat >> stats_full.dat;\
echo "---------------------------------";\
done;\
```
###**1.2.2 Fast execution: full cache -> intra and inter-cache => charts generation**
```
%matplotlib inline
import matplotlib.pyplot as plt
versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:pink']
filev = "f_full_"
data = "data_full_"
dataf = "dataf_full_"
for i, j in zip(versions, colors):
filev_version = filev+i
data_version = data+i
dataf_version = dataf+i
file_full = open("output_full_"+i+".dat", "r")
data_full = []
dataf_full = []
for x in file_full.readlines()[3::4]:
data_full.append(float(x))
file_full.close()
for y in data_full:
dataf_full.append(round(y, 5))
print(i+": ",dataf_full)
running1_1 = ['1st', '2nd', '3rd', '4th', '5th']
plt.figure(figsize = (10, 5))
plt.bar(running1_1, dataf_full, color =j, width = 0.4)
plt.grid(axis='y')
for index, datas in enumerate(dataf_full):
plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold')
plt.xlabel("Running full cache "+i, fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Chart "+i+" full - Quicksort random (1..100.000, 10.000 spls) - with intra and inter cache - IntPy "+i+" version", fontweight='bold')
plt.savefig("chart_full_"+i+".png")
plt.close()
#plt.show()
import matplotlib.pyplot as plt
file_full = open("stats_full.dat", "r")
data_full = []
for x in file_full.readlines()[5::8]:
data_full.append(round(float(x[8::]), 5))
file_full.close()
print(data_full)
versions = ["--no-cache", "0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.7.x"]
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:pink']
plt.figure(figsize = (10, 5))
plt.bar(versions, data_full, color = colors, width = 0.7)
plt.grid(axis='y')
for index, datas in enumerate(data_full):
plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold')
plt.xlabel("Median for 5 executions in each version, full cache", fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Quicksort random (1..100.000, 10.000 spls), cache intra and inter-running, all versions", fontweight='bold')
plt.savefig('compare_median_full.png')
plt.close()
#plt.show()
```
##**1.3 Displaying charts to all versions**
###**1.3.1 Only intra-cache charts**
```
versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']
from IPython.display import Image, display
for i in versions:
display(Image("chart_intra_"+i+".png"))
print("=====================================================================================")
```
###**1.3.2 Full cache charts -> intra and inter-cache**
```
versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']
from IPython.display import Image, display
for i in versions:
display(Image("chart_full_"+i+".png"))
print("=====================================================================================")
```
###**1.3.3 Only intra-cache: median comparison chart of all versions**
```
from IPython.display import Image, display
display(Image("compare_median_intra.png"))
```
###**1.3.4 Full cache -> intra and inter-cache: median comparison chart of all versions**
```
from IPython.display import Image, display
display(Image("compare_median_full.png"))
```
|
github_jupyter
|
!sudo apt-get update
!sudo apt-get install python3.9
!python3.9 -V
!which python3.9
# Colab hardware info (processor and memory):
# !cat /proc/cpuinfo
# !cat /proc/meminfo
# !lscpu
!lscpu | egrep 'Model name|Socket|Thread|NUMA|CPU\(s\)'
print("---------------------------------")
!free -m
# Colab OS structure and version
!ls -a
print("---------------------------------")
!ls -l /
print("---------------------------------")
!lsb_release -a
!git clone https://github.com/claytonchagas/intpy_dev.git
!ls -a
print("---------------------------------")
%cd intpy_dev/
!git checkout c27b261
!ls -a
print("---------------------------------")
!git branch
print("---------------------------------")
#!git log --pretty=oneline --abbrev-commit
#!git log --all --decorate --oneline --graph
!rm -rf .intpy;\
rm -rf stats_intra.dat;\
echo "IntPy only intra-cache";\
experimento=quicksort_recursive_fixed.py;\
echo "Experiment: $experimento";\
for i in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v027x";\
do rm -rf output_intra_$i.dat;\
rm -rf .intpy;\
echo "---------------------------------";\
echo "IntPy version $i";\
for j in {1..5};\
do echo "Execution $j";\
rm -rf .intpy;\
if [ "$i" = "--no-cache" ]; then python3.9 $experimento $i >> output_intra_$i.dat;\
else python3.9 $experimento -v $i >> output_intra_$i.dat;\
fi;\
echo "Done execution $j $i";\
done;\
echo "Done IntPy version $i";\
done;\
echo "---------------------------------";\
echo "---------------------------------";\
echo "Statistics evaluation:";\
for k in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v027x";\
do echo "Statistics version $k" >> stats_intra.dat;\
echo "Statistics version $k";\
python3.9 stats_colab.py output_intra_$k.dat;\
python3.9 stats_colab.py output_intra_$k.dat >> stats_intra.dat;\
echo "---------------------------------";\
done;\
%matplotlib inline
import matplotlib.pyplot as plt
versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:pink']
filev = "f_intra_"
data = "data_intra_"
dataf = "dataf_intra_"
for i, j in zip(versions, colors):
filev_version = filev+i
data_version = data+i
dataf_version = dataf+i
file_intra = open("output_intra_"+i+".dat", "r")
data_intra = []
dataf_intra = []
for x in file_intra.readlines()[3::4]:
data_intra.append(float(x))
file_intra.close()
for y in data_intra:
dataf_intra.append(round(y, 5))
print(i+": ",dataf_intra)
running1_1 = ['1st', '2nd', '3rd', '4th', '5th']
plt.figure(figsize = (10, 5))
plt.bar(running1_1, dataf_intra, color =j, width = 0.4)
plt.grid(axis='y')
for index, datas in enumerate(dataf_intra):
plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold')
plt.xlabel("Running only with intra cache "+i, fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Chart "+i+" intra - Quicksort random (1..100.000, 10.000 spls) - with intra cache, no inter cache - IntPy "+i+" version", fontweight='bold')
plt.savefig("chart_intra_"+i+".png")
plt.close()
#plt.show()
import matplotlib.pyplot as plt
file_intra = open("stats_intra.dat", "r")
data_intra = []
for x in file_intra.readlines()[5::8]:
data_intra.append(round(float(x[8::]), 5))
file_intra.close()
print(data_intra)
versions = ["--no-cache", "0.1.x", "0.2.1.x", "0.2.2.x", "0.2.3.x", "0.2.4.x", "0.2.5.x", "0.2.7.x"]
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:pink']
plt.figure(figsize = (10, 5))
plt.bar(versions, data_intra, color = colors, width = 0.7)
plt.grid(axis='y')
for index, datas in enumerate(data_intra):
plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold')
plt.xlabel("Median for 5 executions in each version, intra cache", fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Quicksort random (1..100.000, 10.000 spls), cache intra-running, all versions", fontweight='bold')
plt.savefig('compare_median_intra.png')
plt.close()
#plt.show()
!rm -rf .intpy;\
rm -rf stats_full.dat;\
echo "IntPy full cache -> intra and inter-cache";\
experimento=quicksort_recursive_fixed.py;\
echo "Experiment: $experimento";\
for i in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v027x";\
do rm -rf output_full_$i.dat;\
rm -rf .intpy;\
echo "---------------------------------";\
echo "IntPy version $i";\
for j in {1..5};\
do echo "Execution $j";\
if [ "$i" = "--no-cache" ]; then python3.9 $experimento $i >> output_full_$i.dat;\
else python3.9 $experimento -v $i >> output_full_$i.dat;\
fi;\
echo "Done execution $j $i";\
done;\
echo "Done IntPy version $i";\
done;\
echo "---------------------------------";\
echo "---------------------------------";\
echo "Statistics evaluation:";\
for k in "--no-cache" "v01x" "v021x" "v022x" "v023x" "v024x" "v025x" "v027x";\
do echo "Statistics version $k" >> stats_full.dat;\
echo "Statistics version $k";\
python3.9 stats_colab.py output_full_$k.dat;\
python3.9 stats_colab.py output_full_$k.dat >> stats_full.dat;\
echo "---------------------------------";\
done;\
%matplotlib inline
import matplotlib.pyplot as plt
versions = ['--no-cache', 'v01x', 'v021x', 'v022x', 'v023x', 'v024x', 'v025x', 'v027x']
colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:purple', 'tab:grey', 'tab:olive', 'tab:cyan', 'tab:pink']
filev = "f_full_"
data = "data_full_"
dataf = "dataf_full_"
for i, j in zip(versions, colors):
filev_version = filev+i
data_version = data+i
dataf_version = dataf+i
file_full = open("output_full_"+i+".dat", "r")
data_full = []
dataf_full = []
for x in file_full.readlines()[3::4]:
data_full.append(float(x))
file_full.close()
for y in data_full:
dataf_full.append(round(y, 5))
print(i+": ",dataf_full)
running1_1 = ['1st', '2nd', '3rd', '4th', '5th']
plt.figure(figsize = (10, 5))
plt.bar(running1_1, dataf_full, color =j, width = 0.4)
plt.grid(axis='y')
for index, datas in enumerate(dataf_full):
plt.text(x=index, y=datas, s=datas, ha = 'center', va = 'bottom', fontweight='bold')
plt.xlabel("Running full cache "+i, fontweight='bold')
plt.ylabel("Time in seconds", fontweight='bold')
plt.title("Chart "+i+" full - Quicksort random (1..100.000, 10.000 spls) - with intra and inter cache - IntPy "+i+" version", fontweight='bold')
plt.savefig("chart_full_"+i+".png")
plt.close()
#plt.show()
# Parallel simulation of a bubble raft
**Important**: This code also works on a multi-host TPU setup without any changes! The key point with a multi-host TPU setup is to copy the file to every host (e.g. via ssh) and run it on all hosts at the same time. For details, please refer to this [notebook](https://github.com/probml/probml-notebooks/blob/main/notebooks/tpu_colab_tutorial.ipynb).
This notebook is based on the first example from the [JAX MD cookbook](https://colab.research.google.com/github/google/jax-md/blob/master/notebooks/jax_md_cookbook.ipynb), i.e. the bubble raft simulation example.
## Installation
```
!pip install -q git+https://www.github.com/google/jax-md
import jax
try:
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
except KeyError:
import os
jax.devices()
import os
import jax.numpy as np
from jax import jit
from jax import vmap, pmap
import jax.numpy as jnp
from jax import random
from jax import lax
from jax_md import space
from jax_md import simulate
from jax_md import energy
os.environ["XLA_USE_32BIT_LONG"] = "1"
jax.local_device_count()
```
## Hyperparameters
```
f32 = np.float32
ensemble_size = 1000
nlocal = 8
N = 32
dt = 1e-1
simulation_steps = np.arange(1000)
key = random.PRNGKey(0)
```
## Defining the box and the energy function
```
def box_size_at_number_density(particle_count, number_density):
return f32((particle_count / number_density) ** 0.5)
box_size = box_size_at_number_density(particle_count=N, number_density=1)
displacement, shift = space.periodic(box_size)
energy_fun = energy.soft_sphere_pair(displacement)
```
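As a quick sanity check of the number-density formula above (not part of the original notebook): with $N = 32$ particles at number density 1, the box is $\sqrt{32} \approx 5.66$ on each side.
```
# Sanity check (assumes the N=32, number_density=1 values used above)
print(box_size_at_number_density(particle_count=32, number_density=1))  # ~5.657
```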
## Defining the simulation
```
def simulation(key, temperature):
pos_key, sim_key = random.split(key)
R = random.uniform(pos_key, (N, 2), maxval=box_size)
init_fn, apply_fn = simulate.brownian(energy_fun, shift, dt, temperature)
state = init_fn(sim_key, R)
do_step = lambda state, t: (apply_fn(state, t=t), t)
state, _ = lax.scan(do_step, state, simulation_steps)
return state.position
```
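Before parallelising, here is a minimal single-run sketch (an illustrative addition, not in the original notebook): calling `simulation` with one key returns the final particle positions as an array of shape `(N, 2)`.
```
# Illustrative single-run usage: final positions of the N particles
positions = simulation(random.PRNGKey(1), 1e-5)
print(positions.shape)  # expected: (32, 2) for N = 32
```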
## Parallelising the simulation
```
vectorized_simulation = vmap(simulation, in_axes=(0, None))
parallel_vectorized_simulation = pmap(vectorized_simulation, in_axes=(0, None))
vectorized_energy = vmap(energy_fun)
parallel_vectorized_energy = pmap(vectorized_energy)
```
## Getting the random keys
```
simulation_keys_lst = []
for i in range(nlocal):
key, *simulation_keys = random.split(key, ensemble_size + 1)
simulation_keys = jnp.stack(simulation_keys)
simulation_keys_lst.append(simulation_keys)
simulation_keys = jnp.stack(simulation_keys_lst)
```
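A quick shape check may help here (added for illustration): `simulation_keys` stacks `nlocal` groups of `ensemble_size` PRNG keys, and `pmap` maps the leading axis over the local devices.
```
# Illustrative shape check: one key group per local device
print(simulation_keys.shape)  # expected: (nlocal, ensemble_size, 2) = (8, 1000, 2)
```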
## Running the simulation
```
bubble_positions = parallel_vectorized_simulation(simulation_keys, 1e-5)
bubble_energies = parallel_vectorized_energy(bubble_positions)
import numpy as onp
import matplotlib.pyplot as plt
def format_plot(x, y):
plt.xlabel(x, fontsize=20)
plt.ylabel(y, fontsize=20)
bubble_energies = jax.pmap(lambda x: jax.lax.all_gather(x, "i"), axis_name="i")(bubble_energies)[0]
counts, bins = onp.histogram(bubble_energies, bins=50)
plt.plot(bins[:-1] * 10**5, counts, "o")
format_plot("$E\\times 10 ^{-5}$", "$P(E)$")
plt.savefig("plot.png")
```
# Zelros technical test : improved version (word embedding)
## Word embedding using the GloVe vectors provided in the competition data
Import the necessary libraries
```
import sys
import os
dir_path = os.path.dirname(os.path.realpath("./src"))
sys.path.insert(0, dir_path)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import tensorflow as tf
from tqdm._tqdm_notebook import tqdm_notebook
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import math
import re
import heapq
from sklearn import metrics
from keras_tqdm_mod.tqdm_notebook_callback import TQDMNotebookCallback
tqdm_notebook.pandas()
Input = tf.keras.layers.Input
Bidirectional = tf.keras.layers.Bidirectional
CuDNNLSTM = tf.compat.v1.keras.layers.CuDNNLSTM
Dense = tf.keras.layers.Dense
Dropout = tf.keras.layers.Dropout
GlobalMaxPool1D = tf.keras.layers.GlobalMaxPool1D
print("Tensorflow version : {}".format(tf.__version__))
gpus = tf.config.experimental.list_physical_devices('GPU')
print("Num GPUs available: ", len(gpus))
tf.config.experimental.set_memory_growth(gpus[0], True)
```
Global variables to use in this code
```
maxlen = 30 # maximum number of words to be used per question
batch_size = 200 # Training batch size
validation_batch_size = 200 # Validation batch size
epochs = 2 # number of epochs
```
Create the network
```
model = tf.keras.Sequential()
model.add(Bidirectional(CuDNNLSTM(64, return_sequences = True), input_shape=(maxlen,300)))
model.add(Bidirectional(CuDNNLSTM(64, return_sequences = True)))
model.add(GlobalMaxPool1D())
model.add(Dense(16, activation="relu"))
model.add(Dropout(0.1))
model.add(Dense(1, activation="sigmoid"))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.summary())
```
Get the pretrained GloVe embedding data
```
# Source https://blog.keras.io/using-pre-trained-word-embeddings-in-a-keras-model.html
embeddings_index = {}
f = open("../dataset/embeddings/glove.840B.300d/glove.840B.300d.txt", "r", encoding="utf8")
for line in tqdm_notebook(f, desc="Reading Glove data"):
values = line.split(" ")
word = values[0]
coefs = np.asarray(values[1:], dtype="float32")
embeddings_index[word] = coefs
f.close()
print("Found %s word vectors." % len(embeddings_index))
```
Read the dataset
```
data_df = pd.read_csv("../dataset/train.csv")
print("{} training data available".format(data_df.shape[0]))
```
Remove punctuation and unnecessary spaces in the sentences, and convert them to lowercase
```
def rm_double_spaces(sentence):
sentence = re.sub(r'\s+',' ',sentence) # Remove multiple space
if sentence[-1]==" ": # Remove useless space at the end of the sentence
sentence = sentence[:-1]
return sentence
data_df["question_text"] = data_df["question_text"].progress_apply(rm_double_spaces)
print("Check the result on the first sentence : {}".format(data_df["question_text"][0]))
```
Split the dataset into training and validation data
```
train_df, val_df = train_test_split(data_df, test_size=0.1)
del data_df #no longer needed
```
Check the split sizes and class balance
```
percentage_in_train = train_df.groupby("target").count()["qid"][1]/train_df.shape[0]
percentage_in_val = val_df.groupby("target").count()["qid"][1]/val_df.shape[0]
print(f"Train dataset size: {train_df.shape[0]}, validation size: {val_df.shape[0]}, "
f"{math.floor(val_df.shape[0]*100/train_df.shape[0])}% of the training dataset size")
print("Percentage of positives in train = {:.2f} and in val {:.2f}".format(percentage_in_train,percentage_in_val))
```
Create the `GloVe embedding` from the training data (each word is a vector of length 300)
```
#Convert to embeddings
def question_to_vect(question):
empty_emb = np.zeros(300)
words = question.split()[:maxlen]
embeds = [embeddings_index.get(x, empty_emb) for x in words] # Get the embedding if it exists otherwise empty_emb
    embeds += [empty_emb] * (maxlen - len(embeds)) # Pad with empty_emb if the question is shorter than maxlen
return np.array(embeds, dtype=np.float16)
```
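As a small illustration (the sentence below is made up), every question maps to a fixed-size `(maxlen, 300)` array, zero-padded when the question is shorter than `maxlen`:
```
# Illustrative check: output shape is (maxlen, 300) regardless of question length
example = question_to_vect("How does gradient descent work")
print(example.shape) # expected: (30, 300)
```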
Define a training generator to feed data to the network, and a validation data generator to check the progress
```
def training_generator(_train_df):
nb_batches = _train_df.shape[0]//batch_size
while True:
_train_df = _train_df.sample(frac=1) # shuffle the data
for i in range(nb_batches):
questions = _train_df.iloc[i*batch_size:(i+1)*batch_size, 1]
vect_questions = np.asarray([question_to_vect(question) for question in questions])
yield (np.asarray(vect_questions), np.asarray(_train_df["target"][i*batch_size:(i+1)*batch_size].values))
def validation_generator(_val_df, predict=False):
nb_batches = _val_df.shape[0]//validation_batch_size
while True:
for i in range(nb_batches):
questions = _val_df.iloc[i*batch_size:(i+1)*batch_size, 1]
vect_questions = np.asarray([question_to_vect(question) for question in questions])
if not predict:
yield (np.asarray(vect_questions),np.asarray(_val_df["target"][i*batch_size:(i+1)*batch_size].values))
else:
yield np.asarray(vect_questions)
```
Train the network
```
generator = training_generator(train_df)
print("steps per epoch = {}, epochs = {}, batch_size = {}".format(train_df.shape[0] // batch_size, epochs, batch_size))
model.fit_generator(generator, steps_per_epoch=train_df.shape[0] // batch_size, epochs=epochs, verbose=0,
callbacks=[TQDMNotebookCallback()])
```
Evaluate the network
```
results = model.evaluate_generator(validation_generator(val_df),val_df.shape[0]//validation_batch_size)
print("On the validation dataset the loss is {:.3f} and accuracy is {:.3f}".format(results[0], results[1]))
```
Compute the predictions for all validation data
```
predictions_val = model.predict_generator(validation_generator(val_df, predict=True),
steps = val_df.shape[0]//validation_batch_size)
```
Use the F1 score to compute the threshold for insincere questions
```
_max=0
threshold = 0
for thresh_test in np.arange(0.01, 0.51, 0.01):
thresh_test = np.round(thresh_test,2)
F1_score = metrics.f1_score(val_df.iloc[:(val_df.shape[0]//validation_batch_size)*validation_batch_size, 2],
(predictions_val>thresh_test).astype(int))
if F1_score>_max: _max,threshold = F1_score, thresh_test
print("F1 score at the threshold {} is {}".format(thresh_test,F1_score))
print("\nBest results for a threshold of {} with F1 score of {}".format(threshold, _max))
```
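For reference, the F1 score used above is the harmonic mean of precision $P$ and recall $R$, $F_1 = 2PR/(P+R)$, which makes it a more informative threshold-selection criterion than accuracy when the classes are imbalanced.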
# Example 3: Acceleration due to gravity
To illustrate how to use `emulator` and `calibrator` objects, we will give another example involving free-falling objects.
First, import the main libraries we use for this example:
```
import numpy as np
from matplotlib import pyplot as plt
import scipy.stats as sps
from surmise.emulation import emulator
from surmise.calibration import calibrator
```
## Data: Falling ball
The data include 63 field observations at 21 heights, with three replicates at each height. Let's read the real data first, and then visualize it:
```
# Read the data
ball = np.loadtxt('ball.csv', delimiter=',')
m = len(ball)
# height
xrep = np.reshape(ball[:, 0], (m, 1))
x = xrep[0:21]
# time
y = np.reshape(ball[:, 1], ((m, 1)))
# Observe the data
plt.scatter(xrep, y, color='red')
plt.xlabel("height (meters)")
plt.ylabel("time (seconds)")
plt.show()
```
## Computer model experiments
The time $t$ to drop the ball at a distance $h$ is $t = \sqrt{2h/g}$ for gravity $g$. Here, the gravity $g$ is our calibration parameter $\theta$ because we don't know the actual value of $g$ for the location where the ball is dropped. We consider the computer implementation $f(x, \theta)$ of the mathematical model, which maps standardized inputs $(x, \theta)$ in $[0, 1]$ back to $(h, g)$ in order to compute $t$.
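As a quick worked example of the formula (values chosen purely for illustration): dropping from $h = 10$ m with $g = 9.81\ \mathrm{m/s^2}$ gives $t = \sqrt{2 \cdot 10 / 9.81} \approx 1.43$ s.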
```
# Computer implementation of the mathematical model
def timedrop(x, theta, hr, gr):
'''
Parameters
----------
x : m x 1 array
Input settings.
theta : n x 1 array
Parameters to be calibrated.
hr : Array of size 2
min and max value of height.
gr : Array of size 2
min and max value of gravity.
Returns
-------
m x n array
m x n computer model evaluations.
'''
# Assume x and theta are within (0, 1)
min_g = min(gr)
range_g = max(gr) - min(gr)
min_h = min(hr)
range_h = max(hr) - min_h
f = np.zeros((theta.shape[0], x.shape[0]))
for k in range(0, theta.shape[0]):
g = range_g*theta[k] + min_g
h = range_h*x + min_h
f[k, :] = np.sqrt(2*h/g).reshape(x.shape[0])
return f.T
```
We run the `timedrop()` function at 21 different unique locations, so that $\mathbf{x}$ is an $m \times p$ input matrix with $m = 21$ and $p = 1$.
## Prior specification
For this example, we define a uniform prior for $g$ such that $g$ ~ $U(6, 15)$. We perform computer model simulations at $n = 100$ random settings of the calibration parameter $g$, and obtain an $m \times n$ model output matrix $\mathbf{f}$.
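Note that the prior in the next cell is specified on the standardized scale $[0, 1]$, and a draw is mapped back to gravity using the `theta_range` defined there. A minimal sketch of that mapping (illustrative only, mirroring what `timedrop` does internally):
```
# Illustrative mapping of a standardized draw back to gravity (assumes theta_range = [6, 15])
theta_std = 0.5
g = 6 + (15 - 6) * theta_std # = 10.5, the midpoint of the prior range
print(g)
```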
```
# Define prior
class prior_balldrop:
""" This defines the class instance of priors provided to the method. """
def lpdf(theta):
return sps.uniform.logpdf(theta[:, 0], 0, 1).reshape((len(theta), 1))
def rnd(n):
return np.vstack((sps.uniform.rvs(0, 1, size=n)))
# Draw 100 random parameters from uniform prior
n = 100
theta = prior_balldrop.rnd(n)
theta_range = np.array([6, 15])
# Standardize
x_range = np.array([min(x), max(x)])
x_std = (x - min(x))/(max(x) - min(x))
xrep_std = (xrep - min(xrep))/(max(xrep) - min(xrep))
# Obtain computer model output
f = timedrop(x_std, theta, x_range, theta_range)
print(np.shape(theta))
print(np.shape(x_std))
print(np.shape(f))
```
## Model emulation
Let's build an emulator for computer model runs:
```
emulator_1 = emulator(x=x_std, theta=theta, f=f, method='PCGP')
```
### Comparison of emulation methodologies
One way to test the accuracy of the emulators is to create hold-out simulation runs, and compare the values predicted by the emulator with the simulated values. To do this, let's first generate random draws of parameters, and evaluate the computer model at those values.
```
#Generate random reasonable theta values
n_test = 1000
theta_test = prior_balldrop.rnd(n_test)
print(np.shape(theta_test))
# Obtain computer model output
f_test = timedrop(x_std, theta_test, x_range, theta_range)
print(np.shape(f_test))
#Predict
p_1 = emulator_1.predict(x_std, theta_test)
p_1_mean, p_1_var = p_1.mean(), p_1.var()
print('SSE PCGP = ', np.round(np.sum((p_1_mean - f_test)**2), 2))
print('Rsq PCGP = ', 1 - np.round(np.sum(np.square(p_1_mean - f_test))/np.sum(np.square(f_test.T - np.mean(f_test, axis = 1))), 2))
```
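For reference, the quantity printed above is the usual coefficient of determination over all hold-out runs, $R^2 = 1 - \sum_{i,j}(\hat{f}_{ij} - f_{ij})^2 \big/ \sum_{i,j}(f_{ij} - \bar{f}_i)^2$, where $\bar{f}_i$ is the mean simulated output at location $x_i$ across the test parameters.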
## Model calibration
```
def plot_pred(x_std, xrep, y, cal, theta_range):
fig, axs = plt.subplots(1, 4, figsize=(14, 3))
cal_theta = cal.theta.rnd(1000)
cal_theta = cal_theta*(theta_range[1] - theta_range[0]) + theta_range[0]
axs[0].plot(cal_theta)
axs[1].boxplot(cal_theta)
axs[2].hist(cal_theta)
post = cal.predict(x_std)
rndm_m = post.rnd(s = 1000)
upper = np.percentile(rndm_m, 97.5, axis = 0)
lower = np.percentile(rndm_m, 2.5, axis = 0)
median = np.percentile(rndm_m, 50, axis = 0)
axs[3].plot(xrep[0:21].reshape(21), median, color = 'black')
axs[3].fill_between(xrep[0:21].reshape(21), lower, upper, color = 'grey')
axs[3].plot(xrep, y, 'ro', markersize = 5, color='red')
plt.show()
obsvar = np.maximum(0.2*y, 0.1)
# Fit a calibrator with emulator 1 via method = 'directbayes' and 'sampler' = 'metropolis_hastings'
cal_1 = calibrator(emu=emulator_1,
y=y,
x=xrep_std,
thetaprior=prior_balldrop,
method='directbayes',
yvar=obsvar,
args={'theta0': np.array([[0.4]]),
'numsamp' : 1000,
'stepType' : 'normal',
'stepParam' : [0.3]})
plot_pred(x_std, xrep, y, cal_1, theta_range)
# Fit a calibrator via method = 'directbayeswoodbury' and 'sampler' : 'LMC'
cal_2 = calibrator(emu=emulator_1,
y=y,
x=xrep_std,
thetaprior=prior_balldrop,
method='directbayeswoodbury',
yvar=obsvar)
plot_pred(x_std, xrep, y, cal_2, theta_range)
# Fit a calibrator via method = 'directbayes' and 'sampler' : 'LMC'
cal_3 = calibrator(emu=emulator_1,
y=y,
x=xrep_std,
thetaprior=prior_balldrop,
method='directbayes',
yvar=obsvar,
args={'sampler': 'LMC',
'theta0': prior_balldrop.rnd(1000)})
plot_pred(x_std, xrep, y, cal_3, theta_range)
```
<div style="display:flex;">
<span style="margin-top:auto; margin-bottom:auto; margin-right:0.5em;"> Open notebook in binder: </span>
<span style="margin-top:auto; margin-bottom:auto;"><a href="https://mybinder.org/v2/gh/jeromerg/filoc/master?filepath=examples%2Fcovid_github%2Fexample_covid_github.ipynb"><img src="https://mybinder.org/badge_logo.svg" width="150"></a></span>
</div>
```
### Improve rendering with custom style
from IPython.display import HTML
with open( './custom.css', 'r' ) as f:
display(HTML(f"<style>{f.read()}</style>"));
import os
import sys
sys.path.insert(0,os.path.abspath('../'))
from filoc import filoc
from pandas import DataFrame
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import HTML
from collections import namedtuple
```
# Covid Data Analysis
This notebook loads the covid statistics from the [Johns Hopkins University Github repository](https://github.com/CSSEGISandData), cleans them and visualizes them dynamically.
It illustrates how the <span class="filoc">filoc</span> framework makes it possible to quickly read multiple files into a single DataFrame.
Remark: Throughout this notebook, the star ⭐ indicates places illustrating the use of the filoc library.
## Load raw data ⭐
The [Johns Hopkins University Github repository](https://github.com/CSSEGISandData) stores the daily historical data in separate CSV files. The <span class="filoc">filoc</span> framework can read multiple files in almost any folder structure, and it has a pre-configured CSV backend, so you can read the whole data set by simply instantiating the following <span class="filoc">filoc</span>:
```
# Declare the filoc instance
loc = filoc(
locpath='github://CSSEGISandData:COVID-19@/csse_covid_19_data/csse_covid_19_daily_reports/{date_str}.csv',
backend='csv',
encoding='utf-8-sig',
cache_locpath='~/temp/filoc_cache' # disable this line to deactivate caching and allow to reload the data from the repository
)
```
More about the <span class="filoc">filoc</span> parameters:
- `locpath`: (required) Defines the location path of the files to read. Here we use the [fsspec](https://filesystem-spec.readthedocs.io/en/latest/index.html) `github://` protocol to read the data directly from the github repository (a minimal usage sketch follows this list).
- `backend`: (default `'json'`) Defines the backend that must be used to read the files. <span class="filoc">filoc</span> comes with four predefined backends: `'json'`, `'yaml'`, `'csv'`, `'pickle'`, but you can also implement your own backend if needed.
- `encoding`: (optional, default `'utf-8'`) Backend file encoding passed to the backend. The files contain the [BOM](https://de.wikipedia.org/wiki/Byte_Order_Mark), so we need the `'utf-8-sig'` encoding
- `cache_locpath`: (optional, default `None`) Caches the backend loaded content to a local file, to speed up multiple reads
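The same pattern works for other layouts and formats. As a hypothetical illustration (the path and placeholder names below are made up), a filoc reading local per-station JSON files could look like this:
```
# Hypothetical example: local JSON files organized as data/{year}/{station}.json
loc_local = filoc(
    locpath='data/{year}/{station}.json',
    backend='json',        # the default backend
    cache_locpath=None     # no caching needed for local files
)
```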
First, we check that the <span class="filoc">filoc</span> works as expected, and that the expected files are loaded:
```
# check, whether filoc works properly
loc.list_paths()[:10]
```
... everything is fine: we see that <span class="filoc">filoc</span> fetched the file information from the github repository.
Now we can download the data into a pandas DataFrame. This step can last a few minutes, depending on your internet connection:
```
df_raw = loc.read_contents()
print(len(df_raw))
```
The whole covid-19 data set has been loaded... now we can start the data cleaning!
## Explore and clean data
### Column Cleaning
First make a copy... so that you can revert the data to the original state during the exploratory "trial and error" work
```
df_all = df_raw.copy()
```
Check the downloaded columns
```
df_all.dtypes
```
The CSV backend does not perform any conversion, so all columns are loaded as strings (pandas `object` column type).
After a "trial and error" session, the column cleaning looks as follows:
```
# Basic column cleaning
df_all['country'] = df_all['Country/Region'].combine_first(df_all['Country_Region']).fillna("").replace(["None"], "")
df_all['province'] = df_all['Province/State'].combine_first(df_all['Province_State']).fillna("").replace(["None"], "")
df_all['date'] = pd.to_datetime(df_all['date_str'])
df_all['confirmed'] = pd.to_numeric(df_all['Confirmed'], errors='coerce')
df_all['recovered'] = pd.to_numeric(df_all['Recovered'], errors='coerce')
df_all['deaths'] = pd.to_numeric(df_all['Deaths'], errors='coerce')
df_all = df_all[['country', 'province', 'date', 'confirmed', 'recovered', 'deaths']].copy()
df_all = df_all.reset_index(drop=True)
```
Now we can re-check the columns after cleaning:
```
df_all.dtypes
```
### Country / Province Cleaning
The downloaded data are historical raw data and contain some variations and errors that we need to fix. Again, this is iterative "trial and error" work.
First we need an efficient visualization that packs as much useful information into as few lines as possible.
Here we aggregate the data by country/province:
```
# build group-by (including helper function to rebuild the group-by sets)
df_by_country = None
df_by_country_province = None
def make_groups():
global df_by_country, df_by_country_province
df_by_country = df_all.groupby(by=['country'])
df_by_country_province = df_all.groupby(by=['country', 'province'])
make_groups()
len(df_by_country_province)
```
and we can display the aggregations:
```
df = df_by_country_province.agg([('Min' , 'min'), ('Max', 'max')])
with pd.option_context("display.max_rows", 10): # change the count of rows, to display more!
display(df)
```
If you look through the long list, you will notice some naming issues as well as date range issues. For a few countries, the data granularity increased at some point in the past from country-level data to province-level data, so we need to compute the country-level aggregations ourselves from that point on.
So after a few "trial and error" iterations, we get the following data cleaning:
```
# Special fixes based on date min-max explorations
# Rename 'Hong Kong' country name to 'Hong Kong SAR'
df_all.loc[ df_all['country'] == 'Hong Kong' , 'country'] = 'Hong Kong SAR'
# Rename 'Mainland China' country name to 'China'
df_all.loc[ df_all['country'] == 'Mainland China', 'country'] = 'China'
# Delete Country/Province: France/France
df_all = df_all.drop(df_all.index[(df_all['country'] == 'France') & (df_all['province'] == 'France')])
# For 'United Kingdom' Country, rename provinces 'UK' and '' to 'United Kingdom'
df_all.loc[ (df_all['country'] == 'United Kingdom') & df_all['province'].isin(['UK', '']), 'province'] = 'United Kingdom'
# For 'Germany' Country, rename provinces 'Bavaria' to 'Bayern'
df_all.loc[ (df_all['country'] == 'Germany' ) & df_all['province'].isin(['Bavaria']), 'province'] = 'Bayern'
# For 'France' Country, unnamed province '' is not the aggregate of province subdivisions, but the Mainland
df_all.loc[ (df_all['country'] == 'France' ) & df_all['province'].isin(['']) , 'province'] = 'Mainland'
# Rename all empty province '' by '<all>' (the aggregation at country level):
df_all.loc[df_all['province'] == '', 'province'] = '<all>'
# refresh group-bys
make_groups()
# Fix country data where country-level data were replaced by province-level data -> we now need to aggregate the province data ourselves
Fix = namedtuple("Fix", ["country", "last_aggr_date"])
fixes = [
Fix('Australia' , '2020-01-01'),
Fix('Brazil' , '2020-05-19'),
Fix('Canada' , '2010-01-01'),
Fix('Chile' , '2020-05-19'),
Fix('China' , '2010-01-01'),
Fix('Colombia' , '2020-05-27'),
Fix('Germany' , '2020-05-13'),
Fix('India' , '2020-06-09'),
Fix('Italy' , '2020-05-13'),
Fix('Japan' , '2020-05-27'),
Fix('China' , '2010-01-01'),
Fix('Mexico' , '2020-05-19'),
Fix('Netherlands', '2020-07-16'),
Fix('Pakistan' , '2020-06-09'),
Fix('Peru' , '2020-05-27'),
Fix('Russia' , '2020-05-31'),
Fix('Spain' , '2020-05-13'),
Fix('Sweden' , '2020-06-04'),
Fix('Ukraine' , '2020-05-31'),
]
for fix in fixes:
print(f'Fixing {fix.country}')
aggr_data_to_delete = df_all.index[ (df_all['country'] == fix.country) & (df_all['province'] == '<all>') & (df_all['date'] > fix.last_aggr_date) ]
data_to_aggr = df_all [ (df_all['country'] == fix.country) & (df_all['province'] != '<all>') & (df_all['date'] > fix.last_aggr_date) ]
df_all = df_all.drop(aggr_data_to_delete)
aggr_data_to_add = data_to_aggr.groupby('date').sum()
aggr_data_to_add['country'] = fix.country
aggr_data_to_add['province'] = '<all>'
aggr_data_to_add['date'] = aggr_data_to_add.index
df_all = pd.concat([df_all, aggr_data_to_add], ignore_index=True)
make_groups()
```
## Save data by country / province ⭐
Just to illustrate an interesting feature of the <span class="filoc">filoc</span> framework: you can easily save the cleaned data into an alternative file structure suiting your needs. Here for example, we save the historical data by country / province.
First we instantiate a <span class="filoc">filoc</span> with the expected new path structure:
```
loc_save = filoc(
locpath='~/temp/covid/{country}/{province}/whole_history.csv',
backend='csv',
singleton=False,
writable=True
)
```
Then just save the data:
```
loc_save.write_contents(df_all)
```
You can check the first few files created by <span class="filoc">filoc</span>:
```
! find ~/temp/covid -type f 2>/dev/null | head -n 10 # change 10 to increase the amount of printed lines
```
## Display Data with matplotlib
### Plot history for a single country
```
country_province_filter = ('France' , 'Mainland',)
indicators = [ 'confirmed', 'recovered', 'deaths' ]
plt.rcParams['figure.figsize'] = [15, 8]
ax = plt.gca()
df_cp = df_by_country_province.get_group(country_province_filter)
df_cp.plot(x='date', y=indicators, ax=ax);
```
### Plot the 'confirmed' indicator for multiple countries
```
indicator = 'confirmed'
country_province_filter = [
('France' , 'Mainland'),
('Germany', '<all>'),
('Spain' , '<all>'),
]
plt.rcParams['figure.figsize'] = [15, 8]
ax = plt.gca()
ax.set_title(indicator)
for cp in country_province_filter:
label = f'{cp[0]} {cp[1]}'
df_cp = df_by_country_province.get_group(cp)
df_cp.plot(x='date', y=indicator, ax=ax, label=label);
```
## Display a dash/plotly Dashboard
```
from jupyter_dash import JupyterDash
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# ENABLE THIS WHEN RUNNING IN JupyterHub or Binder
#JupyterDash.infer_jupyter_proxy_config()
# helper function
def to_options(values, unique=True, sort=True):
""" helper dropdown options builder """
if unique: values = values.unique()
if sort : values = sorted(values)
return [{'label': i, 'value': i} for i in values]
# Build the app
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = JupyterDash('Covid 19 Stats', external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div([
html.Div([
html.Div([
'''Choose a country:''',
dcc.Dropdown('country', to_options(df_all.country)),
], style={'width': '49%', 'display': 'inline-block'}),
html.Div([
'''Choose a province:''',
dcc.Dropdown('province', disabled=True),
], style={'width': '49%', 'float': 'right', 'display': 'inline-block'}),
], style={
'borderBottom': 'thin lightgrey solid',
'backgroundColor': 'rgb(250, 250, 250)',
'padding': '10px 5px'}),
html.Div([
dcc.Graph('country-chart')
], style={'width': '98%', 'display': 'inline-block', 'padding': '0 20'}),
])
@app.callback([
Output('province', 'disabled'),
Output('province', 'options'),
], Input('country', 'value'))
def update_province(country):
if country != '':
return False, to_options(df_all.province[df_all['country'] == country])
else:
return True, None
@app.callback(Output('country-chart', 'figure'), [Input('country', 'value'), Input('province', 'value')])
def update_graph(country, province):
df = df_all[ (df_all['country'] == country) & (df_all['province'] == province) ]
indicator = 'deaths'
return {
'data': [ {
'x' : df['date'],
'y' : df['confirmed'],
'mode' : 'lines',
'name' : 'confirmed'
}, {
'x' : df['date'],
'y' : df['deaths'],
'mode' : 'lines',
'name' : 'deaths'
}],
'layout': {
'height': 500,
'margin': {'l': 30, 'b': 30, 'r': 10, 't': 10},
'annotations': [{
'x': 0, 'y': 0.85, 'xanchor': 'left', 'yanchor': 'bottom',
'xref': 'paper', 'yref': 'paper', 'showarrow': False,
'align': 'left', 'bgcolor': 'rgba(255, 255, 255, 0.5)',
'text': f'{country} ({province}) {indicator}'
}],
'yaxis': {'type': 'linear' },
'xaxis': {'showgrid': False}
}
}
```
Finally run the Dash app!
Remark: in the static HTML preview, the following cell remains empty, as its content requires the Dash server to run.
```
# run the app
#app.run_server(mode="jupyterlab")
app.run_server(mode="inline")
```
```python
# run the app
#app.run_server(mode="jupyterlab")
app.run_server(mode="inline")
```

# A Simple Autoencoder
We'll start off by building a simple autoencoder to compress the MNIST dataset. With autoencoders, we pass input data through an encoder that makes a compressed representation of the input. Then, this representation is passed through a decoder to reconstruct the input data. Generally the encoder and decoder will be built with neural networks, then trained on example data.

In this notebook, we'll build a simple network architecture for the encoder and decoder. Let's get started by importing our libraries and getting the dataset.
```
%matplotlib inline
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', validation_size=0)
```
Below I'm plotting an example image from the MNIST dataset. These are 28x28 grayscale images of handwritten digits.
```
img = mnist.train.images[2]
plt.imshow(img.reshape((28, 28)), cmap='Greys_r')
```
We'll train an autoencoder with these images by flattening them into 784 length vectors. The images from this dataset are already normalized such that the values are between 0 and 1. Let's start by building basically the simplest autoencoder with a **single ReLU hidden layer**. This layer will be used as the compressed representation. Then, the encoder is the input layer and the hidden layer. The decoder is the hidden layer and the output layer. Since the images are normalized between 0 and 1, we need to use a **sigmoid activation on the output layer** to get values matching the input.

> **Exercise:** Build the graph for the autoencoder in the cell below. The input images will be flattened into 784 length vectors. The targets are the same as the inputs. And there should be one hidden layer with a ReLU activation and an output layer with a sigmoid activation. The loss should be calculated with the cross-entropy loss, there is a convenient TensorFlow function for this `tf.nn.sigmoid_cross_entropy_with_logits` ([documentation](https://www.tensorflow.org/api_docs/python/tf/nn/sigmoid_cross_entropy_with_logits)). You should note that `tf.nn.sigmoid_cross_entropy_with_logits` takes the logits, but to get the reconstructed images you'll need to pass the logits through the sigmoid function.
```
# Size of the encoding layer (the hidden layer)
encoding_dim = 32 # feel free to change this value
image_size = mnist.train.images.shape[1]
learning_rate = 0.001
inputs_ = tf.placeholder(tf.float32, (None, image_size))
targets_ = tf.placeholder(tf.float32, (None, image_size))
# Output of hidden layer
encoded = tf.layers.dense(inputs_, encoding_dim, activation=tf.nn.relu)
# Output layer logits
logits = tf.layers.dense(encoded, image_size)
# Sigmoid output from logits
decoded = tf.nn.sigmoid(logits)
# Sigmoid cross-entropy loss
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=targets_)
# Mean of the loss
cost = tf.reduce_mean(loss)
# Adam optimizer
opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
```
## Training
```
# Create the session
sess = tf.Session()
```
Here I'll write a bit of code to train the network. I'm not too interested in validation here, so I'll just monitor the training loss.
Calling `mnist.train.next_batch(batch_size)` will return a tuple of `(images, labels)`. We're not concerned with the labels here; we just need the images. Otherwise this is pretty straightforward training with TensorFlow. We initialize the variables with `sess.run(tf.global_variables_initializer())`. Then we run the optimizer and get the loss with `batch_cost, _ = sess.run([cost, opt], feed_dict=feed)`.
```
epochs = 20
batch_size = 200
sess.run(tf.global_variables_initializer())
for e in range(epochs):
for ii in range(mnist.train.num_examples//batch_size):
batch = mnist.train.next_batch(batch_size)
feed = {inputs_: batch[0], targets_: batch[0]}
batch_cost, _ = sess.run([cost, opt], feed_dict=feed)
print("Epoch: {}/{}...".format(e+1, epochs),
"Training loss: {:.4f}".format(batch_cost))
```
## Checking out the results
Below I've plotted some of the test images along with their reconstructions. For the most part these look pretty good except for some blurriness in some parts.
```
fig, axes = plt.subplots(nrows=2, ncols=10, sharex=True, sharey=True, figsize=(20,4))
in_imgs = mnist.test.images[:10]
reconstructed, compressed = sess.run([decoded, encoded], feed_dict={inputs_: in_imgs})
for images, row in zip([in_imgs, reconstructed], axes):
for img, ax in zip(images, row):
ax.imshow(img.reshape((28, 28)), cmap='Greys_r')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
fig.tight_layout(pad=0.1)
sess.close()
```
## Up Next
We're dealing with images here, so we can (usually) get better performance using convolutional layers. So, next we'll build a better autoencoder with convolutional layers.
In practice, autoencoders aren't actually better at compression than typical methods like JPEGs and MP3s. But they are useful for noise reduction, and you'll build a denoising autoencoder as well.
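As a quick preview (a minimal, hedged sketch only, not the model built in the next notebook; the layer sizes and variable names here are assumptions for illustration), one encoder step of a convolutional autoencoder in the same TF 1.x style might look like this:
```
# Illustrative sketch only: a single conv + pool encoder step (assumed shapes).
import tensorflow as tf
inputs_img = tf.placeholder(tf.float32, (None, 28, 28, 1))
conv1 = tf.layers.conv2d(inputs_img, filters=16, kernel_size=(3, 3),
                         padding='same', activation=tf.nn.relu)                    # 28x28x1 -> 28x28x16
encoded_small = tf.layers.max_pooling2d(conv1, pool_size=(2, 2), strides=(2, 2))   # -> 14x14x16
```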
<a href="https://www.skills.network/"><img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBM-DL0120ENedX/labs/Template%20for%20Instructional%20Hands-on%20Labs/images/IDSNlogo.png" width="400px" align="center"></a>
<h2>LOGISTIC REGRESSION WITH TENSORFLOW</h2>
<h3>Objective for this Notebook</h3>
<h5> 1. What is different between Linear and Logistic Regression?</h5>
<h5> 2. Utilizing Logistic Regression in TensorFlow. </h5>
<h5> 3. Training the model </h5>
## Table of Contents
Logistic Regression is one of the most important techniques in data science. It is usually used to solve the classic classification problem.
<div class="alert alert-block alert-info" style="margin-top: 20px">
<font size = 3><strong>This lesson covers the following concepts of Logistics Regression:</strong></font>
<br>
<h2>Table of Contents</h2>
<ol>
<li><a href="#ref1">Linear Regression vs Logistic Regression</a></li>
<li><a href="#ref2">Utilizing Logistic Regression in TensorFlow</a></li>
<li><a href="#ref3">Training</a></li>
</ol>
</div>
<p></p>
<br>
<hr>
<a id="ref1"></a>
<h2>What is different between Linear and Logistic Regression?</h2>
While Linear Regression is suited for estimating continuous values (e.g. estimating house price), it is not the best tool for predicting the class to which an observed data point belongs. In order to provide an estimate for classification, we need some sort of guidance on what would be the <b>most probable class</b> for that data point. For this, we use <b>Logistic Regression</b>.
<div class="alert alert-success alertsuccess" style="margin-top: 20px">
<font size="3"><strong>Recall linear regression:</strong></font>
<br>
<br>
Linear regression finds a function that relates a continuous dependent variable, <i>y</i>, to some predictors (independent variables <i>x1</i>, <i>x2</i>, etc.). Simple linear regression assumes a function of the form:
<br><br>
$$
y = w0 + w1 \times x1 + w2 \times x2 + \cdots
$$
<br>
and finds the values of <i>w0</i>, <i>w1</i>, <i>w2</i>, etc. The term <i>w0</i> is the "intercept" or "constant term" (it's shown as <i>b</i> in the formula below):
<br><br>
$$
Y = W X + b
$$
<p></p>
</div>
Logistic Regression is a variation of Linear Regression, useful when the observed dependent variable, <i>y</i>, is categorical. It produces a formula that predicts the probability of the class label as a function of the independent variables.
Despite the name logistic <i>regression</i>, it is actually a <b>probabilistic classification</b> model. Logistic regression fits a special s-shaped curve by taking the linear regression and transforming the numeric estimate into a probability with the following function:
$$
ProbabilityOfaClass = \theta(y) = \frac{e^y}{1 + e^y} = \exp(y) / (1 + \exp(y)) = p
$$
which produces p-values between 0 (as y approaches minus infinity $-\infty$) and 1 (as y approaches plus infinity $+\infty$). This now becomes a special kind of non-linear regression.
In this equation, <i>y</i> is the regression result (the sum of the variables weighted by the coefficients), <code>exp</code> is the exponential function and $\theta(y)$ is the <a href="http://en.wikipedia.org/wiki/Logistic_function">logistic function</a>, also called logistic curve. It is a common "S" shape (sigmoid curve), and was first developed for modeling population growth.
You might also have seen this function before, in another configuration:
$$
ProbabilityOfaClass = \theta(y) = \frac{1}{1+e^{-y}}
$$
So, briefly, Logistic Regression passes the input through the logistic/sigmoid function but then treats the result as a probability:
<img src="https://ibm.box.com/shared/static/kgv9alcghmjcv97op4d6onkyxevk23b1.png" width="400" align="center">
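To make the behaviour of this function concrete, here is a small numeric check (an illustrative snippet added here, not part of the original lab; the input values are arbitrary examples):
```
# Illustrative only: the logistic function maps any real-valued score into (0, 1).
import numpy as np
y = np.array([-4.0, 0.0, 4.0])   # example regression outputs
p = 1.0 / (1.0 + np.exp(-y))     # theta(y)
print(p)                         # approximately [0.018, 0.5, 0.982]
```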
* * *
<a id="ref2"></a>
<h2>Utilizing Logistic Regression in TensorFlow</h2>
We begin by installing TensorFlow version 2.2.0 and its required prerequisites.
```
!pip install grpcio==1.24.3
!pip install tensorflow==2.2.0
```
**Restart kernel for latest version of TensorFlow to be activated**
For us to utilize Logistic Regression in TensorFlow, we first need to import the required libraries. To do so, you can run the code cell below.
```
import tensorflow as tf
import pandas as pd
import numpy as np
import time
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
if not tf.__version__ == '2.2.0':
print(tf.__version__)
raise ValueError('please upgrade to TensorFlow 2.2.0, or restart your Kernel (Kernel->Restart & Clear Output)')
```
IMPORTANT! => Please restart the kernel by clicking on "Kernel" -> "Restart and Clear Output" and wait until all output disappears. Then your changes will be picked up.
Next, we will load the dataset we are going to use. In this case, we are utilizing the <code>iris</code> dataset, which is inbuilt -- so there's no need to do any preprocessing and we can jump right into manipulating it. We separate the dataset into <i>xs</i> and <i>ys</i>, and then into training <i>xs</i> and <i>ys</i> and testing <i>xs</i> and <i>ys</i>, (pseudo)randomly.
<h3>Understanding the Data</h3>
<h4><code>Iris Dataset</code>:</h4>
This dataset was introduced by the British statistician and biologist Ronald Fisher. It consists of 50 samples from each of three species of Iris (Iris setosa, Iris virginica and Iris versicolor). In total it has 150 records under five attributes - petal length, petal width, sepal length, sepal width and species. <a href="https://archive.ics.uci.edu/ml/datasets/iris">Dataset source</a>
Attributes
Independent Variable
<ul>
<li>petal length</li>
<li>petal width</li>
<li>sepal length</li>
<li>sepal width</li>
</ul>
Dependent Variable
<ul>
<li>Species
<ul>
<li>Iris setosa</li>
<li>Iris virginica</li>
<li>Iris versicolor</li>
</ul>
</li>
</ul>
<br>
```
iris = load_iris()
iris_X, iris_y = iris.data[:-1,:], iris.target[:-1]
iris_y= pd.get_dummies(iris_y).values
trainX, testX, trainY, testY = train_test_split(iris_X, iris_y, test_size=0.33, random_state=42)
```
Now we define x and y. These variables will hold our iris data (both the features and label matrices). We also need to give them shapes that correspond to the shape of our data.
```
# numFeatures is the number of features in our input data.
# In the iris dataset, this number is '4'.
numFeatures = trainX.shape[1]
print('numFeatures is : ', numFeatures )
# numLabels is the number of classes our data points can be in.
# In the iris dataset, this number is '3'.
numLabels = trainY.shape[1]
print('numLabels is : ', numLabels )
X = tf.Variable(trainX.astype('float32'))   # Iris has 4 features, so X holds the training feature matrix (one row per flower).
yGold = tf.Variable(trainY.astype('float32'))  # This is our correct-answers (one-hot) matrix for the 3 classes.
```
<h3>Set model weights and bias</h3>
Much like Linear Regression, we need a shared variable weight matrix for Logistic Regression. We initialize both <code>W</code> and <code>b</code> as tensors full of zeros. Since we are going to learn <code>W</code> and <code>b</code>, their initial value does not matter too much. These variables are the objects which define the structure of our regression model, and we can save them after they have been trained so we can reuse them later.
We define two TensorFlow variables as our parameters. These variables will hold the weights and biases of our logistic regression and they will be continually updated during training.
Notice that <code>W</code> has a shape of [4, 3] because we want to multiply the 4-dimensional input vectors by it to produce 3-dimensional vectors of evidence for the different classes. <code>b</code> has a shape of [3] so we can add it to the output. TensorFlow variables need to be initialized with values, e.g. with zeros.
```
W = tf.Variable(tf.zeros([4, 3])) # 4-dimensional input and 3 classes
b = tf.Variable(tf.zeros([3])) # 3-dimensional output [0,0,1],[0,1,0],[1,0,0]
#Randomly sample from a normal distribution with standard deviation .01
weights = tf.Variable(tf.random.normal([numFeatures,numLabels],
mean=0.,
stddev=0.01,
name="weights"),dtype='float32')
bias = tf.Variable(tf.random.normal([1,numLabels],
mean=0.,
stddev=0.01,
name="bias"))
```
<h3>Logistic Regression model</h3>
We now define our operations in order to properly run the Logistic Regression. Logistic regression is typically thought of as a single equation:
$$
ŷ = sigmoid(WX + b)
$$
However, for the sake of clarity, we can have it broken into its three main components:
- a weight times features matrix multiplication operation,
- a summation of the weighted features and a bias term,
- and finally the application of a sigmoid function.
As such, you will find these components defined as three separate operations below.
```
# Three-component breakdown of the Logistic Regression equation.
# Note that these feed into each other.
def logistic_regression(x):
    apply_weights_OP = tf.matmul(x, weights, name="apply_weights")  # use the function argument x
add_bias_OP = tf.add(apply_weights_OP, bias, name="add_bias")
activation_OP = tf.nn.sigmoid(add_bias_OP, name="activation")
return activation_OP
```
As we have seen before, the function we are going to use is the <i>logistic function</i> $(\frac{1}{1+e^{-Wx}})$, which is fed the input data after applying weights and bias. In TensorFlow, this function is implemented as the <code>nn.sigmoid</code> function. Effectively, this fits the weighted input with bias into a 0-100 percent curve, which is the probability function we want.
<hr>
<a id="ref3"></a>
<h2>Training</h2>
The learning algorithm is how we search for the best weight vector (${\bf w}$). This search is an optimization problem looking for the hypothesis that optimizes an error/cost measure.
<b>What tells us our model is bad?</b>
The cost (or loss) of the model, so what we want is to minimize it.
<h3>Cost function</h3>
Before defining our cost function, we need to define how long we are going to train and how we should define the learning rate.
```
# Number of Epochs in our training
numEpochs = 700
# Defining our learning rate iterations (decay)
learningRate = tf.keras.optimizers.schedules.ExponentialDecay(initial_learning_rate=0.0008,
decay_steps=trainX.shape[0],
decay_rate= 0.95,
staircase=True)
```
<b>What is the cost function in our model?</b>
The cost function we are going to utilize here is the mean squared logarithmic error loss.
<b>How to minimize the cost function?</b>
We can't use <b>least-squares linear regression</b> here, so we will use <a href="http://en.wikipedia.org/wiki/Gradient_descent">gradient descent</a> instead. Specifically, we will use batch gradient descent which calculates the gradient from all data points in the data set.
```
# Defining our cost function - mean squared logarithmic error
loss_object = tf.keras.losses.MeanSquaredLogarithmicError()
optimizer = tf.keras.optimizers.SGD(learningRate)
```
We also want some additional operations to keep track of our model's efficiency over time. We can do this like so:
```
# Accuracy metric.
def accuracy(y_pred, y_true):
# Predicted class is the index of the highest score in prediction vector (i.e. argmax).
print('y_pred : ',y_pred)
print('y_true : ',y_true)
correct_prediction = tf.equal(tf.argmax(y_pred, -1), tf.argmax(y_true, -1))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
```
We first wrap the computation inside a GradientTape for automatic differentiation. Then we compute the gradients and update the weights and bias.
```
# Optimization process.
def run_optimization(x, y):
with tf.GradientTape() as g:
pred = logistic_regression(x)
loss = loss_object(pred, y)
gradients = g.gradient(loss, [weights, bias])
optimizer.apply_gradients(zip(gradients, [weights, bias]))
```
Now we move on to actually running our operations. We will start with the operations involved in the prediction phase (i.e. the logistic regression itself).
Now we can define and run the actual training loop, like this:
```
# Initialize reporting variables
display_step = 10
epoch_values = []
accuracy_values = []
loss_values = []
loss = 0
diff = 1
# Training epochs
for i in range(numEpochs):
if i > 1 and diff < .000001:
print("change in loss %g; convergence."%diff)
break
else:
# Run training step
run_optimization(X, yGold)
# Report occasional stats
if i % display_step == 0:
# Add epoch to epoch_values
epoch_values.append(i)
pred = logistic_regression(X)
newLoss = loss_object(pred, yGold)
# Add loss to live graphing variable
loss_values.append(newLoss)
# Generate accuracy stats on test data
acc = accuracy(pred, yGold)
accuracy_values.append(acc)
# Re-assign values for variables
diff = abs(newLoss - loss)
loss = newLoss
#generate print statements
print("step %d, training accuracy %g, loss %g, change in loss %g"%(i, acc, newLoss, diff))
# How well do we perform on held-out test data?
test_acc = accuracy(logistic_regression(testX.astype('float32')), testY.astype('float32'))
print("final accuracy on test set: %s" % str(test_acc.numpy()))
```
<b>Why don't we plot the loss to see how it behaves?</b>
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
plt.plot([np.mean(loss_values[max(0, i-50):i+1]) for i in range(len(loss_values))])  # running mean of the recorded losses
plt.show()
```
Try changing the parameters such as the length of training, and maybe some operations to see how the model behaves. Does it take much longer? How is the performance?
<hr>
## Want to learn more?
Running deep learning programs usually needs a high performance platform. **PowerAI** speeds up deep learning and AI. Built on IBM’s Power Systems, **PowerAI** is a scalable software platform that accelerates deep learning and AI with blazing performance for individual users or enterprises. The **PowerAI** platform supports popular machine learning libraries and dependencies including TensorFlow, Caffe, Torch, and Theano. You can use [PowerAI on IBM Cloud](https://cocl.us/ML0120EN_PAI).
Also, you can use **Watson Studio** to run these notebooks faster with bigger datasets. **Watson Studio** is IBM’s leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, **Watson Studio** enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of **Watson Studio** users today with a free account at [Watson Studio](https://cocl.us/ML0120EN_DSX). This is the end of this lesson. Thank you for reading this notebook, and good luck on your studies.
### Thanks for completing this lesson!
This is the end of **Logistic Regression with TensorFlow** notebook. Hopefully, now you have a deeper understanding of Logistic Regression and how its structure and flow work. Thank you for reading this notebook and good luck on your studies.
Created by: <a href="https://linkedin.com/in/romeo-kienzler-089b4557"> Romeo Kienzler </a>, <a href="https://br.linkedin.com/in/walter-gomes-de-amorim-junior-624726121">Saeed Aghabozorgi</a> , <a href="https://br.linkedin.com/in/walter-gomes-de-amorim-junior-624726121">Walter Gomes de Amorim Junior</a> , Victor Barros Costa
Updated to TF 2.X by <a href="https://www.linkedin.com/in/samaya-madhavan"> Samaya Madhavan </a>
## Change Log
| Date (YYYY-MM-DD) | Version | Changed By | Change Description |
| ----------------- | ------- | ---------- | ----------------------------------------------------------- |
| 2020-09-21 | 2.0 | Srishti | Migrated Lab to Markdown and added to course repo in GitLab |
<hr>
## <h3 align="center"> © IBM Corporation 2020. All rights reserved. </h3>
<hr>
Copyright © 2018 [Cognitive Class](https://cocl.us/DX0108EN_CC). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DL0120EN-SkillsNetwork-20629446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ).
### Page Replacement Algorithms:
There are different page replacement algorithms, and each of them has drawbacks, so no page replacement algorithm is ideal. To move one step closer to an ideal algorithm, it is vital to achieve a high cache hit ratio and consistent behaviour across different workloads.
```
from tqdm import tqdm_notebook as tqdm
import numpy as np
from collections import deque, defaultdict
```
### Optimal Page replacement algorithm:
The OPT algorithm is a theoretical reference point: it evicts the block in the cache whose next access lies farthest in the future. It is hard to achieve in practice because future accesses cannot be determined in advance; here it is simulated offline using the full trace.
```
# find the block in the frame whose next use is farthest in the future (or that is never used again)
def findfar(blk, frm):
farthest = 0
far_ind = {}
nomore = []
for v in frm:
if v not in blk:
far_ind[v] = -1
elif v in blk:
far_ind[v] = blk.index(v)
srtdlst = sorted(far_ind.items(), key=lambda item:item[1], reverse= True)
for (k,v) in srtdlst:
if v == -1:
nomore.append(k)
if nomore:
return nomore[0]
else:
return srtdlst[0][0]
# Offline optimal page replacement algorithm (OPT)
def OPT(blk,size):
frame = set()
hits = 0
misses = 0
isfull = False
for i,b in enumerate(blk):
if b not in frame:
misses += 1
if len(frame) == size:
isfull = True
if not isfull:
frame.add(b)
else:
farthest = findfar(blk[i+1:], frame)
frame.discard(farthest)
frame.add(b)
else:
hits += 1
return hits/(hits+misses)
```
### LRU(Least Recently Used) Page replacement algorithm
The simplest algorithm used to manage the cache data is LRU.
The LRU algorithm ensures that when a block already present in the cache is accessed, it is moved to the top of the recency stack. When a block not present in the cache is accessed, the newly accessed block takes the place of the least recently used block in the cache.
```
def LRU(blocktrace, frame):
cache = set()
recency = deque()
hit, miss = 0, 0
for block in tqdm(blocktrace, leave=False):
if block in cache:
recency.remove(block)
recency.append(block)
hit += 1
elif len(cache) < frame:
cache.add(block)
recency.append(block)
miss += 1
else:
cache.remove(recency[0])
recency.popleft()
cache.add(block)
recency.append(block)
miss += 1
hitrate = hit / (hit + miss)
return hitrate
```
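As a quick sanity check of the function above (a made-up five-access trace and a hypothetical 2-block cache, purely for illustration):
```
# Illustrative check: with a 2-block cache, the trace below gets exactly one hit
# (the second access to 'a'), so the hit ratio is 1/5 = 0.2.
print(LRU(['a', 'b', 'a', 'c', 'b'], 2))
```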
### LFU (Least Frequently Used) Page replacement algorithm:
In contrast to the LRU algorithm, this algorithm computes the frequency of access for every block. When a block present in the cache is accessed, its frequency count increases; when a block not present in the cache is accessed, the newly accessed block replaces the least frequently used block in the cache.
```
def LFU(blocktrace, frame):
cache = set()
cache_frequency = defaultdict(int)
frequency = defaultdict(int)
hit, miss = 0, 0
for block in tqdm(blocktrace):
frequency[block] += 1
if block in cache:
hit += 1
cache_frequency[block] += 1
elif len(cache) < frame:
cache.add(block)
cache_frequency[block] += 1
miss += 1
else:
e, f = min(cache_frequency.items(), key=lambda a: a[1])
cache_frequency.pop(e)
cache.remove(e)
cache.add(block)
cache_frequency[block] = frequency[block]
miss += 1
hitrate = hit / ( hit + miss )
return hitrate
```
### DAS (Dynamic And Stable) Page Replacement Algorithm:
This algorithm achieves a consistent hit ratio by using the fundamentals of page replacement: low recency and high frequency of blocks. To achieve this stability, it integrates the design principles of LRU and LFU by splitting the cache into two segments and moving blocks between them according to the hits they receive, using only simple computations.
```
def DAS(blocktrace, frame):
    # split the cache into an LRU segment (10% of frames) and an LFU segment (90%)
    LRUframe = int(0.1 * frame)
    LFUframe = frame - LRUframe
    # initialise hit and miss counters
    hit, miss, LRU_hit, LFU_hit = 0, 0, 0, 0
    # initialise the two cache segments and their frequency tables
    LRUcache = deque()
    LFUcache = deque()
    LFU_cache_frequency = defaultdict(int)
    frequency = defaultdict(int)
    LRU_cache_frequency = defaultdict(int)
    for block in tqdm(blocktrace):
frequency[block] += 1
if block in LRUcache or block in LFUcache:
hit+=1
#checked
if block in LFUcache:
LFU_hit +=1
LFU_cache_frequency[block] += 1
if block in LRUcache:
LRU_hit +=1
LRU_cache_frequency[block] += 1
#checked
if len(LFUcache) < LFUframe:
LRUcache.remove(block)
LFUcache.append(block)
LFU_cache_frequency[block] = LRU_cache_frequency[block]
del LRU_cache_frequency[block]
else:
#block, frequency
e, LFU_fre = min(LFU_cache_frequency.items(), key=lambda a: a[1])
#print(e, LFU_fre)
LRUcache.remove(block)
LFUcache.append(block)
LFU_cache_frequency[block] = LRU_cache_frequency[block]
del LRU_cache_frequency[block]
LFUcache.remove(e)
LRUcache.append(e)
LRU_cache_frequency[e] = LFU_fre
del LFU_cache_frequency[e]
elif (len(LRUcache) + len(LFUcache)) < frame:
miss += 1
#first i will fill LRU block
if len(LRUcache) <= LRUframe-1:
LRUcache.append(block)
LRU_cache_frequency[block] += 1
#print("LRUcache", LRUcache)
else:
#now if recent clock
if len(LFUcache) <= LFUframe-1:
LFU_cache_frequency[block] += 1
LFUcache.append(block)
else:
#adding miss block in cache
miss += 1
#print(len(LFUcache), len(LRUcache))
old = LRUcache.popleft()
LRUcache.append(block)
del LRU_cache_frequency[old]
LRU_cache_frequency[block] = frequency[block]
hitrate = hit /(hit+ miss)
return hitrate
import pandas as pd
DAS_hit =[]
LRU_hit = []
LFU_hit = []
OPT_hit = []
size = [30,60,100,200,300,350,355,360,370,400,500,750,1000,1250,1500,1750,2000,2500,2700]
for i in range(len(size)):
f = open("ps.trc", 'r')
DAS_hit.append(round(DAS(f,size[i])*100 ,2))
dict = {'x': size, 'y': DAS_hit} # dictionary of lists
DAS_df = pd.DataFrame(dict) #print(dict)
DAS_df.to_csv('ps_DAS.csv') # saving the dataframe
for i in range(len(size)):
f = open("ps.trc", 'r')
LRU_hit.append(round(LRU(f,size[i])*100 ,2))
dict = {'x': size, 'y': LRU_hit} # dictionary of lists
LRU_df = pd.DataFrame(dict) #print(dict)
LRU_df.to_csv('ps_LRU.csv') # saving the dataframe
for i in range(len(size)):
f = open("ps.trc", 'r')
LFU_hit.append(round(LFU(f,size[i])*100 ,2))
dict = {'x': size, 'y': LFU_hit} # dictionary of lists
LFU_df = pd.DataFrame(dict) #print(dict)
LFU_df.to_csv('ps_LFU.csv') # saving the dataframe
for i in range(len(size)):
#f = open("test.txt", 'r')
lineList = [line.rstrip('\n') for line in open("ps.trc")]
OPT_hit.append(round(OPT(lineList,size[i])*100 ,2))
#print(hit)
# dictionary of lists
dict = {'x': size, 'y': OPT_hit}
OPT_df = pd.DataFrame(dict)
# saving the dataframe
OPT_df.to_csv('ps_OPT.csv')
import pandas as pd
import matplotlib.pyplot as plt
DAS =pd.read_csv('ps_DAS.csv')
LRU =pd.read_csv('ps_LRU.csv')
LFU =pd.read_csv('ps_LFU.csv')
OPT =pd.read_csv('ps_OPT.csv')
plt.plot(DAS['x'], DAS['y'], label="DAS")
plt.plot(LRU['x'], LRU['y'], label="LRU")
plt.plot(LFU['x'], LFU['y'], label="LFU")
plt.plot(OPT['x'], OPT['y'], label="OPT")
plt.title("Postgres")
plt.xlabel('Cache Size (# of blocks)')
plt.ylabel('Hit Ratio (%)')
plt.legend()
```
### Experiment 5: the "postgres" trace, a trace of join queries among four relations in a relational database system from the University of California at Berkeley.
LRU presents stair-step hit-rate curves for such workloads: LRU is not effective until all the blocks in its locality scope are brought into the cache. For example, only after the cache can hold 355 blocks does the LRU hit rate of postgres have a sharp increase from 16.3% to 55.5%.
```
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme(style="darkgrid",palette=sns.color_palette("Set2"))
data=pd.read_csv(r"C:\Users\dhmph\Desktop\XBUS-506-01.Visual_Analytics-master\DataScientist.csv")
data = data[['Job Title', 'Job Description', 'Salary Estimate', 'Rating', 'Company Name', 'Location', 'Industry','Sector']]
data
data['Job Title'].value_counts().head(10)
data['Industry'].value_counts().head(10)
data['Sector'].value_counts().head(10)
data['Location'].nunique()
data['Location'].value_counts().head(10)
df_location=data['Location'].value_counts().head(10)
df_location
austin_data = data.loc[data['Location'] == 'Austin, TX']
chicago_data = data.loc[data['Location'] == 'Chicago, IL']
import nltk
import re
from string import digits
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.stem import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
ps = PorterStemmer()
# make all text in the Job Description column lowercase
data["Job Description"]=data["Job Description"].str.lower()
data
tokenizer = RegexpTokenizer(r'\w+')
data["Job Description"] = data["Job Description"].apply(lambda x: tokenizer.tokenize(x.lower()))
data["Job Description"]
lemmatizer = WordNetLemmatizer()
def word_lemmatizer(text):
lem_text = [lemmatizer.lemmatize(i) for i in text]
return lem_text
data["Job Description"] = data["Job Description"].apply(lambda x: word_lemmatizer(x))
data
def remove_stopwords(text):
words = [w for w in text if w not in stopwords.words('english')]
return words
data["Job Description"] = data["Job Description"].apply(lambda x: remove_stopwords(x))
data
austin_data = data.loc[data['Location'] == 'Austin, TX'].copy()   # .copy() avoids SettingWithCopyWarning when these slices are modified below
chicago_data = data.loc[data['Location'] == 'Chicago, IL'].copy()
austin_data
def common_data_science_words(text):
common_data_science_words = ['data', 'science', 'work', 'experience', 'business','team', 'information', 'employee', 'technology', 'year',
'analysis',
'application',
'system',
'service',
'ability',
'job',
'opportunity',
'development',
'solution',
'design',
'product',
'customer',
'required',
'engineering',
'skill',
'including',
'analytics',
'project',
'requirement',
'learning',
'tool',
'technical',
'support',
'software',
'company',
'position',
'employment']
words =[w for w in text if w not in common_data_science_words]
return words
austin_data["Job Description"] = austin_data["Job Description"].apply(lambda x: common_data_science_words(x))
chicago_data["Job Description"] = chicago_data["Job Description"].apply(lambda x: common_data_science_words(x))
```
# Topic Modeling
```
import gensim
from gensim.utils import simple_preprocess
import gensim.corpora as corpora
from pprint import pprint
data_words_austin = list(austin_data['Job Description'])
```
### Austin LDA
```
# Create Dictionary
id2word = corpora.Dictionary(data_words_austin)
# Create Corpus
texts = data_words_austin
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# LDA model training
# number of topics
num_topics = 10
# Build LDA model
lda_model_austin = gensim.models.LdaMulticore(corpus=corpus,
id2word=id2word,
num_topics=num_topics)
# Print the keywords of the 10 topics
pprint(lda_model_austin.print_topics())
doc_lda = lda_model_austin[corpus]
# Keep references to the Austin corpus/dictionary: the Chicago cell below reuses
# the names `corpus` and `id2word`, but the visualization cell still needs these.
corpus_austin, id2word_austin = corpus, id2word
```
### Chicago LDA
```
data_words_chicago = list(chicago_data['Job Description'])
# Create Dictionary
id2word = corpora.Dictionary(data_words_chicago)
# Create Corpus
texts = data_words_chicago
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# LDA model training
# number of topics
num_topics = 10
# Build LDA model
lda_model_chicago = gensim.models.LdaMulticore(corpus=corpus,
id2word=id2word,
num_topics=num_topics)
# Print the Keyword in the 20 topics
pprint(lda_model_chicago.print_topics())
doc_lda = lda_model_chicago[corpus]
```
# Load in Topic Modeling visualization
```
import pyLDAvis
import pyLDAvis.gensim_models as gensimvis
# Visualize the topics
pyLDAvis.enable_notebook()
vis = pyLDAvis.gensim_models.prepare(lda_model_austin, corpus_austin, id2word_austin)  # use the Austin corpus/dictionary saved above
vis
vis = pyLDAvis.gensim_models.prepare(lda_model_chicago, corpus, id2word)
vis
```
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="fig/cover-small.jpg">
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*
*The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*
<!--NAVIGATION-->
< [How to Run Python Code](01-How-to-Run-Python-Code.ipynb) | [Contents](Index.ipynb) | [Basic Python Semantics: Variables and Objects](03-Semantics-Variables.ipynb) >
# A Quick Tour of Python Language Syntax
Python was originally developed as a teaching language, but its ease of use and clean syntax have led it to be embraced by beginners and experts alike.
The cleanliness of Python's syntax has led some to call it "executable pseudocode", and indeed my own experience has been that it is often much easier to read and understand a Python script than to read a similar script written in, say, C.
Here we'll begin to discuss the main features of Python's syntax.
Syntax refers to the structure of the language (i.e., what constitutes a correctly-formed program).
For the time being, we'll not focus on the semantics – the meaning of the words and symbols within the syntax – but will return to this at a later point.
Consider the following code example:
```
# set the midpoint
midpoint = 5
# make two empty lists
lower = []; upper = []
# split the numbers into lower and upper
for i in range(10):
if (i < midpoint):
lower.append(i)
else:
upper.append(i)
print("lower:", lower)
print("upper:", upper)
```
This script is a bit silly, but it compactly illustrates several of the important aspects of Python syntax.
Let's walk through it and discuss some of the syntactical features of Python.
## Comments Are Marked by ``#``
The script starts with a comment:
``` python
# set the midpoint
```
Comments in Python are indicated by a pound sign (``#``), and anything on the line following the pound sign is ignored by the interpreter.
This means, for example, that you can have stand-alone comments like the one just shown, as well as inline comments that follow a statement. For example:
``` python
x += 2 # shorthand for x = x + 2
```
Python does not have any syntax for multi-line comments, such as the ``/* ... */`` syntax used in C and C++, though multi-line strings are often used as a replacement for multi-line comments (more on this in [String Manipulation and Regular Expressions](14-Strings-and-Regular-Expressions.ipynb)).
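For instance, a triple-quoted string that is not assigned to anything is simply evaluated and then discarded, so it can play the role of a multi-line comment (a small illustrative example, not part of the original script):
``` python
"""
This block is just a string expression. Because it is not assigned to a
variable, Python evaluates it and throws it away, which is why such strings
are often used in place of multi-line comments.
"""
x = 2  # an ordinary inline comment
```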
## End-of-Line Terminates a Statement
The next line in the script is
``` python
midpoint = 5
```
This is an assignment operation, where we've created a variable named ``midpoint`` and assigned it the value ``5``.
Notice that the end of this statement is simply marked by the end of the line.
This is in contrast to languages like C and C++, where every statement must end with a semicolon (``;``).
In Python, if you'd like a statement to continue to the next line, it is possible to use the "``\``" marker to indicate this:
```
x = 1 + 2 + 3 + 4 +\
5 + 6 + 7 + 8
```
It is also possible to continue expressions on the next line within parentheses, without using the "``\``" marker:
```
x = (1 + 2 + 3 + 4 +
5 + 6 + 7 + 8)
```
Most Python style guides recommend the second version of line continuation (within parentheses) over the first (use of the "``\``" marker).
## Semicolon Can Optionally Terminate a Statement
Sometimes it can be useful to put multiple statements on a single line.
The next portion of the script is
``` python
lower = []; upper = []
```
This shows the example of how the semicolon (``;``) familiar in C can be used optionally in Python to put two statements on a single line.
Functionally, this is entirely equivalent to writing
``` python
lower = []
upper = []
```
Using a semicolon to put multiple statements on a single line is generally discouraged by most Python style guides, though occasionally it proves convenient.
## Indentation: Whitespace Matters!
Next, we get to the main block of code:
``` Python
for i in range(10):
if i < midpoint:
lower.append(i)
else:
upper.append(i)
```
This is a compound control-flow statement including a loop and a conditional – we'll look at these types of statements in a moment.
For now, consider that this demonstrates what is perhaps the most controversial feature of Python's syntax: whitespace is meaningful!
In programming languages, a *block* of code is a set of statements that should be treated as a unit.
In C, for example, code blocks are denoted by curly braces:
``` C
// C code
for(int i=0; i<100; i++)
{
// curly braces indicate code block
total += i;
}
```
In Python, code blocks are denoted by *indentation*:
``` python
for i in range(100):
# indentation indicates code block
total += i
```
In Python, indented code blocks are always preceded by a colon (``:``) on the previous line.
The use of indentation helps to enforce the uniform, readable style that many find appealing in Python code.
But it might be confusing to the uninitiated; for example, the following two snippets will produce different results:
```python
>>> if x < 4:         >>> if x < 4:
...     y = x * 2     ...     y = x * 2
...     print(x)      ... print(x)
```
In the snippet on the left, ``print(x)`` is in the indented block, and will be executed only if ``x`` is less than ``4``.
In the snippet on the right ``print(x)`` is outside the block, and will be executed regardless of the value of ``x``!
Python's use of meaningful whitespace often is surprising to programmers who are accustomed to other languages, but in practice it can lead to much more consistent and readable code than languages that do not enforce indentation of code blocks.
If you find Python's use of whitespace disagreeable, I'd encourage you to give it a try: as I did, you may find that you come to appreciate it.
Finally, you should be aware that the *amount* of whitespace used for indenting code blocks is up to the user, as long as it is consistent throughout the script.
By convention, most style guides recommend indenting code blocks by four spaces, and that is the convention we will follow in this report.
Note that many text editors like Emacs and Vim contain Python modes that do four-space indentation automatically.
## Whitespace *Within* Lines Does Not Matter
While the mantra of *meaningful whitespace* holds true for whitespace *before* lines (which indicate a code block), white space *within* lines of Python code does not matter.
For example, all three of these expressions are equivalent:
```
x=1+2
x = 1 + 2
x             =        1    +                2
```
Abusing this flexibility can lead to issues with code readability – in fact, abusing white space is often one of the primary means of intentionally obfuscating code (which some people do for sport).
Using whitespace effectively can lead to much more readable code,
especially in cases where operators follow each other – compare the following two expressions for exponentiating by a negative number:
``` python
x=10**-2
```
to
``` python
x = 10 ** -2
```
I find the second version with spaces much more easily readable at a single glance.
Most Python style guides recommend using a single space around binary operators, and no space around unary operators.
We'll discuss Python's operators further in [Basic Python Semantics: Operators](04-Semantics-Operators.ipynb).
## Parentheses Are for Grouping or Calling
In the previous code snippet, we see two uses of parentheses.
First, they can be used in the typical way to group statements or mathematical operations:
```
2 * (3 + 4)
```
They can also be used to indicate that a *function* is being called.
In the next snippet, the ``print()`` function is used to display the contents of a variable (see the sidebar).
The function call is indicated by a pair of opening and closing parentheses, with the *arguments* to the function contained within:
```
print('first value:', 1)
print('second value:', 2)
```
Some functions can be called with no arguments at all, in which case the opening and closing parentheses still must be used to indicate a function evaluation.
An example of this is the ``sort`` method of lists:
```
L = [4,2,3,1]
L.sort()
print(L)
```
The "``()``" after ``sort`` indicates that the function should be executed, and is required even if no arguments are necessary.
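To see why the parentheses matter, compare referring to the method with actually calling it (a short illustrative snippet):
```
L = [4, 2, 3, 1]
print(L.sort)   # no parentheses: this is just a reference to the method; nothing runs
L.sort()        # with parentheses: the method executes and sorts the list in place
print(L)        # [1, 2, 3, 4]
```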
## Aside: A Note on the ``print()`` Function
Above we used the example of the ``print()`` function.
The ``print()`` function is one piece that has changed between Python *2.x* and Python *3.x*. In Python 2, ``print`` behaved as a statement: that is, you could write
``` python
# Python 2 only!
>>> print "first value:", 1
first value: 1
```
For various reasons, the language maintainers decided that in Python 3 ``print()`` should become a function, so we now write
``` python
# Python 3 only!
>>> print("first value:", 1)
first value: 1
```
This is one of the many backward-incompatible constructs between Python 2 and 3.
As of the writing of this book, it is common to find examples written in both versions of Python, and the presence of the ``print`` statement rather than the ``print()`` function is often one of the first signs that you're looking at Python 2 code.
## Finishing Up and Learning More
This has been a very brief exploration of the essential features of Python syntax; its purpose is to give you a good frame of reference for when you're reading the code in later sections.
Several times we've mentioned Python "style guides", which can help teams to write code in a consistent style.
The most widely used style guide in Python is known as PEP8, and can be found at https://www.python.org/dev/peps/pep-0008/.
As you begin to write more Python code, it would be useful to read through this!
The style suggestions contain the wisdom of many Python gurus, and most suggestions go beyond simple pedantry: they are experience-based recommendations that can help avoid subtle mistakes and bugs in your code.
<!--NAVIGATION-->
< [How to Run Python Code](01-How-to-Run-Python-Code.ipynb) | [Contents](Index.ipynb) | [Basic Python Semantics: Variables and Objects](03-Semantics-Variables.ipynb) >
|
github_jupyter
|
```
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QLineEdit, QSizePolicy
from PyQt5.QtGui import QIcon, QPixmap, QFont
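# Fun-emes: PyQt5 prototype UI for a speech-practice app.
# Each screen is implemented as a PageWindow subclass; a page requests navigation
# by emitting gotoSignal with the registered name of the target page, and the
# top-level Window swaps the matching widget into view via a QStackedWidget.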
class PageWindow(QtWidgets.QMainWindow):
gotoSignal = QtCore.pyqtSignal(str)
def goto(self, name):
self.gotoSignal.emit(name)
# Main 1 - Given Random Sentence
class Main1Window(PageWindow):
def __init__(self):
super().__init__()
self.title = "Fun-neme"
self.initUI()
self.setWindowTitle(self.title)
def initUI(self):
self.UiComponents()
def goToLoad(self):
self.goto("loading1")
def goToProfile(self):
self.goto("profile")
def goToBack(self):
self.goto('scenarios')
def goToOptions(self):
self.goto('options')
def UiComponents(self):
# User Prompt
promptMsg = QLabel(self)
promptMsg.setText("Repeat after me...")
promptMsg.setFont(QFont('Arial', 30))
promptMsg.setGeometry(QtCore.QRect(170, 150, 260, 100)) #(x, y, width, height)
# user image
user = QtWidgets.QPushButton('', self)
user.setIcon(QtGui.QIcon('user.png'))
user.setIconSize(QtCore.QSize(64,64))
user.setFixedSize(70, 70)
user.setGeometry(570,0,70,70)
user.clicked.connect(self.goToProfile)
# mic button
micButton = QtWidgets.QPushButton('', self)
# self.button.clicked.connect(self.handleButton)
micButton.setIcon(QtGui.QIcon('radio.png'))
micButton.setIconSize(QtCore.QSize(40,40))
micButton.setFixedSize(48, 48)
micButton.move(270, 320)
# speaker button
speakerButton = QtWidgets.QPushButton('', self)
# self.button.clicked.connect(self.handleButton)
speakerButton.setIcon(QtGui.QIcon('speaker1.png'))
speakerButton.setIconSize(QtCore.QSize(28,28))
speakerButton.setFixedSize(32, 32)
speakerButton.move(125,250)
# sentence input
sentence = QLineEdit(self)
sentence.setGeometry(190, 250, 320, 30) #(x, y, width, height)
sentence.setReadOnly(True)
sentence.setText('This is a sample sentence')
sentence.setFont(QFont('Arial', 16))
# settings button
settingButton = QtWidgets.QPushButton('', self)
settingButton.setIcon(QtGui.QIcon('setting.png'))
settingButton.setIconSize(QtCore.QSize(28,28))
settingButton.setFixedSize(32, 32)
settingButton.move(590,70)
# home button
homeButton = QtWidgets.QPushButton('', self)
homeButton.setIcon(QtGui.QIcon('home.png'))
homeButton.setIconSize(QtCore.QSize(28,28))
homeButton.setFixedSize(32, 32)
homeButton.move(590,110)
homeButton.clicked.connect(self.goToOptions)
# next page button
resultButton = QtWidgets.QPushButton("Next", self)
resultButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
resultButton.clicked.connect(self.goToLoad)
# back page button
prevButton = QtWidgets.QPushButton('Back', self)
prevButton.setGeometry(QtCore.QRect(520, 440, 100, 20))
prevButton.clicked.connect(self.goToBack)
# Main 2 - Make Your Own Sentence
class Main2Window(PageWindow):
def __init__(self):
super().__init__()
self.title = "Fun-neme"
self.initUI()
self.setWindowTitle(self.title)
def initUI(self):
self.UiComponents()
def goToBack(self):
self.goto("options")
def goToProfile(self):
self.goto("profile")
def goToOptions(self):
self.goto('options')
def UiComponents(self):
# User Prompt
promptMsg = QLabel(self)
promptMsg.setText("Enter Your Own Sentence Here:")
promptMsg.setFont(QFont('Arial', 30))
promptMsg.setGeometry(QtCore.QRect(100, 150, 450, 100)) #(x, y, width, height)
# user image
user = QtWidgets.QPushButton('', self)
user.setIcon(QtGui.QIcon('user.png'))
user.setIconSize(QtCore.QSize(64,64))
user.setFixedSize(70, 70)
user.setGeometry(570,0,70,70)
user.clicked.connect(self.goToProfile)
# mic button
micButton = QtWidgets.QPushButton('', self)
# self.button.clicked.connect(self.handleButton)
micButton.setIcon(QtGui.QIcon('radio.png'))
micButton.setIconSize(QtCore.QSize(40,40))
micButton.setFixedSize(48, 48)
micButton.move(270, 320)
# speaker button
speakerButton = QtWidgets.QPushButton('', self)
# self.button.clicked.connect(self.handleButton)
speakerButton.setIcon(QtGui.QIcon('speaker1.png'))
speakerButton.setIconSize(QtCore.QSize(28,28))
speakerButton.setFixedSize(32, 32)
speakerButton.move(125,250)
# settings button
settingButton = QtWidgets.QPushButton('', self)
settingButton.setIcon(QtGui.QIcon('setting.png'))
settingButton.setIconSize(QtCore.QSize(28,28))
settingButton.setFixedSize(32, 32)
settingButton.move(590,70)
# home button
homeButton = QtWidgets.QPushButton('', self)
homeButton.setIcon(QtGui.QIcon('home.png'))
homeButton.setIconSize(QtCore.QSize(28,28))
homeButton.setFixedSize(32, 32)
homeButton.move(590,110)
homeButton.clicked.connect(self.goToOptions)
# sentence input
sentence = QLineEdit(self)
sentence.setGeometry(190, 250, 320, 30) #(x, y, width, height)
sentence.setPlaceholderText('This is a sample sentence')
sentence.setFont(QFont('Arial', 16))
# next page button
resultButton = QtWidgets.QPushButton("Next", self)
resultButton.clicked.connect(
self.make_handleButton("nextButton")
)
resultButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
# back page button
prevButton = QtWidgets.QPushButton('Back', self)
prevButton.setGeometry(QtCore.QRect(520, 440, 100, 20))
prevButton.clicked.connect(self.goToBack)
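    # Factory returning a click handler bound to a specific button name, so the
    # name is captured when the handler is connected rather than when it runs.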
def make_handleButton(self, button):
def handleButton():
if button == "nextButton":
self.goto("loading2")
return handleButton
# Page 1
class LaunchWindow(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setWindowTitle("Welcome to Fun-emes")
self.UiComponents()
def goToMain(self):
self.goto("options")
def UiComponents(self):
# owl image
owl = QLabel(self)
owl.setPixmap(QPixmap('owl.png').scaled(128, 128, QtCore.Qt.KeepAspectRatio))
        owl.setScaledContents(True)
owl.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
owl.setGeometry(140, 65, 128, 128)
title1 = QLabel(self)
title1.setText('F')
title1.setFont(QFont('Fantasy', 100))
title1.setGeometry(180, 185, 50, 80)
title1.setStyleSheet("""
QLabel {
color: #F08080
}
""")
title1 = QLabel(self)
title1.setText('un')
title1.setFont(QFont('Fantasy', 50))
title1.setGeometry(220, 204, 60, 80)
title1.setStyleSheet("""
QLabel {
color: #F08080
}
""")
title2 = QLabel(self)
title2.setText('-emes')
title2.setFont(QFont('Fantasy', 50))
title2.setGeometry(280, 204, 180, 80)
title2.setStyleSheet("""
QLabel {
color: #9ACD32
}
""")
# next button
# To Do: replace this with timed calling of self.goToMain
nextButton = QtWidgets.QPushButton("Next", self)
nextButton.setGeometry(520, 400, 50, 30)
nextButton.clicked.connect(self.goToMain)
# Page 2
class OptionsWindow(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.UiComponents()
def goToOcc(self):
self.goto("occupations")
def goToMain2(self):
self.goto("main2")
def UiComponents(self):
# prompt - Please choose between:
# 1. Given random sentence
# 2. Use your own sentence
prompt = QLabel(self)
prompt.setText("Choose between: ")
prompt.setFont(QFont('Arial', 25))
prompt.setGeometry(QtCore.QRect(170, 120, 300, 100)) #(x, y, width, height)
# option1
op1Button = QtWidgets.QPushButton('Given random sentence', self)
op1Button.setGeometry(QtCore.QRect(170, 200, 250, 20))
op1Button.clicked.connect(self.goToMain2)
# option2
op2Button = QtWidgets.QPushButton('Use your own sentence', self)
op2Button.setGeometry(QtCore.QRect(170, 250, 250, 20))
op2Button.clicked.connect(self.goToOcc)
# Page 3
class OccupationsWindow(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.UiComponents()
def goToSce(self):
self.goto("scenarios")
def goToOptions(self):
self.goto("options")
def UiComponents(self):
prompt = QLabel(self)
prompt.setText("Choose your environment: ")
prompt.setFont(QFont('Arial', 25))
prompt.setGeometry(QtCore.QRect(140, 50, 380, 150)) #(x, y, width, height)
# elementary school
eleButton = QtWidgets.QPushButton('Elementary School', self)
eleButton.setGeometry(QtCore.QRect(70, 200, 150, 20))
# eleButton.clicked.connect(lambda: self.handleClick('elementary'))
# highschool
highschoolButton = QtWidgets.QPushButton('Highschool', self)
highschoolButton.setGeometry(QtCore.QRect(250, 200, 150, 20))
# highschoolButton.clicked.connect(lambda: self.handleClick('highschool'))
# college
collegeButton = QtWidgets.QPushButton('College', self)
collegeButton.setGeometry(QtCore.QRect(430, 200, 150, 20))
# collegeButton.clicked.connect(lambda: self.handleClick('college'))
# employed
employedButton = QtWidgets.QPushButton('Employed', self)
employedButton.setGeometry(QtCore.QRect(70, 240, 150, 20))
# unemployed
        unemployedButton = QtWidgets.QPushButton('Unemployed', self)
        unemployedButton.setGeometry(QtCore.QRect(430, 240, 150, 20))
# retired
retiredButton = QtWidgets.QPushButton('Retired', self)
retiredButton.setGeometry(QtCore.QRect(250, 240, 150, 20))
# user feedback
sentence = QLineEdit(self)
sentence.setGeometry(100, 350, 420, 30) #(x, y, width, height)
sentence.setPlaceholderText('Enter an occupation here if yours does not show: ')
sentence.setFont(QFont('Arial', 16))
# submit button
submitButton = QtWidgets.QPushButton('Submit', self)
submitButton.setGeometry(QtCore.QRect(100, 400, 100, 20))
nextButton = QtWidgets.QPushButton('Next', self)
nextButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
nextButton.clicked.connect(self.goToSce)
prevButton = QtWidgets.QPushButton('Back', self)
prevButton.setGeometry(QtCore.QRect(520, 440, 100, 20))
prevButton.clicked.connect(self.goToOptions)
# Page 4
class ScenariosWindow(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.UiComponents()
def goToMain1(self):
self.goto("main1")
def goToOcc(self):
self.goto("occupations")
def UiComponents(self):
# msg = QLabel(self)
# msg.setText(occupation)
# msg.setGeometry(QtCore.QRect(0, 0, 150, 30)) #(x, y, width, height)
prompt = QLabel(self)
prompt.setText("Pick a scenario to practice with: ")
prompt.setFont(QFont('Arial', 25))
prompt.setGeometry(QtCore.QRect(80, 50, 450, 150)) #(x, y, width, height)
# Classroom (student)
class1Button = QtWidgets.QPushButton('Classroom (student)', self)
class1Button.setGeometry(QtCore.QRect(70, 200, 150, 20))
# Classroom (teacher)
class2Button = QtWidgets.QPushButton('Classroom (teacher)', self)
class2Button.setGeometry(QtCore.QRect(70, 230, 150, 20))
# Work (employee)
work1Button = QtWidgets.QPushButton('Work (employee)', self)
work1Button.setGeometry(QtCore.QRect(70, 260, 150, 20))
# Work (boss)
work2Button = QtWidgets.QPushButton('Work (boss)', self)
work2Button.setGeometry(QtCore.QRect(70, 290, 150, 20))
        # Dining
        diningButton = QtWidgets.QPushButton('Dining', self)
        diningButton.setGeometry(QtCore.QRect(70, 320, 150, 20))
# Hospital
hospButton = QtWidgets.QPushButton('Hospital', self)
hospButton.setGeometry(QtCore.QRect(70, 350, 150, 20))
# Social
socialButton = QtWidgets.QPushButton('Social', self)
socialButton.setGeometry(QtCore.QRect(70, 380, 150, 20))
# User feedback
userFeedback = QLineEdit(self)
userFeedback.setGeometry(QtCore.QRect(70, 410, 350, 20))
userFeedback.setPlaceholderText('Enter a scenario if the one you want does not show: ')
submitButton = QtWidgets.QPushButton('Submit', self)
submitButton.setGeometry(QtCore.QRect(70, 440, 100, 20))
nextButton = QtWidgets.QPushButton('Next', self)
nextButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
nextButton.clicked.connect(self.goToMain1)
prevButton = QtWidgets.QPushButton('Back', self)
prevButton.setGeometry(QtCore.QRect(520, 440, 100, 20))
prevButton.clicked.connect(self.goToOcc)
# Page 6
class Loading1Window(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.UiComponents()
def goToResult(self):
self.goto("result1")
def goToProfile(self):
self.goto("profile1")
def UiComponents(self):
# user image
user = QtWidgets.QPushButton('', self)
user.setIcon(QtGui.QIcon('user.png'))
user.setIconSize(QtCore.QSize(64,64))
user.setFixedSize(70, 70)
user.setGeometry(570,0,70,70)
user.clicked.connect(self.goToProfile)
load = QLabel(self)
load.setPixmap(QPixmap('load.png').scaled(128, 128, QtCore.Qt.KeepAspectRatio))
        load.setScaledContents(True)
load.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
load.setGeometry(256, 176, 128, 128)
msg = QLabel(self)
msg.setText('Grading your performance...')
msg.setFont(QFont('Cursive', 22))
msg.setGeometry(180, 310, 300, 60)
nextButton = QtWidgets.QPushButton('View Results', self)
nextButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
nextButton.clicked.connect(self.goToResult)
# Page 6
class Loading2Window(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.UiComponents()
def goToResult(self):
self.goto("result2")
def goToProfile(self):
self.goto("profile2")
def UiComponents(self):
# user image
user = QtWidgets.QPushButton('', self)
user.setIcon(QtGui.QIcon('user.png'))
user.setIconSize(QtCore.QSize(64,64))
user.setFixedSize(70, 70)
user.setGeometry(570,0,70,70)
user.clicked.connect(self.goToProfile)
# loading icon
load = QLabel(self)
load.setPixmap(QPixmap('load.png').scaled(128, 128, QtCore.Qt.KeepAspectRatio))
        load.setScaledContents(True)
load.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
load.setGeometry(256, 176, 128, 128)
msg = QLabel(self)
msg.setText('Grading your performance...')
msg.setFont(QFont('Cursive', 22))
msg.setGeometry(180, 310, 300, 60)
nextButton = QtWidgets.QPushButton('View Results', self)
nextButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
nextButton.clicked.connect(self.goToResult)
# Page 7
class Result1Window(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setWindowTitle("Your Performance")
self.UiComponents()
def goToMain(self):
self.goto("main1")
def goToProfile(self):
self.goto("profile")
def UiComponents(self):
# user image
user = QtWidgets.QPushButton('', self)
user.setIcon(QtGui.QIcon('user.png'))
user.setIconSize(QtCore.QSize(64,64))
user.setFixedSize(70, 70)
user.setGeometry(570,0,70,70)
user.clicked.connect(self.goToProfile)
# User Prompt
promptMsg = QLabel(self)
promptMsg.setText("Here is how you did...")
promptMsg.setFont(QFont('Arial', 30))
promptMsg.setGeometry(QtCore.QRect(130, 80, 330, 100)) #(x, y, width, height)
# Score Display
score = QLabel(self)
score.setText('78')
score.setFont(QFont('Fantasy', 70))
score.setGeometry(256, 240, 100, 100)
score.setStyleSheet("""
QLabel {
color: #FF8C00
}
""")
# Score message
score = QLabel(self)
score.setText('Not bad!')
score.setFont(QFont('Fantasy', 26))
score.setGeometry(250, 340, 130, 50)
score.setStyleSheet("""
QLabel {
color: #FF8C00
}
""")
# speaker button
speakerButton = QtWidgets.QPushButton('', self)
# self.button.clicked.connect(self.handleButton)
speakerButton.setIcon(QtGui.QIcon('speaker1.png'))
speakerButton.setIconSize(QtCore.QSize(28,28))
speakerButton.setFixedSize(32, 32)
speakerButton.move(125,170)
# sentence input
sentence = QLineEdit(self)
sentence.setGeometry(190, 170, 320, 30) #(x, y, width, height)
sentence.setPlaceholderText('')
# words in sentence
word1 = QLabel(self)
word1.setText('This')
word1.setGeometry(195, 170, 32, 30)
word1.setFont(QFont('Arial', 16))
word1.setStyleSheet("""
color: #F08080
""")
word2 = QLabel(self)
word2.setText(' is')
word2.setGeometry(227, 170, 24, 30)
word2.setFont(QFont('Arial', 16))
word2.setStyleSheet("""
color: #9ACD32
""")
word3 = QLabel(self)
word3.setText('a')
word3.setGeometry(251, 170, 16, 30)
word3.setFont(QFont('Arial', 16))
word3.setStyleSheet("""
color: #F08080
""")
word4 = QLabel(self)
word4.setText(' sample sentence.')
word4.setGeometry(267, 170, 160, 30)
word4.setFont(QFont('Arial', 16))
word4.setStyleSheet("""
color: #9ACD32
""")
self.backButton = QtWidgets.QPushButton("Back", self)
self.backButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
self.backButton.clicked.connect(self.goToMain)
# User feedback
userFeedback = QLineEdit(self)
userFeedback.setGeometry(QtCore.QRect(70, 410, 350, 20))
userFeedback.setPlaceholderText('Let us know if there is anything else you would like to see: ')
submitButton = QtWidgets.QPushButton('Submit', self)
submitButton.setGeometry(QtCore.QRect(70, 440, 100, 20))
# Page 7
class Result2Window(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setWindowTitle("Your Performance")
self.UiComponents()
def goToMain(self):
self.goto("main2")
def goToProfile(self):
self.goto("profile")
def UiComponents(self):
# user image
user = QtWidgets.QPushButton('', self)
user.setIcon(QtGui.QIcon('user.png'))
user.setIconSize(QtCore.QSize(64,64))
user.setFixedSize(70, 70)
user.setGeometry(570,0,70,70)
user.clicked.connect(self.goToProfile)
# User Prompt
promptMsg = QLabel(self)
promptMsg.setText("Here is how you did...")
promptMsg.setFont(QFont('Arial', 30))
promptMsg.setGeometry(QtCore.QRect(130, 80, 330, 100)) #(x, y, width, height)
# Score Display
score = QLabel(self)
score.setText('78')
score.setFont(QFont('Fantasy', 70))
score.setGeometry(270, 280, 100, 100)
score.setStyleSheet("""
QLabel {
color: #FF8C00
}
""")
# Score message
score = QLabel(self)
score.setText('Not bad!')
score.setFont(QFont('Fantasy', 26))
score.setGeometry(268, 380, 130, 50)
score.setStyleSheet("""
QLabel {
color: #FF8C00
}
""")
# speaker button
speakerButton = QtWidgets.QPushButton('', self)
# self.button.clicked.connect(self.handleButton)
speakerButton.setIcon(QtGui.QIcon('speaker1.png'))
speakerButton.setIconSize(QtCore.QSize(28,28))
speakerButton.setFixedSize(32, 32)
speakerButton.move(125,250)
# sentence input
sentence = QLineEdit(self)
sentence.setGeometry(190, 250, 320, 30) #(x, y, width, height)
sentence.setPlaceholderText('')
# words in sentence
word1 = QLabel(self)
word1.setText('This')
word1.setGeometry(195, 250, 32, 30)
word1.setFont(QFont('Arial', 16))
word1.setStyleSheet("""
color: #F08080
""")
word2 = QLabel(self)
word2.setText(' is')
word2.setGeometry(227, 250, 24, 30)
word2.setFont(QFont('Arial', 16))
word2.setStyleSheet("""
color: #9ACD32
""")
word3 = QLabel(self)
word3.setText('a')
word3.setGeometry(251, 250, 16, 30)
word3.setFont(QFont('Arial', 16))
word3.setStyleSheet("""
color: #F08080
""")
word4 = QLabel(self)
word4.setText(' sample sentence.')
word4.setGeometry(267, 250, 160, 30)
word4.setFont(QFont('Arial', 16))
word4.setStyleSheet("""
color: #9ACD32
""")
self.backButton = QtWidgets.QPushButton("Back", self)
self.backButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
self.backButton.clicked.connect(self.goToMain)
# Page 8
class UserProfileWindow(PageWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setWindowTitle("Your Profile")
self.UiComponents()
def goToMain(self):
self.goto("main1")
def UiComponents(self):
# sample sentence you get wrong the most
sentence = QLabel(self)
sentence.setText('This is the sentence you get wrong the most: ')
sentence.setFont(QFont('Arial', 20))
sentence.setGeometry(QtCore.QRect(80, 20, 450, 50)) #(x, y, width, height)
sentence = QLabel(self)
sentence.setText('This world is literally complicated')
sentence.setFont(QFont('Arial', 20))
sentence.setGeometry(QtCore.QRect(120, 60, 450, 50)) #(x, y, width, height)
# sample phoneme you get wrong the most
sentence = QLabel(self)
        sentence.setText('These are the top 5 phonemes you get wrong the most: ')
sentence.setFont(QFont('Arial', 20))
sentence.setGeometry(QtCore.QRect(80, 160, 500, 50)) #(x, y, width, height)
sentence = QLabel(self)
sentence.setText('ay, ah, ch, mb, au')
sentence.setFont(QFont('Arial', 20))
sentence.setGeometry(QtCore.QRect(120, 200, 450, 50)) #(x, y, width, height)
# how many sentences you have practiced
sentence = QLabel(self)
        sentence.setText('Congratulations! You have practiced 15 sentences today!')
sentence.setFont(QFont('Arial', 20))
sentence.setGeometry(QtCore.QRect(80, 250, 550, 100)) #(x, y, width, height)
self.backButton = QtWidgets.QPushButton("Back", self)
self.backButton.setGeometry(QtCore.QRect(520, 400, 100, 20))
self.backButton.clicked.connect(self.goToMain)
class Window(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super().__init__(parent)
# set the window size
self.left = 50
self.top = 50
self.width = 640
self.height = 480
self.setGeometry(self.left, self.top, self.width, self.height)
# Set window background color
self.setAutoFillBackground(True)
p = self.palette()
p.setColor(self.backgroundRole(), QtCore.Qt.white)
self.setPalette(p)
self.stacked_widget = QtWidgets.QStackedWidget()
self.setCentralWidget(self.stacked_widget)
self.m_pages = {}
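        # m_pages maps each registered page name to its widget; register() adds the
        # widget to the stacked widget and wires its gotoSignal to goto(), which is
        # what actually switches the visible page.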
self.register(LaunchWindow(), "launch")
self.register(OptionsWindow(), "options")
self.register(OccupationsWindow(), "occupations")
self.register(ScenariosWindow(), "scenarios")
self.register(Main1Window(), "main1")
self.register(Main2Window(), "main2")
self.register(Loading1Window(), "loading1")
self.register(Loading2Window(), "loading2")
self.register(Result1Window(), "result1")
self.register(Result2Window(), "result2")
self.register(UserProfileWindow(), "profile")
self.goto("launch")
def register(self, widget, name):
self.m_pages[name] = widget
self.stacked_widget.addWidget(widget)
if isinstance(widget, PageWindow):
widget.gotoSignal.connect(self.goto)
@QtCore.pyqtSlot(str)
def goto(self, name):
if name in self.m_pages:
widget = self.m_pages[name]
self.stacked_widget.setCurrentWidget(widget)
self.setWindowTitle(widget.windowTitle())
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
w = Window()
w.show()
sys.exit(app.exec_())
```
# Face Recognition with SphereFace
Paper: https://arxiv.org/abs/1704.08063
Repo: https://github.com/wy1iu/sphereface
```
import cv2
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
#We are going to use deepface to detect and align faces
#Repo: https://github.com/serengil/deepface
#!pip install deepface
from deepface.commons import functions
```
### Pre-trained model
```
#Structure: https://github.com/wy1iu/sphereface/blob/master/train/code/sphereface_deploy.prototxt
#Weights: https://drive.google.com/open?id=0B_geeR2lTMegb2F6dmlmOXhWaVk
model = cv2.dnn.readNetFromCaffe("sphereface_deploy.prototxt", "sphereface_model.caffemodel")
#SphereFace input shape. You can verify this in the prototxt.
input_shape = (112, 96)
```
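As a quick check added here (not part of the original notebook), you can feed one aligned face through the network and inspect the output; this SphereFace model should return a 512-dimensional embedding per face:
```
probe = functions.preprocess_face("../deepface/tests/dataset/img1.jpg", target_size=input_shape)[0]
model.setInput(cv2.dnn.blobFromImage(probe))
embedding = model.forward()[0]
print(embedding.shape)  # expected to be (512,): one embedding vector per face
```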
### Common functions
```
#Similarity metrics tutorial: https://sefiks.com/2018/08/13/cosine-similarity-in-machine-learning/
def findCosineDistance(source_representation, test_representation):
a = np.matmul(np.transpose(source_representation), test_representation)
b = np.sum(np.multiply(source_representation, source_representation))
c = np.sum(np.multiply(test_representation, test_representation))
return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
def findEuclideanDistance(source_representation, test_representation):
euclidean_distance = source_representation - test_representation
euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
euclidean_distance = np.sqrt(euclidean_distance)
return euclidean_distance
```
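As a small sanity check (not part of the original notebook), the two metrics can be exercised on dummy vectors; identical vectors should give a distance of 0 under both:
```
a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 3.0])
c = np.array([-1.0, 0.5, 2.0])
print(findCosineDistance(a, b), findEuclideanDistance(a, b))  # both ~0.0
print(findCosineDistance(a, c), findEuclideanDistance(a, c))  # both clearly larger
```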
### Data set
```
# Master.csv: https://github.com/serengil/deepface/blob/master/tests/dataset/master.csv
# Images: https://github.com/serengil/deepface/tree/master/tests/dataset
df = pd.read_csv("../deepface/tests/dataset/master.csv")
df.head()
euclidean_distances = []; cosine_distances = []
for index, instance in tqdm(df.iterrows(), total = df.shape[0]):
img1_path = instance["file_x"]
img2_path = instance["file_y"]
target_label = instance["Decision"]
#----------------------------------
#detect and align
img1 = functions.preprocess_face("../deepface/tests/dataset/%s" % (img1_path), target_size=input_shape)[0]
img2 = functions.preprocess_face("../deepface/tests/dataset/%s" % (img2_path), target_size=input_shape)[0]
#----------------------------------
#reshape images to expected shapes
img1_blob = cv2.dnn.blobFromImage(img1)
img2_blob = cv2.dnn.blobFromImage(img2)
    if img1_blob.shape != (1, 3, 96, 112) or img2_blob.shape != (1, 3, 96, 112):
        raise ValueError(f"img blobs must have shape (1, 3, 96, 112) but got {img1_blob.shape} and {img2_blob.shape}")
#----------------------------------
#representation
model.setInput(img1_blob)
img1_representation = model.forward()[0]
model.setInput(img2_blob)
img2_representation = model.forward()[0]
#----------------------------------
euclidean_distance = findEuclideanDistance(img1_representation, img2_representation)
euclidean_distances.append(euclidean_distance)
cosine_distance = findCosineDistance(img1_representation, img2_representation)
cosine_distances.append(cosine_distance)
df['euclidean'] = euclidean_distances
df['cosine'] = cosine_distances
df.head()
```
### Visualize distributions
```
df[df.Decision == "Yes"]['euclidean'].plot(kind='kde', title = 'euclidean', label = 'Yes', legend = True)
df[df.Decision == "No"]['euclidean'].plot(kind='kde', title = 'euclidean', label = 'No', legend = True)
plt.show()
df[df.Decision == "Yes"]['cosine'].plot(kind='kde', title = 'cosine', label = 'Yes', legend = True)
df[df.Decision == "No"]['cosine'].plot(kind='kde', title = 'cosine', label = 'No', legend = True)
plt.show()
```
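Beyond the density plots, a quick numeric summary (added here, not in the original notebook) makes the separation between the two classes easier to read off:
```
print(df.groupby('Decision')[['euclidean', 'cosine']].agg(['mean', 'std', 'min', 'max']))
```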
### Find the best threshold
```
#Repo: https://github.com/serengil/chefboost
#!pip install chefboost
from chefboost import Chefboost as chef
config = {'algorithm': 'C4.5'}
df[['euclidean', 'Decision']].head()
euclidean_tree = chef.fit(df[['euclidean', 'Decision']].copy(), config)
cosine_tree = chef.fit(df[['cosine', 'Decision']].copy(), config)
#stored in outputs/rules
euclidean_threshold = 17.212238311767578 #euclidean
cosine_threshold = 0.4668717384338379 #cosine
```
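If you would rather not copy the thresholds out of chefboost's outputs/rules files by hand, a simple alternative sketch (not from the original notebook) is to sweep the observed distances and keep the candidate that maximizes accuracy on this labeled set:
```
def find_best_threshold(distances, labels):
    # smaller distance should mean "Yes" (same person)
    best_threshold, best_accuracy = None, -1.0
    for candidate in sorted(distances.unique()):
        predictions = np.where(distances <= candidate, "Yes", "No")
        accuracy = (predictions == labels).mean()
        if accuracy > best_accuracy:
            best_threshold, best_accuracy = candidate, accuracy
    return best_threshold, best_accuracy

print(find_best_threshold(df['euclidean'], df['Decision']))
print(find_best_threshold(df['cosine'], df['Decision']))
```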
### Predictions
```
df['prediction_by_euclidean'] = 'No'
df['prediction_by_cosine'] = 'No'
df.loc[df[df['euclidean'] <= euclidean_threshold].index, 'prediction_by_euclidean'] = 'Yes'
df.loc[df[df['cosine'] <= cosine_threshold].index, 'prediction_by_cosine'] = 'Yes'
df.sample(5)
euclidean_positives = 0; cosine_positives = 0
for index, instance in df.iterrows():
target = instance['Decision']
prediction_by_euclidean = instance['prediction_by_euclidean']
prediction_by_cosine = instance['prediction_by_cosine']
if target == prediction_by_euclidean:
euclidean_positives = euclidean_positives + 1
if target == prediction_by_cosine:
cosine_positives = cosine_positives + 1
print("Accuracy (euclidean): ",round(100 * euclidean_positives/df.shape[0], 2))
print("Accuracy (cosine): ",round(100 * cosine_positives/df.shape[0], 2))
```
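Accuracy alone can hide class imbalance, so here is a short sketch (not in the original notebook) of the confusion counts and precision/recall for the euclidean predictions, using only pandas booleans:
```
tp = ((df['Decision'] == 'Yes') & (df['prediction_by_euclidean'] == 'Yes')).sum()
fp = ((df['Decision'] == 'No') & (df['prediction_by_euclidean'] == 'Yes')).sum()
fn = ((df['Decision'] == 'Yes') & (df['prediction_by_euclidean'] == 'No')).sum()
tn = ((df['Decision'] == 'No') & (df['prediction_by_euclidean'] == 'No')).sum()
precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
print("TP:", tp, "FP:", fp, "FN:", fn, "TN:", tn)
print("Precision:", round(precision, 4), "Recall:", round(recall, 4))
```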
### Production
```
def verifyFaces(img1_path, img2_path):
print("Verify ",img1_path," and ",img2_path)
#------------------------------------
#detect and align
img1 = functions.preprocess_face(img1_path, target_size=input_shape)[0]
img2 = functions.preprocess_face(img2_path, target_size=input_shape)[0]
img1_blob = cv2.dnn.blobFromImage(img1)
img2_blob = cv2.dnn.blobFromImage(img2)
#------------------------------------
#representation
model.setInput(img1_blob)
img1_representation = model.forward()[0]
model.setInput(img2_blob)
img2_representation = model.forward()[0]
#------------------------------------
#verify
euclidean_distance = findEuclideanDistance(img1_representation, img2_representation)
print("Found euclidean distance is ",euclidean_distance," whereas required threshold is ",euclidean_threshold)
fig = plt.figure()
ax1 = fig.add_subplot(1,2,1)
plt.imshow(img1[:,:,::-1])
plt.axis('off')
ax2 = fig.add_subplot(1,2,2)
plt.imshow(img2[:,:,::-1])
plt.axis('off')
plt.show()
    if euclidean_distance <= euclidean_threshold:
        print("they are the same person")
    else:
        print("they are not the same person")
```
### True positive examples
```
verifyFaces("../deepface/tests/dataset/img1.jpg", "../deepface/tests/dataset/img2.jpg")
verifyFaces("../deepface/tests/dataset/img54.jpg", "../deepface/tests/dataset/img3.jpg")
verifyFaces("../deepface/tests/dataset/img42.jpg", "../deepface/tests/dataset/img45.jpg")
verifyFaces("../deepface/tests/dataset/img9.jpg", "../deepface/tests/dataset/img49.jpg")
```
### True negative examples
```
verifyFaces("../deepface/tests/dataset/img1.jpg", "../deepface/tests/dataset/img3.jpg")
verifyFaces("../deepface/tests/dataset/img1.jpg", "../deepface/tests/dataset/img45.jpg")
verifyFaces("../deepface/tests/dataset/img1.jpg", "../deepface/tests/dataset/img49.jpg")
```
## UCI ML Breast Cancer Classification
We attempt to find the most accurate model that can learn from the given dataset and reliably predict whether any new input is a "malignant" or a "benign" example.
We'll try out several models: logistic regression (LogReg), a decision tree classifier (DTC), a random forest classifier (RFC), k-nearest neighbours (KNN), naive Bayes, and a kernel SVM.
### Importing Libraries
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import confusion_matrix, accuracy_score
```
### Loading the data
```
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/breast-cancer-wisconsin.data'
names = ['id', 'clump_thickness', 'uniform_cell_size', 'uniform_cell_shape',
'marginal_adhesion', 'single_epithelial_size', 'bare_nuclei',
'bland_chromatin', 'normal_nucleoli', 'mitoses', 'class']
df = pd.read_csv(url, names=names).replace('?', np.nan).dropna()
df.drop(['id'], axis=1, inplace=True) #since, it won't contribute to classification.
df.head()
df.describe()
```
### Preparing the data
```
dataset = df
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
```
So now the data looks clean and usable, with the nulls and the id column removed. Let's see if there are any notably high or low correlations amongst the features.
```
from pandas.plotting import scatter_matrix
scatter_matrix(df, figsize=(15, 15))
df.drop(['class'], axis=1, inplace=True)
import seaborn as sb
# plotting correlation heatmap
dataplot=sb.heatmap(df.corr())
# displaying heatmap
plt.show()
```
### Splitting the dataset into the Training set and Test set
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
```
### Feature Scaling
```
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
```
## Decision Tree Classification
### Training the Decision Tree Classification model on the Training set
```
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
```
### Making the Confusion Matrix
```
y1_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y1_pred)
print(cm)
accuracy_score(y_test, y1_pred)
```
## KNN Classification
### Training the K-NN model on the Training set
```
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier.fit(X_train, y_train)
```
### Confusion Matrix
```
y2_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y2_pred)
print(cm)
accuracy_score(y_test, y2_pred)
```
## Kernel SVM Classifier
### Training the Kernel SVM
```
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0)
classifier.fit(X_train, y_train)
```
### Making the Confusion Matrix
```
y3_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y3_pred)
print(cm)
accuracy_score(y_test, y3_pred)
```
## Logistic Regression
### Training the model
```
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0)
classifier.fit(X_train, y_train)
```
### Results
```
y4_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y4_pred)
print(cm)
accuracy_score(y_test, y4_pred)
```
## Naive Bayes Classifier
```
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
y5_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y5_pred)
print(cm)
accuracy_score(y_test, y5_pred)
```
## Random Forest Classifier
```
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
y6_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y6_pred)
print(cm)
accuracy_score(y_test, y6_pred)
```
So, after evaluating all the models, we can conclude that the Decision Tree Classifier performed best on this dataset with ~96% accuracy, followed by the Random Forest Classifier at 95.32% (which might improve further with hyperparameter tuning), while the Kernel SVM, KNN and LogReg tie at third place with 94.7% accuracy.
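As an optional summary (reusing the predictions `y1_pred` through `y6_pred` already computed above), the accuracies can be collected into a single table for easier comparison:
```
results = pd.DataFrame({
    'model': ['Decision Tree', 'KNN', 'Kernel SVM', 'Logistic Regression', 'Naive Bayes', 'Random Forest'],
    'accuracy': [accuracy_score(y_test, pred)
                 for pred in [y1_pred, y2_pred, y3_pred, y4_pred, y5_pred, y6_pred]]
})
results.sort_values('accuracy', ascending=False)
```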
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
```
# Motivation
We can view pretty much all of **machine learning (ML)** (and this is one of many possible views) as an **optimisation** exercise. Our challenge in supervised learning is to find a function that maps the inputs of a certain system to its outputs. Since we don't have direct access to that function, we have to estimate it. We aim to find the *best* possible estimate. Whenever we use the word "best" in mathematics, we imply some kind of optimisation. Thus we either maximise some **performance function**, which increases for better estimates, or minimise some **loss function**, which decreases for better estimates. In general, we refer to the function that we optimise as the **objective function**.
There are elements of both science and art in the choice of performance/loss functions. For now let us focus on optimisation itself.
# Univariate functions
From school many of us remember how to optimise functions of a single scalar variable — **univariate** functions, such as, for example,
$$f(x) = -2x^2 + 6x + 9.$$
In Python we would define this function as
```
def func(x): return -2. * x**2 + 6. * x + 9.
```
So we can pass values of $x$ to it as arguments and obtain the corresponding values $f(x)$ as the function's return value:
```
func(0.)
```
Whenever we are dealing with functions, it is always a good idea to visually examine their graphs:
```
xs = np.linspace(-10., 10., 100)
fs = [func(x) for x in xs]
plt.plot(xs, fs, 'o');
```
Unsurprisingly (if we remember high school mathematics), the graph of our univariate **quadratic** (because the highest power of $x$ in it comes as $x^2$) function is a **parabola**. We are lucky: this function is **concave** — if we join any two points on its graph, the straight line joining them will always lie below the graph. For such functions we can usually find the **global optimum** (**minimum** or **maximum**, in this case the function has a single **global maximum**).
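We can also sanity-check the concavity claim numerically: a concave function satisfies $f(tx + (1-t)y) \ge t f(x) + (1-t) f(y)$ for any points $x, y$ and any $t \in [0, 1]$. A quick, non-rigorous check over random samples:
```
rng = np.random.default_rng(0)
x_rand = rng.uniform(-10., 10., size=1000)
y_rand = rng.uniform(-10., 10., size=1000)
t = rng.uniform(0., 1., size=1000)
chord = t * func(x_rand) + (1. - t) * func(y_rand)   # straight line joining the two points
graph = func(t * x_rand + (1. - t) * y_rand)         # function value on the segment
np.all(graph >= chord - 1e-12)                       # small tolerance for floating-point error
```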
# Global versus local optima
We say **global** optimum, because a function may have multiple optima. All of them are called **local** optima, but only the largest maxima (the smallest minima) are referred to as **global**.
Consider the function
$$f(x) = x \cos(x).$$
It has numerous local minima and local maxima over $x \in \mathbb{R}$, but no global minimum/maximum:
```
xs = np.linspace(-100., 100., 1000)
fs = xs * np.cos(xs)
plt.plot(xs, fs);
```
Now consider the function
$$f(x) = \frac{1}{x} \sin(x).$$
It has a single global maximum, two global minima, and infinitely many local maxima and minima.
```
xs = np.linspace(-100., 100., 1000)
fs = (1./xs) * np.sin(xs)
plt.plot(xs, fs);
```
# High school optimisation
Many of us remember from school this method of optimising functions. For our function, say
$$f(x) = -2x^2 + 6x + 9,$$
find the function's derivative. If we forgot how to differentiate functions, we can look up the rules of differentiation, say, on Wikipedia. In our example, differentiation is straightforward, and yields
$$\frac{d}{dx}f(x) = -4x + 6.$$
However, if we have completely forgotten the rules of differentiation, one particular Python library — the one for doing symbolic maths — comes in useful:
```
import sympy
x = sympy.symbols('x')
func_diff = sympy.diff(-2. * x**2 + 6. * x + 9, x)
func_diff
```
Our next step is to find the $x$ (we'll call it $x_{\text{max}}$) at which this derivative becomes zero. This notation is somewhat misleading, because it is $f(x_{\text{max}})$ that is maximum, not $x_{\text{max}}$ itself; $x_{\text{max}}$ is the *location* of the function's maximum:
$$\frac{d}{dx}f(x_{\text{max}}) = 0,$$
i.e.
$$-4x_{\text{max}} + 6 = 0.$$
Hence the solution is
$$x_{\text{max}} = -6 / (-4) = 3/2 = 1.5$$
We could also use SymPy to solve the above equation:
```
roots = sympy.solve(func_diff, x)
roots
x_max = roots[0]
```
In order to check that the value is indeed a local maximum and not a local minimum (and not a **saddle point**, look them up), we look at the second derivative of the function,
$$\frac{d^2}{dx^2}f(x_{\text{max}}) = -4.$$
Since this second derivative is negative at $x_{\text{max}}$, we are indeed looking at an (at least local) maximum. In this case we are lucky: this is also a global maximum. However, in general, it isn't easy to check mathematically whether an optimum is global or not. This is one of the major challenges in optimisation.
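We can double-check the second derivative with SymPy too, reusing the symbol `x` defined above; a negative result confirms that $x_{\text{max}}$ is indeed a maximum:
```
second_deriv = sympy.diff(-2. * x**2 + 6. * x + 9, x, 2)
second_deriv
```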
Let us now find the value of the function at the maximum by plugging in $x_{\text{max}}$ into $f$:
$$f_{\text{max}} = f(x_{\text{max}}) = -2 x_{\text{max}}^2 + 6 x_{\text{max}} + 9 = -2 \cdot 1.5^2 + 6 \cdot 1.5 + 9 = 13.5.$$
```
f_max = func(x_max)
f_max
```
Let us label this maximum on the function's graph:
```
xs = np.linspace(-10., 10., 100)
fs = [func(x) for x in xs]
plt.plot(xs, fs, 'o')
plt.plot(x_max, f_max, 'o', color='red')
plt.axvline(x_max, color='red')
plt.axhline(f_max, color='red');
```
# Multivariate functions
So far we have considered the optimisation of **real-valued** functions of a single real variable, i.e. $f: \mathbb{R} \rightarrow \mathbb{R}$.
However, most functions that we encounter in data science and machine learning are **multivariate**, i.e. $f: \mathbb{R}^n \rightarrow \mathbb{R}$. Moreover, some are also **multivalued**, i.e. $f: \mathbb{R}^n \rightarrow \mathbb{R}^m$.
(Note: univariate/multivariate refers to the function's argument, whereas single-valued/multi-valued to the function's output.)
Consider, for example, the following single-valued, multivariate function:
$$f(x_1, x_2) = -x_1^2 - x_2^2 + 6x_1 + 3x_2 + 9.$$
We could define it in Python as
```
def func(x1, x2): return -x1**2 - x2**2 + 6.*x1 + 3.*x2 + 9.
```
Let's plot its graph. First, we need to compute the values of the function on a two-dimensional mesh grid:
```
x1s, x2s = np.meshgrid(np.linspace(-100., 100., 100), np.linspace(-100., 100., 100))
fs = func(x1s, x2s)
np.shape(fs)
```
Then we can use the following code to produce a 3D plot:
```
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.contour3D(x1s, x2s, fs, 50);
```
It may be more convenient to implement multivariate functions as functions of a single vector (more precisely, rank-1 NumPy array) in Python:
```
def func(x): return -x[0]**2 - x[1]**2 + 6.*x[0] + 3.*x[1] + 9.
```
# Optimising multivariate functions analytically
The analytical method of finding the optimum of a multivariate function is similar to that for univariate functions. As the function has multiple arguments, we need to find its so-called **partial derivative** with respect to each argument. Each is computed like an ordinary derivative, while treating all the other arguments as constants:
$$\frac{\partial}{\partial x_1} f(x_1, x_2) = -2x_1 + 6,$$
$$\frac{\partial}{\partial x_2} f(x_1, x_2) = -2x_2 + 3.$$
We call the vector of the function's partial derivatives its **gradient** vector, or **grad**:
$$\nabla f(x_1, x_2) = \begin{pmatrix} \frac{\partial}{\partial x_1} f(x_1, x_2) \\ \frac{\partial}{\partial x_2} f(x_1, x_2) \end{pmatrix}.$$
When the function is continuous and differentiable, all the partial derivatives will be 0 at a local maximum or minimum point. Saying that all the partial derivatives are zero at a point, $(x_1^*, x_2^*)$, is the same as saying the gradient at that point is the zero vector:
$$\nabla f(x_1^*, x_2^*) = \begin{pmatrix} \frac{\partial}{\partial x_1} f(x_1^*, x_2^*) \\ \frac{\partial}{\partial x_2} f(x_1^*, x_2^*) \end{pmatrix} = \begin{pmatrix} 0 \\ 0 \end{pmatrix} = \mathbf{0}.$$
In our example, we can easily establish that the gradient vector is zero at $x_1^* = 3$, $x_2^* = 1.5$. And the maximum value that is achieved is
```
func([3, 1.5])
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.contour3D(x1s, x2s, fs, 50)
ax.plot([3], [1.5], [20.25], 'o', color='red', markersize=20);
```
# The Jacobian
Notice that, for multivalued (not just multivariate) functions, $\mathbb{R}^n \rightarrow \mathbb{R}^m$, the **gradient** vector of partial derivatives generalises to the **Jacobian** matrix:
$$\mathbf{J} = \begin{pmatrix} \frac{\partial f_1}{\partial x_1} & \frac{\partial f_1}{\partial x_2} & \cdots & \frac{\partial f_1}{\partial x_n} \\ \vdots & \vdots & \ddots & \vdots \\ \frac{\partial f_m}{\partial x_1} & \frac{\partial f_m}{\partial x_2} & \cdots & \frac{\partial f_m}{\partial x_n} \end{pmatrix}.$$
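SymPy can compute such a Jacobian symbolically; the two-valued function below is made up purely for illustration:
```
import sympy
x1, x2 = sympy.symbols('x1 x2')
# an arbitrary function from R^2 to R^2, chosen only to demonstrate Matrix.jacobian
F = sympy.Matrix([
    -x1**2 - x2**2 + 6*x1 + 3*x2 + 9,
    x1 * sympy.cos(x2),
])
F.jacobian([x1, x2])
```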
# Newton-Raphson's method
**Newton-Raphson's method** is a numerical procedure for finding zeros (**roots**) of functions.
For example, consider again the function
$$f(x) = -2x^2 + 6x + 9.$$
```
def func(x): return -2. * x**2 + 6. * x + 9.
```
We have already found that its derivative is given by
$$\frac{df}{dx}(x) = -4x + 6.$$
```
def func_diff(x): return -4. * x + 6.
```
The Newton-Raphson method starts with some initial guess, $x_0$, and then proceeds iteratively:
$$x_{n+1} = x_n - \frac{f(x_n)}{\frac{d}{dx}f(x_n)}$$
Let's code it up:
```
def newton_raphson_method(f, fdiff, x0, iter_count=10):
x = x0
print('x_0', x0)
for i in range(iter_count):
x = x - f(x) / fdiff(x)
print('x_%d' % (i+1), x)
return x
```
Now let's apply it to our function:
```
newton_raphson_method(func, func_diff, -5.)
```
We see that the method converges quite quickly to one of the roots. Notice that which of the two roots we converge to depends on the initial guess:
```
newton_raphson_method(func, func_diff, x0=5.)
```
**Newton-Raphson** is a **root finding**, not an **optimisation**, algorithm. However, recall that optimisation is equivalent to finding the root of the derivative function. Thus we can apply this algorithm to the derivative function (we also need to provide the second derivative function) to find a local optimum of the function:
```
def func_diff2(x): return -4.
newton_raphson_method(func_diff, func_diff2, -5.)
```
The result is consistent with our analytical solution.
# Newton's method for multivariate functions
Newton's method can be generalised to multivariate functions. For multivalued multivariate functions $f: \mathbb{R}^k \rightarrow \mathbb{R}^k$, the method becomes
$$x_{n+1} = x_n - \mathbf{J}(x_n)^{-1} f(x_n),$$
where $\mathbf{J}$ is the Jacobian.
Since inverses are only defined for square matrices, for functions $f: \mathbb{R}^k \rightarrow \mathbb{R}^m$, we use the Moore-Penrose pseudoinverse $\mathbf{J}^+ = (\mathbf{J}^T \mathbf{J})^{-1} \mathbf{J}^T$ instead of $\mathbf{J}^{-1}$. Let's code this up.
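Before we do, note that NumPy already ships this pseudoinverse as `np.linalg.pinv`; for a matrix with full column rank it agrees with the explicit formula above, which we can check on an arbitrary example:
```
J = np.array([[1., 2.], [3., 4.], [5., 7.]])  # arbitrary 3x2 matrix with full column rank
explicit_pinv = np.dot(np.linalg.inv(np.dot(J.T, J)), J.T)
np.allclose(explicit_pinv, np.linalg.pinv(J))
```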
Inside our generalised implementation of Newton-Raphson, we'll be working with vectors. It's probably a good idea to assume that the function and the Jacobian return rank-2 NumPy arrays.
However, one may have coded up the function as
```
def func(x): return -x[0]**2 - x[1]**2 + 6.*x[0] + 3.*x[1] + 9.
```
and the Jacobian as
```
def func_diff(x): return np.array([-2.*x[0] + 6., -2.*x[1] + 3.])
```
Let's see how we can convert NumPy scalars and arrays to rank-2 arrays. For rank-1 arrays:
```
a = np.array([3., 5., 7.])
np.reshape(a, (np.shape(a)[0], -1))
```
if we want a column (rather than row) vector, which is probably a sensible default. If we wanted a row vector, we could do
```
np.reshape(a, (-1, np.shape(a)[0]))
```
Existing rank-2 arrays remain unchanged by this:
```
a = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
np.reshape(a, (np.shape(a)[0], -1))
np.reshape(a, (-1, np.shape(a)[0]))
```
For scalars, `np.shape(a)[0]` won't work, as their shape is `()`, so we need to do something special. Based on this information, let us implement the auxiliary function `to_rank_2`:
```
def to_rank_2(arg, row_vector=False):
shape = np.shape(arg)
size = 1 if len(shape) == 0 else shape[0]
new_shape = (-1, size) if row_vector else (size, -1)
return np.reshape(arg, new_shape)
```
And test it:
```
to_rank_2(5.)
to_rank_2([1., 2., 3.])
to_rank_2([[1.], [2.], [3.]])
to_rank_2([[1., 2., 3.]])
to_rank_2([[1., 2., 3], [4., 5., 6.]])
```
Now let's generalise our implementation of the Newton-Raphson method:
```
def newton_raphson_method(f, fdiff, x0, iter_count=10):
x = to_rank_2(x0)
for i in range(iter_count):
f_x = to_rank_2(f(x))
fdiff_x = to_rank_2(fdiff(x), row_vector=True)
non_square_jacobian_inv = np.dot(np.linalg.inv(np.dot(fdiff_x.T, fdiff_x)), fdiff_x.T)
x = x - np.dot(non_square_jacobian_inv, f_x)
print('x_%d' % (i+1), x)
return x
newton_raphson_method(func, func_diff, np.array([-10., -10.]), iter_count=5)
func_diff([-80.25, 25.125])
```
**NB! TODO: The above doesn't seem to work at the moment. The returned optimum is wrong. Can you spot a problem with the above implementation?**
# Quasi-Newton method
In practice, we may not always have access to the Jacobian of a function. There are numerical methods, known as **quasi-Newton methods**, which approximate the Jacobian numerically.
One such method is the **Broyden-Fletcher-Goldfarb-Shanno (BFGS)** algorithm. It is generally a bad idea to implement these algorithms by hand, since their implementations are often nuanced and nontrivial.
Fortunately, Python libraries provide excellent implementations of optimisation algorithms.
Let us use SciPy to optimise our function.
Remember that to maximise a function we simply minimise its negative, which is what we achieve with the Python lambda below:
```
import scipy.optimize
scipy.optimize.minimize(lambda x: -func(x), np.array([-80., 25.]), method='BFGS')
```
# Grid search
What we have considered so far isn't the most straightforward optimisation procedure. A natural first thing to do is often the **grid search**.
In grid search, we pick a subset of the parameter space, usually a rectangular grid, evaluate the value at each grid point and pick the point where the function is largest (smallest) as the approximate location of the maximum (minimum).
As a by-product of the grid search we get a heat-map — an excellent way of visualising the magnitude of the function on the parameter space.
If we have more than two parameters, we can produce heatmaps for each parameter pair. (E.g., for a three-dimensional function, $(x_1, x_2)$, $(x_1, x_3)$, $(x_2, x_3)$.)
Grid search is often useful for **tuning** machine learning **hyperparameters** and finding optimal values for trading (and other) strategies, in which case a single evaluation of the objective function may correspond to a single backtest run over all available data.
Let us use the following auxiliary function from https://matplotlib.org/gallery/images_contours_and_fields/image_annotated_heatmap.html
```
def heatmap(data, row_labels, col_labels, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False,
labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
ax.tick_params(which="minor", bottom=False, left=False)
return im, cbar
def func(x1, x2): return -x1**2 - x2**2 + 6.*x1 + 3.*x2 + 9.
x1s_ = np.linspace(-100., 100., 10)
x2s_ = np.linspace(-100., 100., 10)
x1s, x2s = np.meshgrid(x1s_, x2s_)
fs = func(x1s, x2s)
np.shape(fs)
heatmap(fs, x1s_, x2s_)[0];
```
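The heatmap visualises the objective over the grid, but grid search should also report the best grid point; one way to extract it from the values computed above:
```
best_idx = np.unravel_index(np.argmax(fs), fs.shape)
print('best grid point:', (x1s[best_idx], x2s[best_idx]), 'with value', fs[best_idx])
```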
# Random search
Sometimes a **random search** may be preferred over grid search. This also enables us to incorporate our guess — a prior distribution — of the location of the optimum, so we can sample the parameter points from that prior distribution and evaluate the values of the function at those points.
Both **grid search** and **random search** are the so-called **embarrassingly parallel** methods and are trivial to parallelise, either over multiple cores on a single machine or over a cluster/cloud.
In general, it is suboptimal to explore a hypercube of the parameter space by systematically going through each point in a grid. Sobol sequences provide a low-discrepancy sequence of points that covers the space more evenly for a given budget; see Sergei Kucherenko's work in this area.
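A minimal random-search sketch for the same two-dimensional function; the Gaussian prior over candidate points is an arbitrary choice made here for illustration:
```
rng = np.random.default_rng(0)
candidates = rng.normal(loc=0., scale=10., size=(1000, 2))   # sample candidate points from our "prior"
values = np.array([func(x1, x2) for x1, x2 in candidates])
best = candidates[np.argmax(values)]
print('best sampled point:', best, 'with value', values.max())
```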
# Stochastic and batch gradient descent
When working with **artificial neural networks (ANNs)** we usually prefer the **stochastic** and **batch gradient descent methods** over the quasi-Newton methods. We will examine these methods when we introduce ANNs.
# Introduction
Understanding the behavior of neural networks and why they generalize has been a central pursuit of the theoretical deep learning community.
Our paper [*A Fine-Grained Spectral Perspective on Neural Networks*](https://arxiv.org/abs/1907.10599) attacks this problem by looking at eigenvalues and eigenfunctions, as the name suggests.
We will study the spectra of the *Conjugate Kernel* [[Daniely et al. 2017](http://papers.nips.cc/paper/6427-toward-deeper-understanding-of-neural-networks-the-power-of-initialization-and-a-dual-view-on-expressivity.pdf)], or CK (also called the *Neural Network-Gaussian Process Kernel* [[Lee et al. 2018](http://arxiv.org/abs/1711.00165)]), and the *Neural Tangent Kernel*, or NTK [[Jacot et al. 2018](http://arxiv.org/abs/1806.07572)].
Roughly, the CK and the NTK tell us respectively "what a network looks like at initialization" and "what a network looks like during and after training."
Their spectra then encode valuable information about the initial distribution and the training and generalization properties of neural networks.
## Intuition for the utility of the spectral perspective
Let's take the example of the CK.
We know from [Lee et al. (2018)](http://arxiv.org/abs/1711.00165) that a randomly initialized network is distributed as a Gaussian process $\mathcal N(0, K)$, where $K$ is the corresponding CK, in the infinite-width limit.
If we have the eigendecomposition
\begin{equation}
K = \sum_{i \ge 1} \lambda_i u_i\otimes u_i
\label{eqn:eigendecomposition}
\end{equation}
with eigenvalues $\lambda_i$ in decreasing order and corresponding eigenfunctions $u_i$, then each sample from this GP can be obtained as
$$
\sum_{i \ge 1} \sqrt{\lambda_i} \omega_i u_i,\quad
\omega_i \sim \mathcal N(0, 1).
$$
Training the last layer of a randomly initialized network via full batch gradient descent for an infinite amount of time corresponds to Gaussian process inference with kernel $K$ [[Lee et al. 2018](http://arxiv.org/abs/1711.00165), [2019](http://arxiv.org/abs/1902.06720)].
Thus, the more the GP prior (governed by the CK) is consistent with the ground truth function $f^*$, the more we expect the Gaussian process inference and GD training to generalize well.
We can measure this consistency in the "alignment" between the eigenvalues $\lambda_i$ and the squared coefficients $a_i^2$ of $f^*$'s expansion in the $\{u_i\}_i$ basis.
The former can be interpreted as the expected magnitude (squared) of the $u_i$-component of a sample $f \sim \mathcal N(0, K)$, and the latter can be interpreted as the actual magnitude squared of such component of $f^*$.
Here and in this paper, we will investigate an even cleaner setting where $f^* = u_i$ is an eigenfunction.
Thus we would hope to use a kernel whose $i$th eigenvalue $\lambda_i$ is as large as possible.
A similar intuition holds for NTK, because training all parameters of the network for an infinite amount of time yields the mean prediction of the GP $\mathcal N(0, \text{NTK})$ in expectation [[Lee et al. 2019](http://arxiv.org/abs/1902.06720)].
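To make this recipe concrete, here is a small, purely illustrative sketch of drawing a GP sample from an eigendecomposition; the kernel below is a generic stand-in on a 1D grid, not the actual CK or NTK of any network:
```
import numpy as np

rng = np.random.default_rng(0)
n = 200
xs = np.linspace(-1., 1., n)
# a stand-in kernel on a 1D grid (squared-exponential), just to illustrate the sampling recipe
K = np.exp(-0.5 * (xs[:, None] - xs[None, :])**2 / 0.1**2)
# eigendecomposition K = sum_i lambda_i u_i u_i^T (eigh returns ascending eigenvalues)
eigvals, eigvecs = np.linalg.eigh(K)
eigvals, eigvecs = eigvals[::-1], eigvecs[:, ::-1]                  # decreasing order
omegas = rng.standard_normal(n)                                     # omega_i ~ N(0, 1)
sample = eigvecs @ (np.sqrt(np.clip(eigvals, 0., None)) * omegas)   # f = sum_i sqrt(lambda_i) omega_i u_i
```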
## A brief summary of the spectral theory of CK and NTK
Now, if the CK and the NTK had spectra that were difficult to compute, this perspective would not be so useful.
But in idealized settings, where the data distribution is uniform over the boolean cube, the sphere, or from the standard Gaussian, a complete (or almost complete in the Gaussian case) eigendecomposition of the kernel can be obtained, thanks to the symmetry of the domain.
Here and in the paper, we focus on the boolean cube, since in high dimensions, all three distributions are very similar, and the boolean cube eigenvalues are much easier to compute (see paper for more details).
We briefly summarize the spectral theory of CK and NTK (of multilayer perceptrons, or MLPs) on the boolean cube.
First, these kernels are always diagonalized by the *boolean Fourier basis*, which are just monomial functions like $x_1 x_3 x_{10}$.
These Fourier basis functions are naturally graded by their *degree*, ranging from 0 to the dimension $d$ of the cube.
Then the kernel has $d+1$ unique eigenvalues,
$$\mu_0, \ldots, \mu_d$$
corresponding to each of the degrees, so that the eigenspace associated to $\mu_k$ is a $\binom d k$ dimensional space of monomials with degree $k$.
These eigenvalues are simple linear functions of a small number of the kernel values, and can be easily computed.
Let's try computing them ourselves!
# Computing Eigenvalues over a Grid of Hyperparameters
```
import numpy as np
import scipy as sp
from scipy.special import erf as erf
import matplotlib.pyplot as plt
from itertools import product
import seaborn as sns
sns.set()
from mpl_toolkits.axes_grid1 import ImageGrid
def tight_layout(plt):
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
```
Our methods for doing the theoretical computations lie in the `theory` module.
```
from theory import *
```
First, let's compute the eigenvalues of erf CK and NTK over a large range of hyperparameters:
- $\sigma_w^2 \in [1, 5]$
- $\sigma_b^2 \in [0, 4]$
- dimension 128 boolean cube
- depth up to 128
- degree $k \le 8$.
Unless stated otherwise, all plots below use these hyperparameters.
We will do the same for relu kernels later.
```
# range of $\sigma_b^2$
erfvbrange = np.linspace(0, 4, num=41)
# range of $\sigma_w^2$
erfvwrange = np.linspace(1, 5, num=41)
erfvws, erfvbs = np.meshgrid(erfvwrange, erfvbrange, indexing='ij')
# `dim` = $d$
dim = 128
depth = 128
# we will compute the eigenvalues $\mu_k$ for $k = 0, 1, ..., maxdeg$.
maxdeg = 8
```
As mentioned in the paper, any CK or NTK $K$ of multilayer perceptrons (MLPs) takes the form
$$K(x, y) = \Phi\left(\frac{\langle x, y \rangle}{\|x\|\|y\|}, \frac{\|x\|^2}d, \frac{\|y\|^2}d\right)$$
for some function $\Phi: \mathbb R^3 \to \mathbb R$.
On the boolean cube $\{1, -1\}^d$, $\|x\|^2 = d$ for all $x$, and $\langle x, y \rangle / d$ takes value in a discrete set $\{-1, -1+2/d, \ldots, 1-2/d, 1\}$.
Thus $K(x, y)$ only takes a finite number of different values as well.
We first compute these values (see paper for the precise formulas).
```
# `erfkervals` has two entries, with keys `cks` and `ntks`.
# Each entry is an array with shape (`depth`, len(erfvwrange), len(erfvbrange), `dim`+1)
# The last dimension carries the entries $\Phi(-1), \Phi(-1 + 2/d), ..., \Phi(1)$
erfkervals = boolcubeFgrid(dim, depth, erfvws, erfvbs, VErf, VDerErf)
erfkervals['cks'].shape
```
The eigenvalues $\mu_k, k = 0, 1, \ldots, d$, can be expressed as a simple linear function of $\Phi$'s values, as hinted before.
However, a naive evaluation would lose too much numerical precision because of the large number of alternating terms.
Instead, we do something more clever, resulting in the following algorithm:
- For $\Delta = 2/d$, we first evaluate $\Phi^{(a)}(x) = \frac 1 2 \left(\Phi^{(a-1)}(x) - \Phi^{(a-1)}(x - \Delta)\right)$ with base case $\Phi^{(0)} = \Phi$, for $a = 0, 1, \ldots$, and for various values of $x$.
- Then we just sum a bunch of nonnegative terms to get the eigenvalue $\mu_k$ associated to degree $k$ monomials
$$\mu_k = \frac 1{2^{d-k}} \sum_{r=0}^{d-k}\binom{d-k}r \Phi^{(k)}(1 - r \Delta).$$
We will actually use an even more clever algorithm here, but along the same line of reasoning; see the paper and the `twostep` option in the source code for details. A direct sketch of the recursion above is given below.
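To make the recipe concrete, here is a direct, untuned sketch of that recursion for a single vector of kernel values $\Phi(-1), \Phi(-1+\Delta), \ldots, \Phi(1)$; the `boolCubeMuAll` routine used below implements the more careful two-step variant, so treat this only as an illustration:
```
from scipy.special import binom

def naive_boolcube_eigs(phi_vals, maxdeg):
    """phi_vals[j] = Phi(-1 + j*Delta) for j = 0, ..., d, with Delta = 2/d."""
    d = len(phi_vals) - 1
    diffs = np.array(phi_vals, dtype=float)   # holds Phi^{(k)} at the points -1 + k*Delta, ..., 1
    mus = []
    for k in range(maxdeg + 1):
        r = np.arange(d - k + 1)
        # Phi^{(k)}(1 - r*Delta) is just `diffs` read from the top down
        mus.append(2.0 ** -(d - k) * np.sum(binom(d - k, r) * diffs[::-1]))
        # next level of the recursion: Phi^{(k+1)}(x) = (Phi^{(k)}(x) - Phi^{(k)}(x - Delta)) / 2
        diffs = (diffs[1:] - diffs[:-1]) / 2.0
    return np.array(mus)
```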
Note that, here we will compute *normalized eigenvalues*, normalized by their trace.
So these normalized eigenvalues, with multiplicity, should sum up to 1.
```
erfeigs = {}
# `erfeigs['ck']` is an array with shape (`maxdeg`, `depth`+1, len(erfvwrange), len(erfvbrange))
# `erfeigs['ck'][k, L] is the matrix of eigenvalue $\mu_k$ for a depth $L$ erf network,
# as a function of the values of $\sigma_w^2, \sigma_b^2$ in `erfvwrange` and `erfvbrange`
# Note that these eigenvalues are normalized by the trace
# (so that all normalized eigenvalues sum up to 1)
erfeigs['ck'] = relu(boolCubeMuAll(dim, maxdeg, erfkervals['cks']))
# similarly for `erfeigs['ntk']`
erfeigs['ntk'] = relu(boolCubeMuAll(dim, maxdeg, erfkervals['ntks']))
```
To perform a fine-grained analysis of how hyperparameters affect the performance of the kernel and thus the network itself, we use a heuristic, the *fractional variance*, defined as
$$
\text{degree $k$ fractional variance} = \frac{\binom d k \mu_k}{\sum_{i=0}^d \binom d i \mu_i}.
$$
This terminology comes from the fact that, if we were to sample a function $f$ from a Gaussian process with kernel $K$, then we expect that $r\%$ of the total variance of $f$ comes from degree $k$ components of $f$, where $r\%$ is the degree $k$ fractional variance.
If we were to try to learn a homogeneous degree-$k$ polynomial using a kernel $K$, intuitively we should try to choose $K$ such that its $\mu_k$ is maximized.
In the paper, we present empirical evidence that fractional variance is indeed inversely correlated with test loss.
So let's compute them.
```
# `erfeigs['ckfracvar']` is an array with shape (`maxdeg`, `depth`+1, len(erfvwrange), len(erfvbrange))
# just like `erfeigs['ck']`
erfeigs['ckfracvar'] = (
sp.special.binom(dim, np.arange(0, maxdeg+1))[:, None, None, None]
* erfeigs['ck']
)
# Same thing here
erfeigs['ntkfracvar'] = (
sp.special.binom(dim, np.arange(0, maxdeg+1))[:, None, None, None]
* erfeigs['ntk']
)
erfeigs['ckfracvar'].shape
```
Similarly, let's compute the eigenvalues of ReLU CK and NTK over a large range of hyperparameters:
- $\sigma_w^2 = 2$
- $\sigma_b^2 \in [0, 4]$
- dimension 128 boolean cube
- depth up to 128
- degree $k \le 8$.
Unless stated otherwise, all plots below use these hyperparameters.
```
reluvws, reluvbs = np.meshgrid([2], np.linspace(0, 4, num=401), indexing='ij')
dim = 128
depth = 128
maxdeg = 8
relukervals = boolcubeFgrid(dim, depth, reluvws, reluvbs, VReLU, VStep)
relueigs = {}
relueigs['ck'] = relu(boolCubeMuAll(dim, maxdeg, relukervals['cks']))
relueigs['ntk'] = relu(boolCubeMuAll(dim, maxdeg, relukervals['ntks']))
relueigs['ckfracvar'] = (
sp.special.binom(dim, np.arange(0, maxdeg+1))[:, None, None, None]
* relueigs['ck']
)
relueigs['ntkfracvar'] = (
sp.special.binom(dim, np.arange(0, maxdeg+1))[:, None, None, None]
* relueigs['ntk']
)
```
Now we have computed all the eigenvalues, let's take a look at them!
# Deeper Networks Learn More Complex Features --- But Not Too Deep
If $K$ is the CK or NTK of a relu or erf MLP, then we find that, for higher $k$, increasing the depth of the network helps increase $\mu_k$.
```
maxdeg = 8
plt.figure(figsize=(12, 4))
relueigs['ntkbestdepth'] = np.argmax(np.max(relueigs['ntk'][1:, :, ...], axis=(2, 3)), axis=1).squeeze()
relueigs['ckbestdepth'] = np.argmax(np.max(relueigs['ck'][1:, :, ...], axis=(2, 3)), axis=1).squeeze()
fig = plt.subplot(141)
plt.text(-.2, -.15, '(a)', fontsize=24, transform=fig.axes.transAxes)
plt.plot(np.arange(1, maxdeg+1), relueigs['ntkbestdepth'], label='ntk', markersize=4, marker='o')
plt.plot(np.arange(1, maxdeg+1), relueigs['ckbestdepth'], label='ck', markersize=4, marker='o')
plt.legend()
plt.xlabel('degree')
plt.ylabel('optimal depth')
plt.title('relu kernel optimal depths')
erfeigs['ntkbestdepth'] = np.argmax(np.max(erfeigs['ntk'][1:, :, ...], axis=(2, 3)), axis=1).squeeze()
erfeigs['ckbestdepth'] = np.argmax(np.max(erfeigs['ck'][1:, :, ...], axis=(2, 3)), axis=1).squeeze()
fig = plt.subplot(142)
plt.text(-.2, -.15, '(b)', fontsize=24, transform=fig.axes.transAxes)
plt.plot(np.arange(1, maxdeg+1), erfeigs['ntkbestdepth'], label='ntk', markersize=4, marker='o')
plt.plot(np.arange(1, maxdeg+1), erfeigs['ckbestdepth'], label='ck', markersize=4, marker='o')
plt.legend()
plt.xlabel('degree')
plt.title('erf kernel optimal depths')
fig = plt.subplot(143)
plt.text(-.5, -.15, '(c)', fontsize=24, transform=fig.axes.transAxes)
plt.imshow(relueigs['ntkfracvar'][3:, :20, 0, 0].T, aspect=12/20, origin='lower', extent=[2.5, 8.5, -.5, 20.5])
cb = plt.colorbar()
plt.xticks(range(3, 9, 2))
plt.xlabel('degree')
plt.ylabel('depth')
plt.grid()
plt.title(u'relu ntk, $\sigma_b^2=0$')
fig = plt.subplot(144)
plt.text(-.5, -.15, '(d)', fontsize=24, transform=fig.axes.transAxes)
plt.imshow(erfeigs['ntkfracvar'][3:, :, 0, 0].T, aspect=12/129, origin='lower', extent=[2.5, 8.5, -.5, 128.5], vmin=0, vmax=0.21)
cb = plt.colorbar()
cb.set_label('fractional variance')
plt.xticks(range(3, 9, 2))
plt.xlabel('degree')
plt.grid()
plt.title(u'erf ntk, $\sigma_w^2=1$, $\sigma_b^2=0$')
tight_layout(plt)
```
In **(a)** and **(b)** above, we plot, for each degree $k$, the depth that (with some combination of other hyperparameters like $\sigma_b^2$) maximizes degree $k$ fractional variance, for respectively relu and erf kernels.
Clearly, the maximizing depths are increasing with $k$ for relu, and also for erf when considering either odd $k$ or even $k$ only.
The slightly differing behavior between even and odd $k$ is expected, as seen in the form of Theorem 4.1 in the paper.
Note the different scales of y-axes for relu and erf --- the depth effect is much stronger for erf than relu.
For relu NTK and CK, $\sigma_b^2=0$ maximizes fractional variance in general, and the same holds for erf NTK and CK in the odd degrees (see our other notebook, [TheCompleteHyperparameterPicture]()).
In **(c)** and **(d)**, we give a more fine-grained look at the $\sigma_b^2=0$ slice, via heatmaps of fractional variance against degree and depth.
Brighter color indicates higher variance, and we see the optimal depth for each degree $k$ clearly increases with $k$ for relu NTK, and likewise for odd degrees of erf NTK.
However, note that as $k$ increases, the difference between the maximal fractional variance and those slightly suboptimal becomes smaller and smaller, reflected by suppressed range of color moving to the right.
The heatmaps for relu and erf CKs look similar (compute them yourself, as an exercise!).
In the paper, this trend of increasing optimal depth is backed up by empirical data from training neural networks to learn polynomials of various degrees.
Note that implicit in our results here is a highly nontrivial observation:
Past some point (the *optimal depth*), high depth can be detrimental to the performance of the network, beyond just the difficulty to train, and this detriment can already be seen in the corresponding NTK or CK.
In particular, it's *not* true that the optimal depth is infinite.
This adds significant nuance to the folk wisdom that "depth increases expressivity and allows neural networks to learn more complex features."
# NTK Favors More Complex Features Than CK
We generally find the degree $k$ fractional variance of NTK to be higher than that of CK when $k$ is large, and vice versa when $k$ is small.
```
plt.figure(figsize=(14, 4))
def convert2vb(i):
return i/100
fig = plt.subplot(131)
plt.text(-.15, -.15, '(a)', fontsize=24, transform=fig.axes.transAxes)
cpal = sns.color_palette()
for i, (depth, vbid) in enumerate([(1, 10), (128, 300), (3, 0)]):
color = cpal[i]
plt.plot(relueigs['ntkfracvar'][:, depth, 0, vbid], c=color, label='{} | {}'.format(depth, convert2vb(vbid)), marker='o', markersize=4)
plt.plot(relueigs['ckfracvar'][:, depth, 0, vbid], '--', c=color, marker='o', markersize=4)
plt.plot([], c='black', label='ntk')
plt.plot([], '--', c='black', label='ck')
plt.legend(title=u'depth | $\sigma_b^2$')
plt.xlabel('degree')
plt.ylabel('fractional variance')
plt.title('relu examples')
plt.semilogy()
def convert2vb(i):
return i/10
def convert2vw(i):
return i/10 + 1
cpal = sns.color_palette()
fig = plt.subplot(132)
plt.text(-.15, -.15, '(b)', fontsize=24, transform=fig.axes.transAxes)
for i, (depth, vwid, vbid) in enumerate([(1, 10, 1),(24, 0, 1), (1, 40, 40)]):
color = cpal[i]
plt.plot(erfeigs['ntkfracvar'][:, depth, vwid, vbid], c=color,
label='{} | {} | {}'.format(depth, int(convert2vw(vwid)), convert2vb(vbid)), marker='o', markersize=4)
plt.plot(erfeigs['ckfracvar'][:, depth, vwid, vbid], '--', c=color, marker='o', markersize=4)
plt.plot([], c='black', label='ntk')
plt.plot([], '--', c='black', label='ck')
plt.legend(title=u'depth | $\sigma_w^2$ | $\sigma_b^2$')
plt.xlabel('degree')
plt.title('erf examples')
plt.semilogy()
fig = plt.subplot(133)
plt.text(-.15, -.15, '(c)', fontsize=24, transform=fig.axes.transAxes)
# relu
balance = np.mean((relueigs['ntk'] > relueigs['ck']) & (relueigs['ntk'] > 1e-15), axis=(1, 2, 3))
# needed to filter out all situations where eigenval is so small that it's likely just 0
balance /= np.mean((relueigs['ntk'] > 1e-15), axis=(1, 2, 3))
plt.plot(np.arange(0, maxdeg+1), balance, marker='o', label='relu')
# erf
balance = np.mean((erfeigs['ntk'] > erfeigs['ck']) & (erfeigs['ntk'] > 1e-15), axis=(1, 2, 3))
# needed to filter out all situations where eigenval is so small that it's likely just 0
balance /= np.mean((erfeigs['ntk'] > 1e-15), axis=(1, 2, 3))
plt.plot(np.arange(0, maxdeg+1), balance, marker='o', label='erf')
plt.xlabel('degree')
plt.ylabel('fraction')
plt.legend()
plt.title('fraction of hyperparams where ntk > ck')
plt.suptitle('ntk favors higher degrees compared to ck')
tight_layout(plt)
```
In **(a)**, we give several examples of the fractional variance curves for relu CK and NTK across several representative hyperparameters.
In **(b)**, we do the same for erf CK and NTK.
In both cases, we clearly see that, while for degree 0 or 1, the fractional variance is typically higher for CK, the reverse is true for larger degrees.
In **(c)**, for each degree $k$, we plot the *fraction of hyperparameters* where the degree $k$ fractional variance of NTK is greater than that of CK.
Consistent with previous observations, this fraction increases with the degree.
This means that, if we train only the last layer of a neural network (i.e. CK dynamics), we intuitively should expect to learn *simpler* features faster and generalize better, while, if we train all parameters of the network (i.e. NTK dynamics), we should expect to learn more *complex* features faster and generalize better.
Similarly, if we were to sample a function from a Gaussian process with the CK as kernel (recall this is just the distribution of randomly initialized infinite width MLPs), this function is more likely to be accurately approximated by low degree polynomials than the same with the NTK.
Again, in the paper, we present empirical evidence that training the last layer is better than training all layers only for degree 0 polynomials, i.e. constant functions, but is worse for all higher degree (homogeneous) polynomials.
This corroborates the observations made from the spectra here.
# Conclusion
We have replicated the spectral plots of sections 5 and 6 in the paper, concerning the generalization properties of neural networks.
As one can see, the spectral perspective is quite useful for understanding the effects of hyperparameters.
A complete picture of how the combination of $\sigma_w^2, \sigma_b^2$, depth, degree, and nonlinearity affects fractional variance is presented in the notebook *[The Complete Hyperparameter Picture](TheCompleteHyperparameterPicture.ipynb)*.
The fractional variance is, however, by no means a perfect indicator of generalization, and there's plenty of room for improvement, as mentioned in the main text.
We hope a better predictor of test loss can be obtained in future works.
Another interesting topic that spectral analysis can shed light on is the so-called "simplicity bias" of neural networks.
We discuss this in the notebook *[Clarifying Simplicity Bias](ClarifyingSimplicityBias.ipynb)*.
|
github_jupyter
|
import numpy as np
import scipy as sp
from scipy.special import erf as erf
import matplotlib.pyplot as plt
from itertools import product
import seaborn as sns
sns.set()
from mpl_toolkits.axes_grid1 import ImageGrid
def tight_layout(plt):
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
from theory import *
# range of $\sigma_b^2$
erfvbrange = np.linspace(0, 4, num=41)
# range of $\sigma_w^2$
erfvwrange = np.linspace(1, 5, num=41)
erfvws, erfvbs = np.meshgrid(erfvwrange, erfvbrange, indexing='ij')
# `dim` = $d$
dim = 128
depth = 128
# we will compute the eigenvalues $\mu_k$ for $k = 0, 1, ..., maxdeg$.
maxdeg = 8
# `erfkervals` has two entries, with keys `cks` and `ntks`.
# Each entry is an array with shape (`depth`, len(erfvwrange), len(erfvbrange), `dim`+1)
# The last dimension carries the entries $\Phi(-1), \Phi(-1 + 2/d), ..., \Phi(1)$
erfkervals = boolcubeFgrid(dim, depth, erfvws, erfvbs, VErf, VDerErf)
erfkervals['cks'].shape
erfeigs = {}
# `erfeigs['ck']` is an array with shape (`maxdeg`, `depth`+1, len(erfvwrange), len(erfvbrange))
# `erfeigs['ck'][k, L] is the matrix of eigenvalue $\mu_k$ for a depth $L$ erf network,
# as a function of the values of $\sigma_w^2, \sigma_b^2$ in `erfvwrange` and `erfvbrange`
# Note that these eigenvalues are normalized by the trace
# (so that all normalized eigenvalues sum up to 1)
erfeigs['ck'] = relu(boolCubeMuAll(dim, maxdeg, erfkervals['cks']))
# similarly for `erfeigs['ntk']`
erfeigs['ntk'] = relu(boolCubeMuAll(dim, maxdeg, erfkervals['ntks']))
# `erfeigs['ckfracvar']` is an array with shape (`maxdeg`, `depth`+1, len(erfvwrange), len(erfvbrange))
# just like `erfeigs['ck']`
erfeigs['ckfracvar'] = (
sp.special.binom(dim, np.arange(0, maxdeg+1))[:, None, None, None]
* erfeigs['ck']
)
# Same thing here
erfeigs['ntkfracvar'] = (
sp.special.binom(dim, np.arange(0, maxdeg+1))[:, None, None, None]
* erfeigs['ntk']
)
erfeigs['ckfracvar'].shape
reluvws, reluvbs = np.meshgrid([2], np.linspace(0, 4, num=401), indexing='ij')
dim = 128
depth = 128
maxdeg = 8
relukervals = boolcubeFgrid(dim, depth, reluvws, reluvbs, VReLU, VStep)
relueigs = {}
relueigs['ck'] = relu(boolCubeMuAll(dim, maxdeg, relukervals['cks']))
relueigs['ntk'] = relu(boolCubeMuAll(dim, maxdeg, relukervals['ntks']))
relueigs['ckfracvar'] = (
sp.special.binom(dim, np.arange(0, maxdeg+1))[:, None, None, None]
* relueigs['ck']
)
relueigs['ntkfracvar'] = (
sp.special.binom(dim, np.arange(0, maxdeg+1))[:, None, None, None]
* relueigs['ntk']
)
maxdeg = 8
plt.figure(figsize=(12, 4))
relueigs['ntkbestdepth'] = np.argmax(np.max(relueigs['ntk'][1:, :, ...], axis=(2, 3)), axis=1).squeeze()
relueigs['ckbestdepth'] = np.argmax(np.max(relueigs['ck'][1:, :, ...], axis=(2, 3)), axis=1).squeeze()
fig = plt.subplot(141)
plt.text(-.2, -.15, '(a)', fontsize=24, transform=fig.axes.transAxes)
plt.plot(np.arange(1, maxdeg+1), relueigs['ntkbestdepth'], label='ntk', markersize=4, marker='o')
plt.plot(np.arange(1, maxdeg+1), relueigs['ckbestdepth'], label='ck', markersize=4, marker='o')
plt.legend()
plt.xlabel('degree')
plt.ylabel('optimal depth')
plt.title('relu kernel optimal depths')
erfeigs['ntkbestdepth'] = np.argmax(np.max(erfeigs['ntk'][1:, :, ...], axis=(2, 3)), axis=1).squeeze()
erfeigs['ckbestdepth'] = np.argmax(np.max(erfeigs['ck'][1:, :, ...], axis=(2, 3)), axis=1).squeeze()
fig = plt.subplot(142)
plt.text(-.2, -.15, '(b)', fontsize=24, transform=fig.axes.transAxes)
plt.plot(np.arange(1, maxdeg+1), erfeigs['ntkbestdepth'], label='ntk', markersize=4, marker='o')
plt.plot(np.arange(1, maxdeg+1), erfeigs['ckbestdepth'], label='ck', markersize=4, marker='o')
plt.legend()
plt.xlabel('degree')
plt.title('erf kernel optimal depths')
fig = plt.subplot(143)
plt.text(-.5, -.15, '(c)', fontsize=24, transform=fig.axes.transAxes)
plt.imshow(relueigs['ntkfracvar'][3:, :20, 0, 0].T, aspect=12/20, origin='lower', extent=[2.5, 8.5, -.5, 20.5])
cb = plt.colorbar()
plt.xticks(range(3, 9, 2))
plt.xlabel('degree')
plt.ylabel('depth')
plt.grid()
plt.title(u'relu ntk, $\sigma_b^2=0$')
fig = plt.subplot(144)
plt.text(-.5, -.15, '(d)', fontsize=24, transform=fig.axes.transAxes)
plt.imshow(erfeigs['ntkfracvar'][3:, :, 0, 0].T, aspect=12/129, origin='lower', extent=[2.5, 8.5, -.5, 128.5], vmin=0, vmax=0.21)
cb = plt.colorbar()
cb.set_label('fractional variance')
plt.xticks(range(3, 9, 2))
plt.xlabel('degree')
plt.grid()
plt.title(u'erf ntk, $\sigma_w^2=1$, $\sigma_b^2=0$')
tight_layout(plt)
plt.figure(figsize=(14, 4))
def convert2vb(i):
return i/100
fig = plt.subplot(131)
plt.text(-.15, -.15, '(a)', fontsize=24, transform=fig.axes.transAxes)
cpal = sns.color_palette()
for i, (depth, vbid) in enumerate([(1, 10), (128, 300), (3, 0)]):
color = cpal[i]
plt.plot(relueigs['ntkfracvar'][:, depth, 0, vbid], c=color, label='{} | {}'.format(depth, convert2vb(vbid)), marker='o', markersize=4)
plt.plot(relueigs['ckfracvar'][:, depth, 0, vbid], '--', c=color, marker='o', markersize=4)
plt.plot([], c='black', label='ntk')
plt.plot([], '--', c='black', label='ck')
plt.legend(title=u'depth | $\sigma_b^2$')
plt.xlabel('degree')
plt.ylabel('fractional variance')
plt.title('relu examples')
plt.semilogy()
def convert2vb(i):
return i/10
def convert2vw(i):
return i/10 + 1
cpal = sns.color_palette()
fig = plt.subplot(132)
plt.text(-.15, -.15, '(b)', fontsize=24, transform=fig.axes.transAxes)
for i, (depth, vwid, vbid) in enumerate([(1, 10, 1),(24, 0, 1), (1, 40, 40)]):
color = cpal[i]
plt.plot(erfeigs['ntkfracvar'][:, depth, vwid, vbid], c=color,
label='{} | {} | {}'.format(depth, int(convert2vw(vwid)), convert2vb(vbid)), marker='o', markersize=4)
plt.plot(erfeigs['ckfracvar'][:, depth, vwid, vbid], '--', c=color, marker='o', markersize=4)
plt.plot([], c='black', label='ntk')
plt.plot([], '--', c='black', label='ck')
plt.legend(title=u'depth | $\sigma_w^2$ | $\sigma_b^2$')
plt.xlabel('degree')
plt.title('erf examples')
plt.semilogy()
fig = plt.subplot(133)
plt.text(-.15, -.15, '(c)', fontsize=24, transform=fig.axes.transAxes)
# relu
balance = np.mean((relueigs['ntk'] > relueigs['ck']) & (relueigs['ntk'] > 1e-15), axis=(1, 2, 3))
# needed to filter out all situations where eigenval is so small that it's likely just 0
balance /= np.mean((relueigs['ntk'] > 1e-15), axis=(1, 2, 3))
plt.plot(np.arange(0, maxdeg+1), balance, marker='o', label='relu')
# erf
balance = np.mean((erfeigs['ntk'] > erfeigs['ck']) & (erfeigs['ntk'] > 1e-15), axis=(1, 2, 3))
# needed to filter out all situations where eigenval is so small that it's likely just 0
balance /= np.mean((erfeigs['ntk'] > 1e-15), axis=(1, 2, 3))
plt.plot(np.arange(0, maxdeg+1), balance, marker='o', label='erf')
plt.xlabel('degree')
plt.ylabel('fraction')
plt.legend()
plt.title('fraction of hyperparams where ntk > ck')
plt.suptitle('ntk favors higher degrees compared to ck')
tight_layout(plt)
### Import the relevant libraries
```
# We must always import the relevant libraries for our problem at hand. NumPy and TensorFlow are required for this example.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
```
### Data generation
We generate data using the exact same logic and code as the example from the previous notebook. The only difference now is that we save it to an npz file. Npz is NumPy's file format, which lets you save several NumPy arrays in a single .npz file. We introduce this change because in machine learning most often:
* you are given some data (csv, database, etc.)
* you preprocess it into a desired format (later on we will see methods for preprocessing)
* you save it into npz files (if you're working in Python) to access later
Nothing to worry about - this literally just saves your NumPy arrays into a file that you can later access, nothing more.
```
# First, we should declare a variable containing the size of the training set we want to generate.
observations = 1000
# We will work with two variables as inputs. You can think about them as x1 and x2 in our previous examples.
# We have picked x and z, since it is easier to differentiate them.
# We generate them randomly, drawing from a uniform distribution. This method takes 3 arguments (low, high, size).
# The size of xs and zs is observations x 1. In this case: 1000 x 1.
xs = np.random.uniform(low=-10, high=10, size=(observations,1))
zs = np.random.uniform(-10, 10, (observations,1))
# Combine the two dimensions of the input into one input matrix.
# This is the X matrix from the linear model y = x*w + b.
# column_stack is a Numpy method, which combines two matrices (vectors) into one.
generated_inputs = np.column_stack((xs,zs))
# We add a random small noise to the function i.e. f(x,z) = 2x - 3z + 5 + <small noise>
noise = np.random.uniform(-1, 1, (observations,1))
# Produce the targets according to our f(x,z) = 2x - 3z + 5 + noise definition.
# In this way, we are basically saying: the weights should be 2 and -3, while the bias is 5.
generated_targets = 2*xs - 3*zs + 5 + noise
# save into an npz file called "TF_intro"
np.savez('TF_intro', inputs=generated_inputs, targets=generated_targets)
```
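For reference, the saved arrays can be read back later with `np.load`; a quick sketch (note that `np.savez('TF_intro', ...)` writes a file named `TF_intro.npz`):
```
# Load the arrays back from the .npz archive we just created
training_data = np.load('TF_intro.npz')
print(training_data['inputs'].shape)   # (1000, 2)
print(training_data['targets'].shape)  # (1000, 1)
```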
## Solving with TensorFlow
<i>Note: This intro covers just the basics of TensorFlow, which has far more capabilities and depth than shown here.</i>
```
# The shape of the data we've prepared above. Think about it as: number of inputs, number of outputs.
input_size = 2
output_size = 1
```
### Outlining the model
```
# Here we define a basic TensorFlow object - the placeholder.
# As before, we will feed the inputs and targets to the model.
# In the TensorFlow context, we feed the data to the model THROUGH the placeholders.
# The particular inputs and targets are contained in our .npz file.
# The first None parameter of the placeholders' shape means that
# this dimension could be of any length. That's since we are mainly interested in
# the input size, i.e. how many input variables we have and not the number of samples (observations)
# The number of input variables changes the MODEL itself, while the number of observations doesn't.
# Remember that the weights and biases were independent of the number of samples, so the MODEL is independent.
# Important: NO calculation happens at this point.
inputs = tf.placeholder(tf.float32, [None, input_size])
targets = tf.placeholder(tf.float32, [None, output_size])
# As before, we define our weights and biases.
# They are the other basic TensorFlow object - a variable.
# We feed data into placeholders and they have a different value for each iteration
# Variables, however, preserve their values across iterations.
# To sum up, data goes into placeholders; parameters go into variables.
# We use the same random uniform initialization in [-0.1,0.1] as in the minimal example but using the TF syntax
# Important: NO calculation happens at this point.
weights = tf.Variable(tf.random_uniform([input_size, output_size], minval=-0.1, maxval=0.1))
biases = tf.Variable(tf.random_uniform([output_size], minval=-0.1, maxval=0.1))
# We get the outputs following our linear combination: y = xw + b
# Important: NO calculation happens at this point.
# This line simply tells TensorFlow what rule to apply when we feed in the training data (below).
outputs = tf.matmul(inputs, weights) + biases
```
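Note that `tf.placeholder` and `tf.random_uniform` belong to the TensorFlow 1.x graph API. If you happen to be running TensorFlow 2.x (an assumption about your environment, not part of the original example), one common workaround is to import the v1 compatibility module before running the cells above:
```
# Only needed on TensorFlow 2.x, where the 1.x graph-style API lives in the compat module
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
```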
## Some Common Hazards
In this final notebook we will look briefly at some of the pitfalls that new programmers in Python may encounter. We cover these now in order to try and avoid falling victim to these problems in the future.
The two hazards that we will discuss here both come about because Python variables are actually pointers that point to Python objects, as we saw in notebook 01 - Variables and Types.
<br>
### Variables can change type in Python
You may wonder why we refer to this as a hazard, as some programmers will see it as a handy feature of Python. It can however cause trouble if we do any type checks in our code, or if we accidentally give two different variables the same name - one will replace the other without Python telling us!
In many programming languages, trying to create two variables with the same name gives an error telling us that a variable with that name already exists. Python, however, will allow you to have what looks like two variables with the same name, because what it does is simply replace the original pointer with a new pointer!
```
x = 1
#Some other code in here, and I forgot that I previously had a variable called x
x = "Hello "
#Some more code here, and I now try to use x, which I think should have the value 1 in it.
x = x * 2
print(x)
print(type(x) is int)
```
Hello Hello?! That's not what I expected! In other languages this error would be picked up, often at compile time, as something that shouldn't be allowed. We've changed the type of the variable from an integer to a string, but in Python this is perfectly allowed, because the variable is just a pointer to an object. All that's happened is that it now points to an object that contains a string, rather than the original object that contained an integer. This feature of Python is known as "dynamic typing", but if you've programmed in other languages before you may miss the safety net of variables being statically typed.
<br>
### Mutable and Immutable objects
This is an easy pitfall to fall victim to early on in your programming career in Python, and is usually encountered the first time you try to make a copy of some data. Say you have a variable `x` that contains a value, and you would like to make a copy of that data in another variable `y`. The mistake most of you would make (and most experienced programmers have made in the past) is to try to do `y = x`. Let's see the issue with that below:
```
x = 5
y = x
print("x = ", x)
print("y = ", y)
```
Now let's try to change the value of `x`, but leave the value of `y` as it is.
```
x = 1
print("x = ", x)
print("y = ", y)
```
Perfect, that's worked exactly as we'd hoped. In fact, this will always work for integers, floats, booleans and strings; these object types are called immutable, which means they cannot be changed. If I want to change the value pointed to by a variable, since I cannot change the value of an immutable object, I must instead create an entirely new object with the new value and point my variable at that new object. However, you should be careful - not all objects are immutable...
```
# x is a list that contains 5 values, 1 through 5.
x = [1,2,3,4,5]
y = x
print("x = ", x)
print("y = ", y)
```
All good you say! They both have the data as we wanted, so everything is fine! Not so fast...
```
#Now lets add another element to the list x, but leave y unchanged
x.append(6)
print("x = ", x)
print("y = ", y)
```
Noooooo! We changed the data in `x`, but it also changed the data in `y`. That didn't happen last time! The reason is that lists are mutable objects, which means we can update them without needing to create a new object. Things start to make sense when you consider `x` and `y` as pointers: they point to the same data because of the `y = x` assignment. So if we change the data in the object, the two variables that point at that object will both see the change.
Thankfully all of the objects that we've looked at in this topic are immutable: Booleans, integers, floats, and strings can happily be copied using assignment such as `y = x` to copy the value of the variable `x` into `y`. If we try to change either variable, an entirely new object is created, and the pointer is updated to point at that new object. However, when we start to work with lists in the next topic, this is something we need to be aware of and remember!
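If you do want an independent copy of a list, one option (a quick preview of what we'll cover in the next topic) is to copy it explicitly rather than assign the pointer:
```
x = [1, 2, 3, 4, 5]
y = x.copy()      # creates a new list object (a shallow copy), so y no longer shares x's data
x.append(6)
print("x = ", x)  # x = [1, 2, 3, 4, 5, 6]
print("y = ", y)  # y = [1, 2, 3, 4, 5]
```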
If you would like more detail on how mutable and immutable objects work in Python, I suggest you head to this webpage:
<br><a href="https://realpython.com/pointers-in-python/">https://realpython.com/pointers-in-python/</a>.
```
import psycopg2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import matplotlib.pyplot as plt
warnings.simplefilter('ignore')
pd.options.display.max_columns = 300
sns.set_style('darkgrid')
#connect SQL
conn = psycopg2.connect(database='usaspending', user='team', password='ZAQ!@#zaq123', host='dopelytics.site', port='5432')
sql_cols = ('federal_action_obligation, '
#'total_dollars_obligated, '
'base_and_exercised_options_value, '
'base_and_all_options_value, '
'awarding_sub_agency_name, '
'awarding_office_name, '
#'funding_sub_agency_name, '
#'funding_office_name, '
#'primary_place_of_performance_state_code, '
#'award_or_idv_flag, '
#'award_type, '
#'type_of_contract_pricing, '
#'dod_claimant_program_description, '
'type_of_set_aside_code, '
#'multi_year_contract, '
#'dod_acquisition_program_description, '
#'subcontracting_plan, '
#'contract_bundling, '
#'evaluated_preference, '
#'national_interest_action, '
#'cost_or_pricing_data, '
#'gfe_gfp, '
#'contract_financing, '
'portfolio_group, '
#'product_or_service_code_description, '
'naics_bucket_title'
#'naics_description'
)
#Create DF
sql_tbl_name = 'consolidated_data2'
df = pd.read_sql_query('SELECT ' + sql_cols + ' FROM ' + sql_tbl_name, con=conn)
df.head()
df.info()
df.isnull().sum()
df = df.dropna()
df.shape
def set_aside(c):
if c['type_of_set_aside_code'] == 'NONE':
return 0
else:
return 1
#Create column name 'set_aside' and apply function to populate rows with 0 or 1.
df['set_aside'] = df.apply(set_aside, axis=1)
def contract_value(c):
if c['base_and_exercised_options_value'] > 0:
return c['base_and_exercised_options_value']
elif c['base_and_all_options_value'] > 0:
return c['base_and_all_options_value']
elif c['federal_action_obligation'] > 0:
return c['federal_action_obligation']
else:
return 0
df['contract_value'] = df.apply(contract_value, axis=1)
#Drop columns that we don't need anymore.
df = df.drop(['type_of_set_aside_code','base_and_exercised_options_value','base_and_all_options_value',
'federal_action_obligation'], axis=1)
df.isnull().sum()
#Create features DF
X = df.drop(['set_aside'], axis=1).copy()
X.head()
#Create Target
y = df['set_aside']
y.value_counts()
#Convert all the Features Char to Binary
X = pd.get_dummies(X)
X.head()
from sklearn.model_selection import train_test_split, cross_val_score
from yellowbrick.classifier import ClassificationReport
from sklearn.ensemble import RandomForestClassifier
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
model = RandomForestClassifier(n_estimators=17, n_jobs=-1, random_state=0)
model.fit(X_train, y_train)
predictions = model.predict(X_test)
classes = ['None', 'Set Aside']
visualizer = ClassificationReport(model, classes=classes, support=True)
visualizer.score(X_test, y_test)
visualizer.show()
model_score = cross_val_score(estimator=model, X=X, y=y, scoring='f1', cv=12)
print("Accuracy : ", round(model_score.mean(),2))
print('Standard Deviation : ',round(model_score.std(),2))
import pickle
#Save Trained Model
filename = 'RandomForest_SetAside_None_Model.save'
pickle.dump(model, open(filename, 'wb'))
```
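For later reuse, the pickled model can be loaded back with `pickle.load`; a minimal sketch (assuming the saved file sits in the working directory):
```
# Reload the trained classifier and sanity-check it on the held-out test set
loaded_model = pickle.load(open(filename, 'rb'))
print(loaded_model.score(X_test, y_test))
```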
## Second Model for Set Aside
```
df1 = pd.read_sql_query('SELECT ' + sql_cols + ' FROM ' + sql_tbl_name, con=conn)
none_set_asides = df1[df1['type_of_set_aside_code']== 'NONE'].index
df1.shape
df1.head()
#drop all the set_aside = NONE
df1 = df1.drop(none_set_asides, axis=0)
def contract_value(c):
if c['base_and_exercised_options_value'] > 0:
return c['base_and_exercised_options_value']
elif c['base_and_all_options_value'] > 0:
return c['base_and_all_options_value']
elif c['federal_action_obligation'] > 0:
return c['federal_action_obligation']
else:
return 0
df1['contract_value'] = df1.apply(contract_value, axis=1)
df1['type_of_set_aside_code'].value_counts()
df1['set_aside_number'] = df1['type_of_set_aside_code'].map({'SBA':1, '8AN':2, '8A':3, 'SDVOSBC':4,'HZC':5, 'WOSB':6, 'SBP':7, 'EDWOSB':7, 'SDVOSBS':7,
'HZS':7, 'WOSBSS':7, 'EDWOSBSS':7, 'ISBEE':7, 'HS3':7, 'IEE':7})
#Drop columns that we don't need anymore.
df1 = df1.drop(['type_of_set_aside_code','base_and_exercised_options_value','base_and_all_options_value',
'federal_action_obligation'], axis=1)
df1 = df1.dropna()
df1.shape
X1 = df1.drop(['set_aside_number'], axis=1).copy()
X1 = pd.get_dummies(X1)
y1 = df1['set_aside_number'].copy()
y1.value_counts()
X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, test_size=0.20, random_state=42)
model1 = RandomForestClassifier(n_estimators=17)
model1.fit(X1_train, y1_train)
classes1 = ['SBA', '8AN', '8A', 'SDVOSBC','HZC', 'WOSB', 'OTHER SET ASIDE']
visualizer = ClassificationReport(model1, classes=classes1, support=True)
visualizer.score(X1_test, y1_test)
visualizer.show()
model_score_all_set_aside = cross_val_score(estimator=model1, X=X1, y=y1, scoring='f1_weighted', cv=12)
print("F1 (weighted) : ", round(model_score_all_set_aside.mean(),2))
print('Standard Deviation : ',round(model_score_all_set_aside.std(),2))
#Save Trained Model
filename = 'RandomForest_All_Set_Aside_Model.save'
pickle.dump(model1, open(filename, 'wb'))
```
<a href="https://colab.research.google.com/github/Tranminhtuan48/pythonbasics/blob/main/bai12_10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Exercise 1: Write a program that finds all numbers divisible by 7 but not multiples of 5, between 10 and 200 (inclusive of both 10 and 200). The resulting numbers are printed to the screen.
```
j = []
for i in range(10, 201):
if (i % 7 == 0) and (i % 5 != 0):
j.append(str(i))
print(",".join(j))
```
Exercise 2: Write a program that computes the factorial of a positive integer n, where n is entered from the keyboard. For example, with n = 8 the output should be 1*2*3*4*5*6*7*8 = 40320.
```
print("Nhập giá trị n: ",end='')
n=int(input())
k=[]
if n > 0:
giaithua=1
for i in range(1,n+1):
giaithua=giaithua*i
for f in range (1,n+1):
k.append(str(f))
print(n,"giai thừa bằng:","*".join(k),"=",giaithua)
else:
print("Vui lòng nhập n > 0")
```
Exercise 3: The Fibonacci sequence is defined as follows: F0 = 0, F1 = 1, F2 = 1, Fn = F(n-1) + F(n-2) for n >= 2. For example: 0, 1, 1, 2, 3, 5, 8, ... Write a program that finds the first n Fibonacci numbers, with n entered from the keyboard.
```
k=int(input("dãy Số Fibonaci: "))
q=[0,1]
i=2
for i in range(i,k):
x=q[i-1]+q[i-2]
q.append(x)
print(q)
```
Exercise 4: Write a program that lists all prime numbers smaller than n. The positive integer n is entered from the keyboard.
```
import math
n = int(input())
print (2)
for nt in range(3,n,2):
if all(nt%i!=0 for i in range(3,int(math.sqrt(nt))+1, 2)):
print (nt)
```
Exercise 5: Write a program that lists the first n prime numbers. The positive integer n is entered from the keyboard.
```
so = int(input("nhap so can kiem tra: "));
if so >1 :
for i in range (2,so):
t=0
for j in range (2,i):
if(i%j==0):
t=1
break
if(t==0 and i!=0):
print(i)
else:
print("nhap lai di")
```
Exercise 6: Write a program that lists all 5-digit prime numbers.
```
for i in range(10000,99999):
for h in range(2,i):
if i%h==0:
break
else:
print(i,end=",")
```
Exercise 7: Write a program that computes the sum of the digits of an integer n. The positive integer n is entered from the keyboard. For example: n = 1234, sum of digits: 1 + 2 + 3 + 4 = 10.
```
print("nhap n: ")
n=int(input())
a=str(n)
m=0
for i in range(len(a)):
r= int(a[i])
m=r+m
print (m)
```
Exercise 8: Write a program that lists the Fibonacci numbers smaller than n that are prime. n is a positive integer entered from the keyboard.
```
k=int(input("Số Fibonaci: "))
q=[0,1]
i=2
for i in range(i,k):
x=q[i-1]+q[i-2]
q.append(x)
print(q)
print("day so nguyen to fibonacci: ")
for y in q:
if y<k and y>1:
for z in range(2,y):
if y%z==0 :
break
else:
print(y, end=",")
```
# Solving Vehicle Routing Problem with Amazon SageMaker RL
In this notebook, you will see how reinforcement learning can be used to solve a Vehicle Routing Problem (VRP). Given one or more vehicles and a set of locations, VRP tries to find the route that reaches all locations with minimal operational cost. This problem has been of great interest for decades, as it has wide applications in logistics, parcel delivery, and more. It has many variants that characterize different constraints or features, among which the online and stochastic version is considered in this example.
## Problem Statement
Consider a delivery driver using a phone app; orders arrive on the app dynamically. Each order has a delivery charge known to the driver at the time of order creation, and it is assigned to a location in the city. The city is a grid map and consists of mutually exclusive zones that generate orders at different rates and rewards. The orders have a delivery time limit; the timer starts with the order creation and is the same for all orders. The driver has to accept an order and pick up the package from a given location prior to delivery. The vehicle has a capacity limit, but the driver can accept unlimited orders and plan their route accordingly. The driver's goal is to maximize the total net reward.
This formulation is known as stochastic and dynamic capacitated vehicle routing problem with pickup and delivery, time windows and service guarantee.
<img src="images/rl_vehicle_routing.png" width="500" align="center"/>
At each time step, the RL agent is aware of the following information:
- Pickup location
- Driver info: driver's position, capacity left
- Order info: order's location, order's status (open, accepted, picked up or delivered), time elapsed since each order’s generation, order dollar value
At each time step, the RL agent can take one of the following actions:
- Accept an order
- Pick up an accepted order
- Go to a customer's node for delivery
- Head to a specific pickup location
- Wait and stay unmoved
During training, the agent cannot perform the following invalid actions: (i) pick up an order when its remaining capacity is 0; (ii) pick up an order that is not yet accepted; (iii) deliver an order that is not yet picked up.
At each time step, the reward is defined as the difference between total value of all delivered orders and cost:
- Total value of all delivered orders is divided into 3 equal parts -- when the order gets accepted, picked up, and delivered respectively
- Cost includes time cost and moving cost. Both are per time step
- A large penalty is imposed if the agent accepts an order but fails to deliver within the delivery limit
## Using Amazon SageMaker for RL
Amazon SageMaker allows you to train your RL agents in cloud machines using docker containers. You do not have to worry about setting up your machines with the RL toolkits and deep learning frameworks. You can easily switch between many different machines setup for you, including powerful GPU machines that give a big speedup. You can also choose to use multiple machines in a cluster to further speedup training, often necessary for production level loads.
## Pre-requisites
### Roles and permissions
To get started, we'll import the Python libraries we need, set up the environment with a few prerequisites for permissions and configurations.
```
import sagemaker
import boto3
import sys
import os
import glob
import re
import subprocess
from IPython.display import HTML
import time
from time import gmtime, strftime
sys.path.append("common")
from misc import get_execution_role, wait_for_s3_object, wait_for_training_job_to_complete
from sagemaker.rl import RLEstimator, RLToolkit, RLFramework
```
### Setup S3 bucket
Set up the linkage and authentication to the S3 bucket that you want to use for checkpoint and the metadata.
```
# S3 bucket
sage_session = sagemaker.session.Session()
s3_bucket = sage_session.default_bucket()
s3_output_path = "s3://{}/".format(s3_bucket)
print("S3 bucket path: {}".format(s3_output_path))
```
### Define Variables
We define variables such as the job prefix for the training jobs *and the image path for the container (only when this is BYOC).*
```
# create unique job name
job_name_prefix = "rl-vehicle-routing"
```
### Configure settings
You can run your RL training jobs on a SageMaker notebook instance or on your own machine. In both of these scenarios, you can run the following in either `local` or `SageMaker` modes. The `local` mode uses the SageMaker Python SDK to run your code in a local container before deploying to SageMaker. This can speed up iterative testing and debugging while using the same familiar Python SDK interface. You just need to set `local_mode = True`.
```
local_mode = False
if local_mode:
instance_type = "local"
else:
# If on SageMaker, pick the instance type
instance_type = "ml.m5.xlarge"
```
### Create an IAM role
Either get the execution role when running from a SageMaker notebook instance `role = sagemaker.get_execution_role()` or, when running from local notebook instance, use utils method `role = get_execution_role()` to create an execution role.
```
try:
role = sagemaker.get_execution_role()
except:
role = get_execution_role()
print("Using IAM role arn: {}".format(role))
```
### Install docker for `local` mode
In order to work in `local` mode, you need to have docker installed. When running from your local machine, please make sure that you have docker or docker-compose (for local CPU machines) and nvidia-docker (for local GPU machines) installed. Alternatively, when running from a SageMaker notebook instance, you can simply run the following script to install the dependencies.
Note that you can only run a single local notebook at a time.
```
# only run from SageMaker notebook instance
if local_mode:
!/bin/bash ./common/setup.sh
```
## Set up the environment
The environment is defined in a Python file called `vrp_env.py`, and the file is uploaded in the `/src` directory.
The environment also implements the `init()`, `step()` and `reset()` functions that describe how the environment behaves (a minimal skeleton is sketched after the list below). This is consistent with Open AI Gym interfaces for defining an environment.
1. init() - initialize the environment in a pre-defined state
2. step() - take an action on the environment
3. reset()- restart the environment on a new episode
4. [if applicable] render() - get a rendered image of the environment in its current state
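As a rough illustration only (a sketch of the interface, not the contents of `vrp_env.py`), a Gym-style environment exposing these functions looks like:
```
import gym
from gym import spaces

class ToyRoutingEnv(gym.Env):
    """Minimal skeleton of the Gym interface used by the VRP environment."""

    def __init__(self):
        # hypothetical action/observation spaces, just to make the skeleton runnable
        self.action_space = spaces.Discrete(5)
        self.observation_space = spaces.Box(low=-1.0, high=1.0, shape=(10,))

    def reset(self):
        # restart the environment for a new episode and return the first observation
        return self.observation_space.sample()

    def step(self, action):
        # apply the action and return (observation, reward, done, info)
        return self.observation_space.sample(), 0.0, False, {}
```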
```
# uncomment the following line to see the environment
# !pygmentize src/vrp_env.py
```
## Write the training code
The training code is written in the file `train_vehicle_routing_problem.py`, which is also uploaded in the `/src` directory.
First import the environment files and the preset files, and then define the main() function.
```
!pygmentize src/train_vehicle_routing_problem.py
```
## Train the RL model using the Python SDK Script mode
If you are using local mode, the training will run on the notebook instance. When using SageMaker for training, you can select a GPU or CPU instance. The [RLEstimator](https://sagemaker.readthedocs.io/en/stable/sagemaker.rl.html) is used for training RL jobs.
1. Specify the source directory where the gym environment and training code is uploaded.
2. Specify the entry point as the training code
3. Specify the choice of RL toolkit and framework. This automatically resolves to the ECR path for the RL Container.
4. Define the training parameters such as the instance count, job name, S3 path for output and job name.
5. Specify the hyperparameters for the RL agent algorithm. The RLCOACH_PRESET or the RLRAY_PRESET can be used to specify the RL agent algorithm you want to use.
6. Define the metrics definitions that you are interested in capturing in your logs. These can also be visualized in CloudWatch and SageMaker Notebooks.
### Define Metric
A list of dictionaries that defines the metric(s) used to evaluate the training jobs. Each dictionary contains two keys: ‘Name’ for the name of the metric, and ‘Regex’ for the regular expression used to extract the metric from the logs.
```
metric_definitions = [
{
"Name": "episode_reward_mean",
"Regex": "episode_reward_mean: ([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?)",
},
{
"Name": "episode_reward_max",
"Regex": "episode_reward_max: ([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?)",
},
{
"Name": "episode_reward_min",
"Regex": "episode_reward_min: ([-+]?[0-9]*\\.?[0-9]+([eE][-+]?[0-9]+)?)",
},
]
```
### Define Estimator
This Estimator executes an RLEstimator script in a managed Reinforcement Learning (RL) execution environment within a SageMaker Training Job. The managed RL environment is an Amazon-built Docker container that executes functions defined in the supplied entry_point Python script.
```
train_entry_point = "train_vehicle_routing_problem.py"
train_job_max_duration_in_seconds = 60 * 15
estimator = RLEstimator(
entry_point=train_entry_point,
source_dir="src",
dependencies=["common/sagemaker_rl"],
toolkit=RLToolkit.RAY,
toolkit_version="0.6.5",
framework=RLFramework.TENSORFLOW,
role=role,
instance_type=instance_type,
instance_count=1,
output_path=s3_output_path,
base_job_name=job_name_prefix,
metric_definitions=metric_definitions,
max_run=train_job_max_duration_in_seconds,
hyperparameters={},
)
estimator.fit(wait=local_mode)
job_name = estimator.latest_training_job.job_name
print("Training job: %s" % job_name)
```
## Visualization
RL training can take a long time. So while it's running there are a variety of ways we can track progress of the running training job. Some intermediate output gets saved to S3 during training, so we'll set up to capture that.
```
s3_url = "s3://{}/{}".format(s3_bucket, job_name)
intermediate_folder_key = "{}/output/intermediate/".format(job_name)
intermediate_url = "s3://{}/{}training/".format(s3_bucket, intermediate_folder_key)
print("S3 job path: {}".format(s3_url))
print("Intermediate folder path: {}".format(intermediate_url))
```
### Plot metrics for training job
We can see the reward metric of the training as it's running, using algorithm metrics that are recorded in CloudWatch metrics. We can plot this to see the performance of the model over time.
```
%matplotlib inline
from sagemaker.analytics import TrainingJobAnalytics
if not local_mode:
wait_for_training_job_to_complete(job_name) # Wait for the job to finish
df = TrainingJobAnalytics(job_name, ["episode_reward_mean"]).dataframe()
df_min = TrainingJobAnalytics(job_name, ["episode_reward_min"]).dataframe()
df_max = TrainingJobAnalytics(job_name, ["episode_reward_max"]).dataframe()
df["rl_reward_mean"] = df["value"]
df["rl_reward_min"] = df_min["value"]
df["rl_reward_max"] = df_max["value"]
num_metrics = len(df)
if num_metrics == 0:
print("No algorithm metrics found in CloudWatch")
else:
plt = df.plot(
x="timestamp",
y=["rl_reward_mean"],
figsize=(18, 6),
fontsize=18,
legend=True,
style="-",
color=["b", "r", "g"],
)
plt.fill_between(df.timestamp, df.rl_reward_min, df.rl_reward_max, color="b", alpha=0.2)
plt.set_ylabel("Mean reward per episode", fontsize=20)
plt.set_xlabel("Training time (s)", fontsize=20)
plt.legend(loc=4, prop={"size": 20})
else:
print("Can't plot metrics in local mode.")
```
#### Monitor training progress
You can repeatedly run the visualization cells to get the latest metrics as the training job proceeds.
## Training Results
You can let the training job run longer by specifying a larger `max_run` in `RLEstimator`. The figure below illustrates the reward function of the RL policy vs. that of a MIP baseline. The experiments are conducted on a p3.2x instance. For more details on the environment setup and how different parameters are set, please refer to [ORL: Reinforcement Learning Benchmarks for Online Stochastic Optimization
Problems](https://arxiv.org/pdf/1911.10641.pdf).
<img src="images/rl_vehicle_routing_result.png" width="800" align="center"/>
<h1 align="center">Classification of Arrhythmia</h1>
Dataset used in this project is available on the UCI machine learning Repository.
* It can be found at the following address: https://archive.ics.uci.edu/ml/datasets/Arrhythmia.
* It consists of 452 different examples spread over 16 classes. Of the 452 examples, 245 are of "normal" people. We also have 12 different types of arrhythmias. Among all these types of arrhythmias, the most common are "coronary artery disease" and "right bundle branch block".
* We have 279 features, which include age, sex, weight, height of patients and related information from the electrocardiogram. We explicitly observe that the number of features is relatively high compared to the number of examples available.
* Our goal is to predict whether a person is suffering from arrhythmia and, if so, classify it into one of the 12 available groups.
### Importing Essential Libraries
```
import pandas as pd
import numpy as np
import scipy as sp
import math as mt
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.impute import SimpleImputer
```
### Data Reading
We start by reading the data file and creating a dataframe to hold the output of all the models we will run; this makes it easy to compare the models.
```
df=pd.read_csv("arrhythmia.csv",header=None)
df.head()
df.tail()
```
**Basic Description of dataframe**
```
#Dimension of dataset.
df.shape
#concise summary of the dataframe.
df.info()
#descriptive statistics of dataframe.
df.describe().T
```
# 1. Data preprocessing
### Handling Missing Values
<br>
While going through the dataset we observed that out of 279 attributes, 5 attributes have missing values in the form of '?'. The approach we will follow is to first replace '?' with NumPy NaN and then impute the mean using SimpleImputer.
**Checking for null values in dataset**
```
#Counting total Number of null values
pd.isnull(df).sum().sum()
#Replacing ? with np.nan value-
df = df.replace('?', np.NaN)
#final counting total number of null values in dataset
nu=pd.isnull(df).sum().sum()
nu
```
**Visualizing the distribution of our missing data:**
```
pd.isnull(df).sum().plot()
plt.xlabel('Columns')
plt.ylabel('Total number of null value in each column')
#Zooming in
pd.isnull(df).sum()[7:17].plot(kind="bar")
plt.xlabel('Columns')
plt.ylabel('Total number of null value in each column')
```
Column 13 contains more than 350 missing values out of the 452 instances, so we will drop column 13. The other attributes have comparatively few null values, so instead of dropping them we will replace their null values with the column means.
```
#Dropping the column 13
df.drop(columns = 13, inplace=True)
```
**Using the mean strategy for imputation**
```
# make copy to avoid changing original data (when imputing)
new_df = df.copy()
# columns that still contain missing values (kept for reference only; overwriting them with
# indicator flags here would destroy the data before it can be imputed)
cols_with_missing = [col for col in new_df.columns if new_df[col].isnull().any()]
# Imputation: replace each NaN with the mean of its column
my_imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
new_df = pd.DataFrame(my_imputer.fit_transform(new_df))
new_df.columns = df.columns
# imputed dataframe
new_df.head()
# DataSet with Zero null Values.
pd.isnull(new_df).sum().sum()
```
**Generating final dataset**
```
#Creating column names
final_df_columns=["Age","Sex","Height","Weight","QRS_Dur",
"P-R_Int","Q-T_Int","T_Int","P_Int","QRS","T","P","J","Heart_Rate",
"Q_Wave","R_Wave","S_Wave","R'_Wave","S'_Wave","Int_Def","Rag_R_Nom",
"Diph_R_Nom","Rag_P_Nom","Diph_P_Nom","Rag_T_Nom","Diph_T_Nom",
"DII00", "DII01","DII02", "DII03", "DII04","DII05","DII06","DII07","DII08","DII09","DII10","DII11",
"DIII00","DIII01","DIII02", "DIII03", "DIII04","DIII05","DIII06","DIII07","DIII08","DIII09","DIII10","DIII11",
"AVR00","AVR01","AVR02","AVR03","AVR04","AVR05","AVR06","AVR07","AVR08","AVR09","AVR10","AVR11",
"AVL00","AVL01","AVL02","AVL03","AVL04","AVL05","AVL06","AVL07","AVL08","AVL09","AVL10","AVL11",
"AVF00","AVF01","AVF02","AVF03","AVF04","AVF05","AVF06","AVF07","AVF08","AVF09","AVF10","AVF11",
"V100","V101","V102","V103","V104","V105","V106","V107","V108","V109","V110","V111",
"V200","V201","V202","V203","V204","V205","V206","V207","V208","V209","V210","V211",
"V300","V301","V302","V303","V304","V305","V306","V307","V308","V309","V310","V311",
"V400","V401","V402","V403","V404","V405","V406","V407","V408","V409","V410","V411",
"V500","V501","V502","V503","V504","V505","V506","V507","V508","V509","V510","V511",
"V600","V601","V602","V603","V604","V605","V606","V607","V608","V609","V610","V611",
"JJ_Wave","Amp_Q_Wave","Amp_R_Wave","Amp_S_Wave","R_Prime_Wave","S_Prime_Wave","P_Wave","T_Wave",
"QRSA","QRSTA","DII170","DII171","DII172","DII173","DII174","DII175","DII176","DII177","DII178","DII179",
"DIII180","DIII181","DIII182","DIII183","DIII184","DIII185","DIII186","DIII187","DIII188","DIII189",
"AVR190","AVR191","AVR192","AVR193","AVR194","AVR195","AVR196","AVR197","AVR198","AVR199",
"AVL200","AVL201","AVL202","AVL203","AVL204","AVL205","AVL206","AVL207","AVL208","AVL209",
"AVF210","AVF211","AVF212","AVF213","AVF214","AVF215","AVF216","AVF217","AVF218","AVF219",
"V1220","V1221","V1222","V1223","V1224","V1225","V1226","V1227","V1228","V1229",
"V2230","V2231","V2232","V2233","V2234","V2235","V2236","V2237","V2238","V2239",
"V3240","V3241","V3242","V3243","V3244","V3245","V3246","V3247","V3248","V3249",
"V4250","V4251","V4252","V4253","V4254","V4255","V4256","V4257","V4258","V4259",
"V5260","V5261","V5262","V5263","V5264","V5265","V5266","V5267","V5268","V5269",
"V6270","V6271","V6272","V6273","V6274","V6275","V6276","V6277","V6278","V6279","class"]
#Adding Column names to dataset
new_df.columns=final_df_columns
new_df.to_csv("new data with target class.csv")
new_df.head()
```
As our dataframe is completely cleaned and preprocessed. we will remove the target attribute and store our final dataframe.
```
target=new_df["class"]
final_df = new_df.drop(columns ="class")
final_df.shape
```
# 2. Exploratory Data Analysis (EDA)
Analyzing data sets to summarize their main characteristics.
Making a list of all the types of arrhythmia corresponding to their class labels.
```
#List with class names
class_names = ["Normal",
"Ischemic changes (CAD)",
"Old Anterior Myocardial Infraction",
"Old Inferior Myocardial Infraction",
"Sinus tachycardy",
"Sinus bradycardy",
"Ventricular Premature Contraction (PVC)",
"Supraventricular Premature Contraction",
"Left Boundle branch block",
"Right boundle branch block",
"1.Degree AtrioVentricular block",
"2.Degree AV block",
"3.Degree AV block",
"Left Ventricule hypertrophy",
"Atrial Fibrillation or Flutter",
"Others"]
```
### Analyzing the dataset and check how many examples we have for each class:
We need to sort our dataset by the class attribute to count the number of instances available for each class.
```
t=new_df.sort_values(by=["class"])
# Counting the number of instances for each class
la = t["class"].value_counts(sort=False).tolist()
la
sns.countplot(x ='class',data =new_df)
plt.show()
```
Let's count the total number of instances we have for each class.
```
values = la[0:10]
values.extend([0,0,0])
values.extend(la[10:13])
print(values)
labels = class_names
Log_Norm = []
for i in values:
Log_Norm.append(mt.log10(i+1))
fig1, ax1 = plt.subplots(figsize=(16,9))
patches = plt.pie(Log_Norm, autopct='%1.1f%%', startangle=90)
leg = plt.legend( loc = 'best', labels=['%s, %1.1f %%' % (l, s) for l, s in zip(labels, Log_Norm)])
plt.axis('equal')
for text in leg.get_texts():
plt.setp(text, color = 'Black')
plt.tight_layout()
plt.show()
```
We found that of the 452 examples, 245 belong to class 1, which refers to "normal" people. We also have 12 different types of arrhythmias present in the dataset, while 3 other types of arrhythmia do not appear at all.
### Handling Outliers & Data Visualization
```
#looking for pairwise relationships and outliers
g = sns.PairGrid(final_df, vars=['Age', 'Sex', 'Height', 'Weight'],hue='Sex', palette='BrBG')
g.map(plt.scatter, alpha=0.8)
g.add_legend();
```
According to the scatter plots, there are a few outliers in the 'Height' and 'Weight' attributes. Let's check the maximum heights and weights.
```
sorted(final_df['Height'], reverse=True)[:10]
```
The tallest person who ever lived was **272** cm (1940); the next tallest were **267** cm (1905) and **263.5** cm (1969). We therefore replace **780** and **608** with **180** and **108** cm respectively.
```
final_df['Height']=final_df['Height'].replace(608,108)
final_df['Height']=final_df['Height'].replace(780,180)
sorted(final_df['Weight'], reverse=True)[:10]
```
**176 kg** is a plausible weight, so we'll keep these values in the dataframe.
```
sns.boxplot(data=final_df[["QRS_Dur","P-R_Int","Q-T_Int","T_Int","P_Int"]]);
```
PR interval is the period, measured in milliseconds, that extends from the beginning of the P wave until the beginning of the QRS complex; it is normally between 120 and 200ms in duration.
```
final_df['P-R_Int'].value_counts().sort_index().head().plot(kind='bar')
plt.xlabel('P-R Interval Values')
plt.ylabel('Count');
final_df['P-R_Int'].value_counts().sort_index().tail().plot(kind='bar')
plt.xlabel('P-R Interval Values')
plt.ylabel('Count');
```
The P-R interval data includes 18 zero values that look like outliers; we'll keep them.
```
sns.boxplot(data=final_df[["QRS","T","P","J","Heart_Rate"]]);
sns.boxplot(data=final_df[["R'_Wave","S'_Wave","Int_Def","Rag_R_Nom"]]);
```
The S' wave column contains 0's, which are not NaNs, so we can't assume they are outliers.
```
final_df["S'_Wave"].value_counts().sort_index(ascending=False)
final_df["V101"].value_counts().sort_index(ascending=False)
```
**V101** has an outlier, but when we look at the other leads (V201, V301, V501) we can see similar outliers. Since our data is heavily imbalanced, I can't say these outliers should be dropped.
For example, when we look at our data, we can see that class #8 (Supraventricular Premature Contraction) **has only 2 instances**, and Ventricular Premature Contraction (PVC) has only 3. The outliers appearing in our plots might belong to these instances and need to be kept.
```
final_df["V201"].value_counts().sort_index(ascending=False)
final_df["V301"].value_counts().sort_index(ascending=False)
final_df["V501"].value_counts().sort_index(ascending=False)
```
Now we can see outliers within the last two attributes of each series (DIII188, DIII189, AVR198, AVR199, AVL208, AVL209, AVF218, AVF219, V2238, V2239, V3248, V3249, V4258, V4259, V5268, V5269, V6278, V6279). Similarly, we assume that these outliers might belong to the classes with few instances.
```
sns.set(rc={'figure.figsize':(11.7,5.27)})
sns.boxplot(data=final_df[["AVR190","AVR191","AVR192","AVR193","AVR194","AVR195","AVR196","AVR197","AVR198","AVR199"]]);
sns.set(rc={'figure.figsize':(11.7,5.27)})
sns.boxplot(data=final_df[["AVL200","AVL201","AVL202","AVL203","AVL204","AVL205","AVL206","AVL207","AVL208","AVL209"]]);
sns.set(rc={'figure.figsize':(11.7,5.27)})
sns.boxplot(data=final_df[["AVF210","AVF211","AVF212","AVF213","AVF214","AVF215","AVF216","AVF217","AVF218","AVF219"]]);
#finding correlation with target feature using pearson correlation
target=new_df["class"]
pearsoncorr = final_df.corrwith(other = target,method='pearson')
pearsoncorr.values
```
## Feature Scaling and Splitting dataset
<br />
We will be using 80% of our dataset for training and 20% for testing.
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(final_df, target ,test_size=0.2, random_state=1)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
import warnings
warnings.filterwarnings('ignore')
```
## Evaluation strategy
As the dependent variable is categorical, we will use classification models. For classification, the most informative evaluation strategy is to compare precision and recall. In this setting a false negative is the costly error: we cannot accept telling a person who actually has cardiac arrhythmia that they are healthy (FN).
We will therefore focus on sensitivity (the percentage of sick people who are correctly identified as having the condition) rather than specificity (the percentage of healthy people who are correctly identified as not having the condition).
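To make the distinction concrete, here is a minimal sketch with made-up binary labels (not the notebook's multi-class data) showing how sensitivity and specificity fall out of a confusion matrix:
```
# Toy binary example: 1 = has arrhythmia, 0 = healthy (labels are illustrative only)
from sklearn.metrics import confusion_matrix
y_true_toy = [1, 1, 1, 0, 0, 0, 1, 0]
y_pred_toy = [1, 0, 1, 0, 0, 1, 1, 0]
tn, fp, fn, tp = confusion_matrix(y_true_toy, y_pred_toy).ravel()
sensitivity = tp / (tp + fn)   # recall on the sick class
specificity = tn / (tn + fp)   # recall on the healthy class
print('sensitivity =', sensitivity, '; specificity =', specificity)
```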
```
# importing evaluation metrices.
from sklearn.metrics import r2_score,mean_squared_error,accuracy_score,recall_score,precision_score,confusion_matrix
```
# **3. Modeling**
```
# will store result of each model.
result = pd.DataFrame(columns=['Model','Train Accuracy','Test Accuracy'])
```
## KNN Classifier
```
from sklearn.neighbors import KNeighborsClassifier
knnclassifier = KNeighborsClassifier()
knnclassifier.fit(X_train, y_train)
y_pred = knnclassifier.predict(X_test)
knn_train_accuracy = accuracy_score(y_train, knnclassifier.predict(X_train))
knn_test_accuracy = accuracy_score(y_test, knnclassifier.predict(X_test))
result = result.append(pd.Series({'Model':'KNN Classifier','Train Accuracy':knn_train_accuracy,'Test Accuracy':knn_test_accuracy}),ignore_index=True)
result
```
## Logistic regression
```
from sklearn.linear_model import LogisticRegression
lgclassifier = LogisticRegression(solver = 'saga',random_state = 0)
lgclassifier.fit(X_train, y_train)
y_pred = lgclassifier.predict(X_test)
lg_train_recall = recall_score(y_train, lgclassifier.predict(X_train),average='weighted')
lg_test_recall = recall_score(y_test, lgclassifier.predict(X_test),average='weighted')
lg_train_accuracy = accuracy_score(y_train, lgclassifier.predict(X_train))
lg_test_accuracy = accuracy_score(y_test, lgclassifier.predict(X_test))
result = result.append(pd.Series({'Model':'Logestic Regression','Train Accuracy':lg_train_accuracy,'Test Accuracy':lg_test_accuracy}),ignore_index=True )
result
```
## Decision Tree Classifier
```
from sklearn.tree import DecisionTreeClassifier
dtclassifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0,max_depth=5)
dtclassifier.fit(X_train, y_train)
y_pred_test = dtclassifier.predict(X_test)
y_pred_train = dtclassifier.predict(X_train)
dt_train_accuracy = accuracy_score(y_train,y_pred_train )
dt_test_accuracy = accuracy_score(y_test, y_pred_test)
result = result.append(pd.Series({'Model':'Decision Tree Classifier','Train Accuracy':dt_train_accuracy,'Test Accuracy':dt_test_accuracy}),ignore_index=True )
result
```
## Linear SVM
```
from sklearn.svm import LinearSVC
lsvclassifier = LinearSVC(C=0.01)
lsvclassifier.fit(X_train, y_train)
y_pred_test = lsvclassifier.predict(X_test)
y_pred_train = lsvclassifier.predict(X_train)
lsvc_train_accuracy_score = accuracy_score(y_train, y_pred_train)
lsvc_test_accuracy_score = accuracy_score(y_test, y_pred_test)
result = result.append(pd.Series({'Model':'Linear SVC','Train Accuracy':lsvc_train_accuracy_score,'Test Accuracy':lsvc_test_accuracy_score}),ignore_index=True )
result
```
## Kernelized SVM
```
from sklearn import svm
KSVC_clf = svm.SVC(kernel='sigmoid',C=10,gamma=0.001)
KSVC_clf.fit(X_train, y_train)
y_pred_train = KSVC_clf.predict(X_train)
y_pred_test = KSVC_clf.predict(X_test)
ksvc_train_accuracy_score = accuracy_score(y_train, y_pred_train)
ksvc_test_accuracy_score = accuracy_score(y_test, y_pred_test)
result = result.append(pd.Series({'Model':'Kernelized SVC','Train Accuracy':ksvc_train_accuracy_score,'Test Accuracy':ksvc_test_accuracy_score}),ignore_index=True )
result
```
## Random Forest Classifier
```
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=300, criterion='gini',max_features=100,max_depth=10,max_leaf_nodes=30)
rf_clf.fit(X_train, y_train)
y_pred_train = rf_clf.predict(X_train)
y_pred_test = rf_clf.predict(X_test)
rf_train_accuracy_score = accuracy_score(y_train, y_pred_train)
rf_test_accuracy_score = accuracy_score(y_test, y_pred_test)
result = result.append(pd.Series({'Model':'Random Forest Classifier','Train Accuracy':rf_train_accuracy_score,'Test Accuracy':rf_test_accuracy_score}),ignore_index=True )
result
```
Among these baseline models, the kernelized SVM performs best in terms of recall, with an accuracy of **79.12%**; Logistic Regression achieves a better plain accuracy score.
---
---
# PCA
```
from sklearn.decomposition import PCA
pca = PCA(.98)
pca.fit(X_train)
pca.n_components_
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
from sklearn.model_selection import StratifiedKFold
kFold = StratifiedKFold(n_splits=5)
from sklearn.model_selection import GridSearchCV
```
# KNN with PCA
```
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_jobs=-1)
param_grid={'n_neighbors':[1,2,3,4,5,7,10]}
grid_search = GridSearchCV(knn_clf, param_grid, scoring = 'recall_weighted',cv=kFold, return_train_score=True)
grid_search.fit(X_train_pca,y_train)
grid_search.best_params_
knn_clf = KNeighborsClassifier(n_neighbors=5)
knn_clf.fit(X_train_pca, y_train)
y_pred_train = knn_clf.predict(X_train_pca)
y_pred_test = knn_clf.predict(X_test_pca)
knn_pca_train_recall_score = recall_score(y_train, y_pred_train, average='weighted')
knn_pca_test_recall_score = recall_score(y_test, y_pred_test, average='weighted')
print('Train Recall score: {}'
      .format(knn_pca_train_recall_score))
print('Test Recall score: {}'
      .format(knn_pca_test_recall_score))
confusion_matrix(y_test, y_pred_test)
```
# Logistic Regression with PCA
```
from sklearn.linear_model import LogisticRegression
lgclassifier = LogisticRegression(solver = 'saga',random_state = 0)
lgclassifier.fit(X_train_pca, y_train)
y_pred = lgclassifier.predict(X_test_pca)
y_pred_train = lgclassifier.predict(X_train_pca)
y_pred_test = lgclassifier.predict(X_test_pca)
lreg_train_recall_score = recall_score(y_train, y_pred_train, average='weighted')
lreg_test_recall_score = recall_score(y_test, y_pred_test, average='weighted')
print('Train Recall score: {}'
.format(lreg_train_recall_score))
print('Test Recall score: {}'
.format(lreg_test_recall_score))
confusion_matrix(y_test, y_pred_test)
```
# Linear SVM with PCA
```
from sklearn.svm import LinearSVC
LSVC_clf = LinearSVC()
param_grid = {'C': [0.00001,0.0001,0.001,0.01,0.1,1,10,100]}
grid_search = GridSearchCV(LSVC_clf, param_grid, scoring = 'recall_weighted',cv=kFold, return_train_score=True)
grid_search.fit(X_train_pca,y_train)
grid_search.best_params_
LSVC_clf = LinearSVC(C=0.001)
LSVC_clf.fit(X_train_pca, y_train)
y_pred_train = LSVC_clf.predict(X_train_pca)
y_pred_test = LSVC_clf.predict(X_test_pca)
lscvc_pca_train_recall_score = recall_score(y_train, y_pred_train, average='weighted')
lscv_pca_test_recall_score = recall_score(y_test, y_pred_test, average='weighted')
print('Train Recall score: {}'
.format(lscvc_pca_train_recall_score))
print('Test Recall score: {}'
.format(lscv_pca_test_recall_score))
confusion_matrix(y_test, y_pred_test)
```
# Kernel SVM and Decision Tree with PCA
```
from sklearn import svm
KSVC_clf = svm.SVC(kernel='sigmoid')
param_grid = {'C': [0.0001,0.001,0.01,0.1,1,10],
'gamma': [0.0001,0.001,0.01,0.1,1,10]}
grid_search = GridSearchCV(KSVC_clf, param_grid, scoring = 'recall_weighted',cv=kFold, return_train_score=True)
grid_search.fit(X_train_pca,y_train)
grid_search.best_params_
from sklearn import svm
KSVC_clf = svm.SVC(kernel='sigmoid',C=10,gamma=0.001)
KSVC_clf.fit(X_train_pca, y_train)
y_pred_train = KSVC_clf.predict(X_train_pca)
y_pred_test = KSVC_clf.predict(X_test_pca)
kscv_pca_train_recall_score = recall_score(y_train, y_pred_train, average='weighted')
kscv_pca_test_recall_score = recall_score(y_test, y_pred_test, average='weighted')
print('Train Recall score: {}'
.format(kscv_pca_train_recall_score))
print('Test Recall score: {}'
.format(kscv_pca_test_recall_score))
confusion_matrix(y_test, y_pred_test)
from sklearn.tree import DecisionTreeClassifier
dt_clf = DecisionTreeClassifier()
param_grid = {'max_depth': [1,2, 3, 4, 5,6,7,8,9,10,15,20]}
grid_search = GridSearchCV(dt_clf, param_grid, scoring = 'recall_weighted',cv=kFold, return_train_score=True)
grid_search.fit(X_train_pca,y_train)
grid_search.best_params_
dt_clf = DecisionTreeClassifier(max_depth=1)
dt_clf.fit(X_train_pca, y_train)
y_pred_train = dt_clf.predict(X_train_pca)
y_pred_test = dt_clf.predict(X_test_pca)
dt_pca_train_recall_score = recall_score(y_train, y_pred_train, average='weighted')
dt_pca_test_recall_score = recall_score(y_test, y_pred_test, average='weighted')
print('Train Recall score: {}'
.format(dt_pca_train_recall_score))
print('Test Recall score: {}'
.format(dt_pca_test_recall_score))
confusion_matrix(y_test, y_pred_test)
```
---
---
# Sampling and PCA
Our dataset is imbalanced: classes 7 and 8 have only 3 and 2 instances respectively, whereas class 1 has 245 examples. We will therefore try to address the class imbalance by randomly oversampling the training data.
We will then use PCA (Principal Component Analysis) to reduce the dimensionality of the resampled dataset, keeping the most informative directions in the hope of improving accuracy. The class counts before resampling are printed below for reference.
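```
# Class frequencies before oversampling, to show the imbalance described above
# (quick check added for illustration)
print(target.value_counts().sort_index())
```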
## Random Over Sampling
```
#performing over sampling
from imblearn.over_sampling import RandomOverSampler
ros = RandomOverSampler(random_state=0)
X_resampled, y_resampled = ros.fit_resample(final_df, target)
X_resampled.shape
#finding frequency of each class
import collections
counter = collections.Counter(y_resampled)
counter
sns.countplot(y_resampled)
plt.show()
X_train1, X_test1, y_train1, y_test1 = train_test_split(X_resampled, y_resampled , test_size=0.2, random_state=1)
scaler = StandardScaler()
scaler.fit(X_train1)
X_train1 = scaler.transform(X_train1)
X_test1 = scaler.transform(X_test1)
```
## PCA
```
from sklearn.decomposition import PCA
pca = PCA(.98)
pca.fit(X_train1)
pca.n_components_
X_train1 = pca.transform(X_train1)
X_test1 = pca.transform(X_test1)
```
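To see what the 0.98 variance threshold keeps, we can inspect the explained-variance ratios of the fitted pca object (a quick check added here for illustration, not part of the original notebook):
```
# How much variance do the retained principal components explain?
print(pca.n_components_, 'components retained')
print('first 5 explained-variance ratios:', pca.explained_variance_ratio_[:5].round(3))
print('total variance explained:', pca.explained_variance_ratio_.sum())
```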
## KNN with PCA
```
classifier = KNeighborsClassifier()
classifier.fit(X_train1, y_train1)
Y_pred = classifier.predict(X_test1)
knnp_train_accuracy = accuracy_score(y_train1,classifier.predict(X_train1))
knnp_test_accuracy = accuracy_score(y_test1,Y_pred)
#print(knnp_train_accuracy,knnp_test_accuracy)
result = result.append(pd.Series({'Model':'Knn with PCA','Train Accuracy':knnp_train_accuracy,'Test Accuracy':knnp_test_accuracy}),ignore_index=True )
result
```
## Logistic Regression with PCA
```
from sklearn.linear_model import LogisticRegression
lgpclassifier = LogisticRegression(C=10,random_state = 0)
lgpclassifier.fit(X_train1, y_train1)
y_pred_train1 = lgpclassifier.predict(X_train1)
y_pred_test1 = lgpclassifier.predict(X_test1)
lgp_train_accuracy = accuracy_score(y_train1,y_pred_train1)
lgp_test_accuracy = accuracy_score(y_test1,y_pred_test1)
#print(lgp_train_accuracy,knnp_test_accuracy)
result = result.append(pd.Series({'Model':'Logestic Regression PCA','Train Accuracy':lgp_train_accuracy,'Test Accuracy':lgp_test_accuracy}),ignore_index=True )
result
```
## Decision Tree Classifier with PCA
```
from sklearn.tree import DecisionTreeClassifier
dtpclassifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 0)
dtpclassifier.fit(X_train1, y_train1)
y_pred_test = dtpclassifier.predict(X_test1)
y_pred_train = dtpclassifier.predict(X_train1)
dtp_train_recall_score = recall_score(y_train1, y_pred_train, average='weighted')
dtp_test_recall_score = recall_score(y_test1, y_pred_test, average='weighted')
dtp_train_accuracy_score = accuracy_score(y_train1, y_pred_train)
dtp_test_accuracy_score = accuracy_score(y_test1, y_pred_test)
result = result.append(pd.Series({'Model':'Decision Tree with PCA','Train Accuracy':dtp_train_accuracy_score,'Test Accuracy':dtp_test_accuracy_score}),ignore_index=True )
result
```
## Linear SVM with PCA
```
from sklearn.svm import SVC
classifier = SVC(kernel = 'linear', random_state = 0,probability=True)
classifier.fit(X_train1, y_train1)
y_pred = classifier.predict(X_test1)
#print("Accuracy Score",accuracy_score(y_test1,y_pred)*100)
lsvcp_train_accuracy = accuracy_score(y_train1,classifier.predict(X_train1))
lsvcp_test_accuracy = accuracy_score(y_test1,y_pred)
result = result.append(pd.Series({'Model':'Linear SVM with PCA','Train Accuracy':lsvcp_train_accuracy,'Test Accuracy':lsvcp_test_accuracy}),ignore_index=True )
result
```
## Kernelized SVM with PCA
```
from sklearn import svm
KSVC_clf = svm.SVC(kernel='rbf',C=1,gamma=0.1)
KSVC_clf.fit(X_train1, y_train1)
y_pred_train1 = KSVC_clf.predict(X_train1)
y_pred_test1 = KSVC_clf.predict(X_test1)
ksvcp_train_accuracy_score = accuracy_score(y_train1, y_pred_train1)
ksvcp_test_accuracy_score = accuracy_score(y_test1, y_pred_test1)
result = result.append(pd.Series({'Model':'Kernelized SVM with PCA','Train Accuracy':ksvcp_train_accuracy_score,'Test Accuracy':ksvcp_test_accuracy_score}),ignore_index=True )
result
```
## Random Forest with PCA
```
from sklearn.ensemble import RandomForestClassifier
rfp_clf = RandomForestClassifier()
rfp_clf.fit(X_train1, y_train1)
y_pred_train1 = rfp_clf.predict(X_train1)
y_pred_test1 = rfp_clf.predict(X_test1)
rfp_train_accuracy_score = accuracy_score(y_train1, y_pred_train1)
rfp_test_accuracy_score = accuracy_score(y_test1, y_pred_test1)
result = result.append(pd.Series({'Model':'Random Forest with PCA','Train Accuracy':rfp_train_accuracy_score,'Test Accuracy':rfp_test_accuracy_score}),ignore_index=True )
result
result.plot(kind="bar",figsize=(15,4))
plt.title('Train&Test Scores of Classifiers')
plt.xlabel('Models')
plt.ylabel('Scores')
plt.legend(loc=4 , bbox_to_anchor=(1.2, 0))
plt.show();
```
# Result
The models performed noticeably better after we applied PCA to the resampled data. PCA reduces the complexity of the data: it builds components that emphasize the directions of largest variance, and those components are mutually orthogonal (non-collinear), so collinearity in the large feature set is handled automatically. PCA also reduces training time and tends to improve model quality when working with a very large number of variables.
The best model in terms of recall is the kernelized SVM with PCA, with an accuracy of **99.52%**.
---
```
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.utils.data as data
from torch.optim.lr_scheduler import StepLR
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
import torchvision.models as models
import torchvision.transforms as transforms
from collections import defaultdict
history_dict = defaultdict(list)
# CIFAR-10
classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
original_train_data = CIFAR10(root='data', train=True, download=False, transform=None)
original_test_data = CIFAR10(root='data', train=False, download=False, transform=None)
def preview(dataset):
    offset = 0
    fig, axs = plt.subplots(1, 10, figsize=(20,20))
    for i, ax in enumerate(axs):
        img = dataset[i+offset][0]
        target = dataset[i+offset][1]
        ax.imshow(img)
        if classes is not None:
            label = classes[dataset[i+offset][1]]
            ax.set_title('{}: {}'.format(target, label))
        else:
            ax.set_title(target)
        ax.set_xticks([])
        ax.set_yticks([])
        offset = offset + 100
    plt.show()
print('Original train dataset preview')
preview(original_train_data)
print('Original test dataset preview')
preview(original_test_data)
# Mean and std calculation
train_data = CIFAR10(root='data', train=True, download=False, transform=transforms.ToTensor())
train_loader = DataLoader(train_data, batch_size=200, shuffle=False, num_workers=2)
mean = []
std = []
for i, batch_data in enumerate(train_loader, 0):
    # shape (batch_size, 3, height, width)
    numpy_image = batch_data[0].numpy()
    # shape (3,)
    batch_mean = np.mean(numpy_image, axis=(0,2,3))
    batch_std = np.std(numpy_image, axis=(0,2,3))
    mean.append(batch_mean)
    std.append(batch_std)
# shape (num_iterations, 3) -> (mean across 0th axis) -> shape (3,)
mean = np.array(mean).mean(axis=0)
std = np.array(std).mean(axis=0)
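# Note: averaging the per-batch std values only approximates the dataset-wide std,
# but it is close enough for input normalization here.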
print('mean =', mean, 'std =', std)
batch_size = 200
train_transf = transforms.Compose([
#transforms.RandomResizedCrop(32),
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
test_transf = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
])
train_data = CIFAR10(root='data', train=True, download=False, transform=train_transf)
train_size = int(len(train_data) * 0.8)
valid_size = len(train_data) - train_size
train_data, valid_data = data.random_split(train_data, (train_size, valid_size))
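# Hold out 20% of the CIFAR-10 training set as a validation split.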
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=2)
valid_loader = DataLoader(valid_data, batch_size=batch_size, shuffle=True, num_workers=2)
test_data = CIFAR10(root='data', train=False, download=False, transform=test_transf)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=2)
def time_format(milliseconds):
    milliseconds = int(milliseconds)
    hours = int(milliseconds / (3600 * 1000))
    minutes = int((milliseconds % (3600 * 1000)) / (60 * 1000))
    seconds = int(((milliseconds % (3600 * 1000)) % (60 * 1000)) / 1000)
    milliseconds = int(((milliseconds % (3600 * 1000)) % (60 * 1000)) % 1000)
    return '%d:%02d:%02d.%d' % (hours, minutes, seconds, milliseconds)
def train_epoch(model, device, train_loader, optimizer, criterion, log_interval):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        history_dict['loss'].append(loss.item())
        optimizer.step()
        if batch_idx % log_interval == 0:
            print(' {:5}/{} ({:2.0f}%)\tLoss: {:.6f}'.
                  format(batch_idx * len(data),
                         len(train_loader.dataset),
                         100. * batch_idx / len(train_loader),
                         loss.item()))
def test(model, device, criterion, test_loader):
    model.eval()
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()
    accuracy = 100. * correct / len(test_loader.dataset)
    return accuracy
def train(model, optimizer, scheduler, criterion, train_loader, valid_loader, device, lr, nb_epochs=3, log_interval=100):
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for epoch in range(1, nb_epochs + 1):
        print('================================================================================')
        print('\nEpoch {}/{} - Training at LR {:.6}\n'.format(epoch, nb_epochs, scheduler.get_lr()[0]))
        train_epoch(model, device, train_loader, optimizer, criterion, log_interval)
        # Evaluate on the validation split after each epoch
        acc = test(model, device, criterion, valid_loader)
        print('\n Validation accuracy: {:.2f}%\n'.format(acc))
        history_dict['val_acc'].append(acc)
        scheduler.step()
    end.record()
    torch.cuda.synchronize()
    print('================================================================================')
    print('\nElapsed time', time_format(start.elapsed_time(end)))
    return acc
def plot_curves(loss_values, val_acc):
    fig, ax = plt.subplots(2, 1, figsize=(15,5))
    ax[0].set_title('Training Loss')
    ax[0].plot(loss_values)
    ax[0].set_xticks([])
    ax[1].set_title('Validation Accuracy')
    ax[1].plot(val_acc)
    ax[1].set_xticks([])
device = torch.device('cuda:0') #'cpu'
print('Batch size:', batch_size)
nb_epochs = 24
log_interval = 20
lr = 1e-2
step_size=8
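# Transfer-learning setup (comments added for clarity): start from an ImageNet-pretrained
# ResNet-101, freeze the middle residual stages (layer1-layer3) so only the remaining
# layers are fine-tuned, and replace the final fully-connected layer with a 10-way
# CIFAR-10 classifier.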
model = models.resnet101(pretrained=True)
#print(model)
for name, module in model.named_children():
    if name in ['layer1', 'layer2', 'layer3']:
        for param in module.parameters():
            param.requires_grad = False
model.fc = nn.Linear(2048, 10, bias=True)
model.to(device)
optimizer = optim.Adam(model.parameters(), lr=lr)
scheduler = StepLR(optimizer, step_size=10, gamma=0.1)
criterion = nn.CrossEntropyLoss().to(device)
history_dict = defaultdict(list)
acc = train(model, optimizer, scheduler, criterion, train_loader, valid_loader, device, lr, nb_epochs, log_interval)
print('\nFinal accuracy: {:.2f}%'.format(acc))
plot_curves(history_dict['loss'], history_dict['val_acc'])
acc = test(model, device, criterion, test_loader)
print('\n Test accuracy: {:.2f}%\n'.format(acc))
```
---
# Synthesize Time Series data from your own DataFrame
This Blueprint demonstrates how to create synthetic time series data with Gretel. We assume that within the dataset
there is at least:
1) A specific column holding time data points
2) One or more columns that contain measurements or numerical observations for each point in time.
For this Blueprint, we will generate a very simple sine wave as our time series data.
```
%%capture
!pip install -U "gretel-client<0.8.0" gretel-synthetics pandas
# Load your Gretel API key. You can acquire this from the Gretel Console
# @ https://console.gretel.cloud
from gretel_client import get_cloud_client
client = get_cloud_client(prefix="api", api_key="prompt")
client.install_packages()
# Create a simple timeseries sine wave
import datetime
import pandas as pd
import numpy as np
day = 24 * 60 * 60
year = 365.2425 * day
def load_dataframe() -> pd.DataFrame:
    """ Create a time series x sin wave dataframe. """
    df = pd.DataFrame(columns=['date', 'sin'])
    df.date = pd.date_range(start='2018-01-01', end='2021-03-01', freq='D')
    df.sin = 1 + np.sin(df.date.astype('int64') // 1e9 * (2 * np.pi / year))
    df.sin = (df.sin * 100).round(2)
    df.date = df.date.apply(lambda d: d.strftime('%Y-%m-%d'))
    return df
train_df = load_dataframe()
train_df.set_index('date').plot()
# Create the Gretel Synthetics Training / Model Configuration
from pathlib import Path
checkpoint_dir = str(Path.cwd() / "checkpoints-sin")
config_template = {
"epochs": 100,
"early_stopping": False,
"vocab_size": 20,
"reset_states": True,
"checkpoint_dir": checkpoint_dir,
"overwrite": True,
}
# Capture transient import errors in Google Colab
try:
    from gretel_helpers.series_models import TimeseriesModel
except FileNotFoundError:
    from gretel_helpers.series_models import TimeseriesModel
# Params:
# - time_column: The single column name that represents your points in time
# - trend_columns: One or more columns that are the observations / measurements that are associated with
# the points in time. These should be numerical.
# - other_seed_columns: An optional list of other columns that should be used along with the time_column
# as seeds to the synthetic generator.
synthetic_df = TimeseriesModel(
training_df=train_df,
time_column="date",
trend_columns=["sin"],
synthetic_config=config_template
).train().generate().df
# Does our synthetic data look the same? Yup!
synthetic_df.set_index('date').plot()
```
---
```
import pandas as pd
df1 = pd.read_csv("all_data_1623529956.4780936.csv")
df2 = pd.read_csv("all_data_1623529194.46996.csv")
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime as dt
%matplotlib inline
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
dates1=[dt.datetime.fromtimestamp(ts) for ts in df1["time_s"] ]
dates2=[dt.datetime.fromtimestamp(ts) for ts in df2["time_s"] ]
plt.plot(dates1, df1["cpu_t_degC"])
plt.plot(dates2, df2["cpu_t_degC"])
plt.show()
import matplotlib.pyplot as plt
import matplotlib.dates as md
import numpy as np
import datetime as dt
import time
n=20
duration=1000
now=time.mktime(time.localtime())
timestamps=np.linspace(now,now+duration,n)
dates=[dt.datetime.fromtimestamp(ts) for ts in timestamps]
values=np.sin((timestamps-now)/duration*2*np.pi)
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
plt.plot(dates,values)
plt.show()
import pandas as pd
df1 = pd.read_csv("all_data_1623529956.4780936.csv")
df2 = pd.read_csv("all_data_1623529194.46996.csv")
import matplotlib.pyplot as plt
import datetime as dt
import matplotlib.dates as md
import numpy as np
%matplotlib inline
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
dates1=[dt.datetime.fromtimestamp(ts) for ts in df1["time_s"] ]
dates2=[dt.datetime.fromtimestamp(ts) for ts in df2["time_s"] ]
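# Vibration proxy: magnitude of the IMU acceleration vector, sqrt(x^2 + y^2 + z^2), in g.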
total1 = np.sqrt(np.array(df1["imu_x_g"])* np.array(df1["imu_x_g"]) + \
np.array(df1["imu_y_g"])* np.array(df1["imu_y_g"]) + \
np.array(df1["imu_z_g"])* np.array(df1["imu_z_g"]) )
total2 = np.sqrt(np.array(df2["imu_x_g"])* np.array(df2["imu_x_g"]) + \
np.array(df2["imu_y_g"])* np.array(df2["imu_y_g"]) + \
np.array(df2["imu_z_g"])* np.array(df2["imu_z_g"]) )
plt.plot(dates1[65:], total1[65:])
plt.plot(dates2[65:], total2[65:])
plt.title("vibration vs time without zeroes")
plt.show()
import pandas as pd
df1 = pd.read_csv("all_data_1623529956.4780936.csv")
df2 = pd.read_csv("all_data_1623529194.46996.csv")
import matplotlib.pyplot as plt
import datetime as dt
import matplotlib.dates as md
import numpy as np
%matplotlib inline
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
dates1=[dt.datetime.fromtimestamp(ts) for ts in df1["time_s"] ]
dates2=[dt.datetime.fromtimestamp(ts) for ts in df2["time_s"] ]
plt.plot(dates1, df1["Z1_m"])
plt.plot(dates2, df2["Z1_m"])
plt.title("Z1 position vs time")
plt.show()
import pandas as pd
df1 = pd.read_csv("all_data_1623529956.4780936.csv")
df2 = pd.read_csv("all_data_1623529194.46996.csv")
import matplotlib.pyplot as plt
import datetime as dt
import matplotlib.dates as md
import numpy as np
%matplotlib inline
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
dates1=[dt.datetime.fromtimestamp(ts) for ts in df1["time_s"] ]
dates2=[dt.datetime.fromtimestamp(ts) for ts in df2["time_s"] ]
plt.plot(dates1, df1["WOB_gm"])
plt.plot(dates2, df2["WOB_gm"])
plt.axhline(y=-15300, color = 'r')
plt.title("WOB vs time, line at -15300g")
plt.show()
import pandas as pd
df2 = pd.read_csv("all_data_1623529956.4780936.csv")
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
dates2=[ts-df2["time_s"][0] for ts in df2["time_s"] ]
plt.plot(dates2, df2["WOB_gm"])
plt.title("WOB of run 2 vs seconds since start")
plt.show()
import pandas as pd
df2 = pd.read_csv("all_data_1623529956.4780936.csv")
import matplotlib.pyplot as plt
import datetime as dt
import matplotlib.dates as md
import numpy as np
%matplotlib inline
# Linear fit of Z1 position vs. time over the tail of run 2
dZ = df2["Z1_m"][7538:]
dT = df2["time_s"][7538:]
from scipy.stats import linregress
print(linregress(dT, dZ))
import pandas as pd
df2 = pd.read_csv("all_data_1623529956.4780936.csv")
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
plt.subplots_adjust(bottom=0.2)
plt.xticks( rotation=25 )
ax=plt.gca()
dates2=[ts-df2["time_s"][0] for ts in df2["time_s"] ]
plt.plot(dates2, df2["Z1_m"])
plt.title("Z1 position of run 2 vs seconds since start")
plt.show()
```
---
# VMD
```
import dsatools
from dsatools import utilits as ut
import matplotlib.pyplot as plt
from dsatools._base._imf_decomposition import *
signal1 = dsatools.generator.harmonics(amplitude=[1],
f0=[0.3,0.2,0.5,5.1,1.04],
delta_f=[0.1],fs=10)
imfs = vmd(signal1,order=6,reularization=40,)
ut.probe((imfs.sum(axis=0)))
freqs = vmd(signal1,order=6,reularization=40,ret_freqs=True)
print(freqs)
ut.probe((imfs[2])); plt.show()
```
# EWT
```
import dsatools
from dsatools import utilits as ut
import matplotlib.pyplot as plt
from dsatools._base._imf_decomposition import *
signal1 = dsatools.generator.harmonics(amplitude=[1],
f0=[0.3,0.2,0.5,5.1,1.04],
delta_f=[0.1],fs=10)
imfs = ewt(signal1,order=None, gamma=0.21, average=4)
ut.probe((imfs.sum(axis=0)))
ut.probe((imfs[2])); plt.show()
```
# HVD
```
import dsatools
from dsatools import utilits as ut
import matplotlib.pyplot as plt
from dsatools._base._imf_decomposition import *
signal1 = dsatools.generator.harmonics(amplitude=[1],
f0=[0.3,0.2,0.5,5.1,1.04],
delta_f=[0.1],fs=10)
imfs = hvd(signal1, order=16, fpar=43)
ut.probe((imfs.sum(axis=0)));
ut.probe((imfs[2])); plt.show()
freqs = hvd(signal1, order=16, fpar=43,ret_freqs=True)
print(freqs)
```
# EMD
```
import dsatools
from dsatools import utilits as ut
import matplotlib.pyplot as plt
from dsatools._base._imf_decomposition import *
signal1 = dsatools.generator.harmonics(amplitude=[1],
f0=[0.3,0.2,0.5,5.1,1.04],
delta_f=[0.1],fs=10)
imfs = emd(signal1.real,order=16,method='cubic',max_itter=100)
ut.probe((imfs.sum(axis=0))); plt.show()
ut.probe((imfs[1]))
ut.probe((imfs[2]))
import dsatools
from dsatools import utilits as ut
import matplotlib.pyplot as plt
from dsatools._base._imf_decomposition import *
signal1 = dsatools.generator.harmonics(amplitude=[1],
f0=[0.3,0.2,0.5,5.1,1.04],
delta_f=[0.1],fs=10)
imfs = emd(signal1,order=16,method='thin_plate',max_itter=100)
ut.probe((imfs.sum(axis=0))); plt.show()
ut.probe((imfs[2])); plt.show()
import dsatools
from dsatools import utilits as ut
import matplotlib.pyplot as plt
from dsatools._base._imf_decomposition import *
signal1 = dsatools.generator.harmonics(amplitude=[1],
f0=[0.3,0.2,0.5,5.1,1.04],
delta_f=[0.1],fs=10)
imfs = emd(signal1,order=16,method='rbf', max_itter=100, tol = 0.01)
ut.probe((imfs.sum(axis=0))); plt.show()
ut.probe((imfs[2])); plt.show()
```
# Multi-ConvNet Sentiment Classifier
In this notebook, we concatenate the outputs of *multiple, parallel convolutional layers* to classify IMDB movie reviews by their sentiment.
[](https://colab.research.google.com/github/alhaol/DLTFpT/blob/master/notebooks/multi_convnet_sentiment_classifier.ipynb)
#### Load dependencies
```
import tensorflow
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model # new!
from tensorflow.keras.layers import Input, concatenate # new!
from tensorflow.keras.layers import Dense, Dropout, Embedding, SpatialDropout1D, Conv1D, GlobalMaxPooling1D
from tensorflow.keras.callbacks import ModelCheckpoint
import os
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
```
#### Set hyperparameters
```
# output directory name:
output_dir = 'model_output/multiconv'
# training:
epochs = 4
batch_size = 128
# vector-space embedding:
n_dim = 64
n_unique_words = 5000
max_review_length = 400
pad_type = trunc_type = 'pre'
drop_embed = 0.2
# convolutional layer architecture:
n_conv_1 = n_conv_2 = n_conv_3 = 256
k_conv_1 = 3
k_conv_2 = 2
k_conv_3 = 4
# dense layer architecture:
n_dense = 256
dropout = 0.2
```
#### Load data
```
(x_train, y_train), (x_valid, y_valid) = imdb.load_data(num_words=n_unique_words)
```
#### Preprocess data
```
x_train = pad_sequences(x_train, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
x_valid = pad_sequences(x_valid, maxlen=max_review_length, padding=pad_type, truncating=trunc_type, value=0)
```
#### Design neural network architecture
```
input_layer = Input(shape=(max_review_length,),
dtype='int16', name='input')
# embedding:
embedding_layer = Embedding(n_unique_words, n_dim,
name='embedding')(input_layer)
drop_embed_layer = SpatialDropout1D(drop_embed,
name='drop_embed')(embedding_layer)
# three parallel convolutional streams:
conv_1 = Conv1D(n_conv_1, k_conv_1,
activation='relu', name='conv_1')(drop_embed_layer)
maxp_1 = GlobalMaxPooling1D(name='maxp_1')(conv_1)
conv_2 = Conv1D(n_conv_2, k_conv_2,
activation='relu', name='conv_2')(drop_embed_layer)
maxp_2 = GlobalMaxPooling1D(name='maxp_2')(conv_2)
conv_3 = Conv1D(n_conv_3, k_conv_3,
activation='relu', name='conv_3')(drop_embed_layer)
maxp_3 = GlobalMaxPooling1D(name='maxp_3')(conv_3)
# concatenate the activations from the three streams:
concat = concatenate([maxp_1, maxp_2, maxp_3])
# dense hidden layers:
dense_layer = Dense(n_dense,
activation='relu', name='dense')(concat)
drop_dense_layer = Dropout(dropout, name='drop_dense')(dense_layer)
dense_2 = Dense(int(n_dense/4),
activation='relu', name='dense_2')(drop_dense_layer)
dropout_2 = Dropout(dropout, name='drop_dense_2')(dense_2)
# sigmoid output layer:
predictions = Dense(1, activation='sigmoid', name='output')(dropout_2)
# create model:
model = Model(input_layer, predictions)
model.summary()
```
#### Configure model
```
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(filepath=output_dir+"/weights.{epoch:02d}.hdf5")
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
```
#### Train!
```
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_valid, y_valid), callbacks=[modelcheckpoint])
```
#### Evaluate
```
model.load_weights(output_dir+"/weights.02.hdf5")
y_hat = model.predict(x_valid)
plt.hist(y_hat)
_ = plt.axvline(x=0.5, color='orange')
"{:0.2f}".format(roc_auc_score(y_valid, y_hat)*100.0)
```
<div class="alert alert-block alert-info">
<b><h1>ENGR 1330 Computational Thinking with Data Science </h1></b>
</div>
Copyright © 2021 Theodore G. Cleveland and Farhang Forghanparast
# 0: Introduction
- Introduction to Course and Web-enabled content
- Computational thinking concepts
- JupyterLab Environment for ENGR 1330
---
## Course Content
The course content is hosted at [http://54.243.252.9/engr-1330-webroot/](http://54.243.252.9/engr-1330-webroot/). Direct access is not the intended way to get the content, as the web-site is comparatively unstructured. Instead use the links in the syllabus located at [http://54.243.252.9/engr-1330-webroot/0-Syllabus/ENGR-1330-2022-1-Syllabus.html](http://54.243.252.9/engr-1330-webroot/0-Syllabus/ENGR-1330-2022-1-Syllabus.html) .
---
## Computational Thinking Concepts
Computational thinking (CT) refers to the thought processes involved in expressing solutions as computational steps or algorithms that can be carried out by a computer. Data science is one of several applications of CT.
Much of what follows is borrowed from (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2696102/).
Computational thinking is taking an approach to solving problems, designing systems and understanding human behaviour that draws on concepts fundamental to computing (http://www.cs.cmu.edu/~15110-s13/Wing06-ct.pdf).
Computational thinking is a kind of analytical thinking:
- It shares with mathematical thinking in the general ways in which we might approach solving a problem.
- It shares with engineering thinking in the general ways in which we might approach designing and evaluating a large, complex system that operates within the constraints of the real world.
- It shares with scientific thinking in the general ways in which we might approach understanding computability, intelligence, the mind and human behaviour.
The essence of computational thinking is **abstraction** and **automation**.
In computing, we abstract notions beyond the physical dimensions of time and space. Our abstractions are extremely general because they are symbolic, where numeric abstractions are just a special case.
### CT Foundations
CT is literally a process for breaking down a problem into smaller parts, looking for patterns in the problems, identifying what kind of information is needed, developing a step-by-step solution, and implementing that solution.
1. Decomposition
2. Pattern Recognition
3. Abstraction
4. Algorithms
5. System Integration (implementation)
#### Decomposition
Decomposition is the process of taking a complex problem and breaking it into more manageable sub-problems. Examples include:
- Writing a paper:
- Introduction
- Body
- Conclusion
- Wide-viewed (Panorama) image:
- Taking multiple overlapped photos
- Stitch them
Decomposition often leaves a **framework** of sub-problems that later have to be **assembled (system integration)** to produce a desired solution.
#### Pattern Recognition
Pattern recognition refers to finding similarities, or shared characteristics, among problems. It allows a complex problem to become easier to solve, because the same solution method can be reused for each occurrence of the pattern.
Pattern recognition enables **automation** - it's a fundamental pillar of CT. It also provides a way to use analogs from old problems to address new situations, and it too requires **assembly (system integration)** to produce a desired solution.
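For example (a small illustrative sketch with made-up numbers, not part of any assignment): the same read-compute-report steps recur for several datasets, so we capture the pattern once as a function and let a loop automate it.

```
# The same pattern (take a dataset, compute a summary, report it) recurs,
# so we write it once and apply it to every case automatically.
datasets = {
    "quiz_1": [78, 85, 92, 64],   # made-up scores for illustration
    "quiz_2": [88, 71, 95, 83],
}

def summarize(name, scores):
    average = sum(scores) / len(scores)
    print(f"{name}: average = {average:.1f}")

for name, scores in datasets.items():   # automation: reuse the pattern for each dataset
    summarize(name, scores)
```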
#### Abstraction
Determine important characteristics of the problem and ignore characteristics that are not important. Use these characteristics to create a representation of what we are trying to solve.
Books in an online bookstore
|Important| NOT important|
|:--------|:-------------|
|title | Cover color |
|ISBN |Author’s hometown|
|Authors | ... |
|... | ... |
#### Algorithms
An algorithm is a step-by-step set of instructions for solving a problem (https://en.wikipedia.org/wiki/Algorithm).
It identifies what is to be done and the order in which the steps should be carried out.
```{figure} algorithm.png
---
width: 400px
name: algorithm
---
Humorous "algorithm". Image from [https://www.newyorker.com/magazine/2021/01/18/whats-wrong-with-the-way-we-work?utm_source=pocket-newtab](https://www.newyorker.com/magazine/2021/01/18/whats-wrong-with-the-way-we-work?utm_source=pocket-newtab)
```
{numref}`algorithm`
An algorithm is a **finite** sequence of well-defined instructions, typically used to solve a class of problems or to perform a computation. Algorithms are unambiguous and are used as specifications for performing calculations, data processing, automated reasoning, and other tasks. Starting from an initial state and initial input (perhaps empty), the instructions describe a computation that, when executed, proceeds through a finite number of defined successive states, eventually producing "output" and terminating at a final ending state. The transition from one state to the next is not necessarily deterministic; some algorithms, known as randomized algorithms, can incorporate random input.
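For example, here is a small illustrative sketch (with made-up numbers) of an algorithm written out as explicit, ordered steps in Python:

```
# A tiny algorithm: find the largest value in a list.
def find_largest(values):
    largest = values[0]        # Step 1: assume the first value is the largest so far
    for v in values[1:]:       # Step 2: examine each remaining value in order
        if v > largest:        # Step 3: if a larger value is found, remember it
            largest = v
    return largest             # Step 4: report the result

print(find_largest([3, 41, 12, 9, 74, 15]))   # prints 74
```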
#### System Integration (implementation)
System integration is the assembly of the parts above into the complete (integrated) solution. Integration combines parts into a program which is the realization of an algorithm using a syntax that the computer can understand.
## JupyterLab Environment
```{note}
here is a note
```
```{admonition} Tip
:class: tip
here is a tip
```
```{warning}
here is a warning
```
## References
1. Driscoll, M. (2021) *Jupyter Notebook: An Introduction*, [https://realpython.com/jupyter-notebook-introduction/](https://realpython.com/jupyter-notebook-introduction/)
2. Computational and Inferential Thinking Ani Adhikari and John DeNero, Computational and Inferential Thinking, The Foundations of Data Science, Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND) Chapter 1 [https://www.inferentialthinking.com/chapters/01/what-is-data-science.html](https://www.inferentialthinking.com/chapters/01/what-is-data-science.html)
3. Learn Python the Hard Way (Online Book) [https://learnpythonthehardway.org/book/](https://learnpythonthehardway.org/book/) Recommended for beginners who want a complete course in programming with Python.
4. LearnPython.org (Interactive Tutorial) [https://www.learnpython.org/](https://www.learnpython.org/) Short, interactive tutorial for those who just need a quick way to pick up Python syntax.
5. How to Think Like a Computer Scientist (Interactive Book) [https://runestone.academy/runestone/books/published/thinkcspy/index.html](https://runestone.academy/runestone/books/published/thinkcspy/index.html) Interactive "CS 101" course taught in Python that really focuses on the art of problem solving.
6. Beginning Programming with Python® For Dummies®, John Wiley & Sons, Inc., 111 River Street, Hoboken, NJ 07030-5774 [https://we.riseup.net/assets/345912/Beginning+Programming+with+Python+For+Dummies+Mueller%2C+John+Paul+%5BSRG%5D.pdf](https://we.riseup.net/assets/345912/Beginning+Programming+with+Python+For+Dummies+Mueller%2C+John+Paul+%5BSRG%5D.pdf)
---
## Laboratory 0
**Examine** (click) Laboratory 0 as a webpage at [Laboratory 0.html](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab00/Lab00.html)
**Download** (right-click, save target as ...) Laboratory 0 as a jupyterlab notebook from [Laboratory 0.ipynb](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab00/Lab00.ipynb)
<hr><hr>
## Exercise Set 0
**Examine** (click) Exercise Set 0 as a webpage at [Exercise 0.html](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab00/Lab00-TH.html)
**Download** (right-click, save target as ...) Exercise Set 0 as a jupyterlab notebook at [Exercise Set 0.ipynb](http://54.243.252.9/engr-1330-webroot/8-Labs/Lab00/Lab00-TH.ipynb)
```
# Course - DSC 630 - Predictive Analytics
# Name - Vinay Nagaraj & Vikas Ranjan
# Assignment - Final Project - Credit Card Fraud Detection
```
## Credit card fraud detection
### Problem Statement:
Each year, financial institutions lose a significant amount of money to credit card fraud. In 2018, a total of $24.26 billion was lost to payment card fraud across the globe, with the United States being the most fraud-prone country. Credit card fraud was ranked the number one type of identity theft fraud; it increased by 18.4 percent in 2018 and is still climbing. Credit card fraud includes fraudulent transactions on a credit card or debit card, and it comes in two kinds: card-present fraud and card-not-present fraud. Card-not-present fraud is almost 81 percent more likely than point-of-sale fraud. It is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase; failing to do so results not only in financial loss but also in a loss of customer confidence in the payment industry. Losses to financial institutions can be reduced by detecting credit card fraud and alerting banks about potentially fraudulent transactions.
```
# Load necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import auc, make_scorer
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import confusion_matrix,precision_recall_curve,auc,roc_auc_score,roc_curve,recall_score
from sklearn.metrics import classification_report
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import train_test_split, StratifiedShuffleSplit, validation_curve, learning_curve, GridSearchCV
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from numpy import mean, std
import scikitplot as skplt
from sklearn.metrics import log_loss, average_precision_score, make_scorer
import warnings
warnings.filterwarnings("ignore")
# Load credit card fraud dataset into a dataframe
df = pd.read_csv("creditcard.csv")
# Check the dimension of the data frame
print("The dimension of the table is: ", df.shape)
print(df.head(5))
# Check the types of each feature
df.dtypes
```
#### It contains only numerical input variables which are the result of a PCA transformation.
#### Attribute Information:
##### 1) Time - Number of seconds elapsed between this transaction and the first transaction in the dataset.
##### 2) V1- V28 – These are the result of a PCA Dimensionality reduction to protect user identities and sensitive features.
##### 3) Amount – Transaction amount
##### 4) Class – This is a response variable and has the values of 1 for fraudulent transactions, and 0 for non-fraudulent transactions.
#### There are 29 decimal fields and 2 integer fields in the dataset.
```
# Check if any missing values
df.isnull().sum()
```
#### There are no null values in the data frame
```
# Check the data summary
df.describe()
```
### Graph Analysis
```
# Plot of fraudulent vs non-fraudulent transactions
sns.countplot('Class', data=df)
plt.title('0: Not Fraud, 1: Fraud', fontsize=14)
# Percentage of fraudulent vs non-fraudulent transactions
Count_Normal_transaction = len(df[df["Class"]==0]) # normal transactions are represented by 0
Count_Fraud_transaction = len(df[df["Class"]==1]) # fraud by 1
print("Total count of non-fraud transactions is",Count_Normal_transaction)
print("Total count of fraud transactions is",Count_Fraud_transaction)
Percentage_of_Normal_transaction = Count_Normal_transaction/(Count_Normal_transaction+Count_Fraud_transaction)
print("Percentage of normal transactions is",Percentage_of_Normal_transaction*100)
Percentage_of_Fraud_transaction = Count_Fraud_transaction/(Count_Normal_transaction+Count_Fraud_transaction)
print("Percentage of fraud transactions",Percentage_of_Fraud_transaction*100)
```
Our dataset contains a total of 284,315 non-fraudulent transactions and 492 fraudulent transactions. It is highly imbalanced, and we will handle that by over-sampling (SMOTE) before we perform the model analysis.
```
# Distribution of fraudulent transactions amounts
sns.distplot(df[df['Class'] == 1]['Amount'])
# Analysis of fraudulent transaction amounts
print("Fraudulent transaction Amounts Analysis: \n")
avg_amt = df[df['Class']== 1]['Amount'].mean()
std_dev_amt = df[df['Class']== 1]['Amount'].std()
min_amt = df[df['Class']== 1]['Amount'].min()
max_amt = df[df['Class']== 1]['Amount'].max()
print(f"The average fraudulent transaction amount is {avg_amt}")
print(f"The std deviation for fraudulent transaction amount is {std_dev_amt}")
print(f"The min fraudulent transaction amount is {min_amt}")
print(f"The max fraudulent transaction amount is {max_amt}")
```
From the dataset, we can see that fraudulent transactions do not tend to involve large amounts. Rather, they are more likely to occur at smaller amounts, where the chances of the transactions being identified are minimal.
```
# Distribution of transaction time
sns.distplot(df['Time'])
# Split fraud and non-fraud for histogram
fraud_class = df[df.Class == 1]
non_fraud_class = df[df.Class == 0]
# Histograms of transaction times in fraudulent and non-fraudulent transactions
plt.figure(figsize=(12, 10))
plt.subplot(2, 2, 1)
fraud_class.Time.hist(bins=35, color='blue', alpha=0.6, label="Fraudulent transaction times")
plt.legend()
plt.subplot(2, 2, 2)
non_fraud_class.Time.hist(bins=35, color='blue', alpha=0.6, label="Non-fraudulent transaction times")
plt.legend()
```
From the plots above, fraudulent transactions show a couple of peak times, but the time of the transaction by itself is not a strong indicator of whether a transaction is fraudulent.
```
# Pearson Correlation Heatmap
plt.figure(figsize = (14,14))
plt.title('Credit Card Transactions features correlation plot (Pearson)')
corr = df.corr()
sns.heatmap(corr,xticklabels=corr.columns,yticklabels=corr.columns,linewidths=.1,cmap="YlGnBu")
plt.show()
# Density plot of the features
var = df.columns.values
i = 0
t0 = df.loc[df['Class'] == 0]
t1 = df.loc[df['Class'] == 1]
sns.set_style('whitegrid')
plt.figure()
fig, ax = plt.subplots(8,4,figsize=(16,28))
for feature in var:
    i += 1
    plt.subplot(8,4,i)
    sns.kdeplot(t0[feature], bw=0.5,label="Class = 0")
    sns.kdeplot(t1[feature], bw=0.5,label="Class = 1")
    plt.xlabel(feature, fontsize=12)
    locs, labels = plt.xticks()
    plt.tick_params(axis='both', which='major', labelsize=12)
plt.show();
```
For some of the features we can observe good selectivity in terms of distribution for the two values of Class: V4 and V11 have clearly separated distributions for Class values 0 and 1, V12, V14 and V18 are partially separated, V1, V2, V3 and V10 have quite distinct profiles, whilst V25, V26 and V28 have similar profiles for the two values of Class.
In general, with just a few exceptions (Time and Amount), the feature distributions for legitimate transactions (Class = 0) are centered around 0, sometimes with a long tail at one of the extremities. At the same time, the fraudulent transactions (Class = 1) have skewed (asymmetric) distributions.
### Train and Test
```
# Train and test data
x=df.drop(columns=["Time","Class"],axis="columns")
y=df.Class
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=.3,random_state=42)
# Details of training dataset
print("Transaction Number x_train dataset: ", x_train.shape)
print("Transaction Number y_train dataset: ", y_train.shape)
print("Transaction Number x_test dataset: ", x_test.shape)
print("Transaction Number y_test dataset: ", y_test.shape)
print("Before OverSampling, counts of label '1': {}".format(sum(y_train==1)))
print("Before OverSampling, counts of label '0': {} \n".format(sum(y_train==0)))
```
As we see above, the dataset is highly imbalanced, as most of the transactions are non-fraudulent. The algorithms are therefore much more likely to classify new observations into the majority class, and high accuracy alone would not tell us much. To address this challenge, we use an oversampling approach instead of undersampling. Oversampling increases the number of minority-class members in the training set; its advantage is that, unlike undersampling, no information from the original training set is lost, since all observations from the minority and majority classes are kept.
Since this approach is prone to overfitting, we have to be cautious. We use an oversampling technique called SMOTE (Synthetic Minority Oversampling Technique) to balance the dataset; it creates synthetic points from the minority class.
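As a rough sketch of the idea behind SMOTE (an illustration with made-up numbers, not the imblearn implementation used below): each synthetic sample lies on the line segment between a minority-class point and one of its nearest minority-class neighbours.

```
import numpy as np

rng = np.random.default_rng(0)
x_i  = np.array([1.0, 2.0])   # a minority-class sample (made-up values)
x_nn = np.array([1.4, 2.6])   # one of its nearest minority-class neighbours
lam = rng.uniform(0, 1)       # random interpolation factor in [0, 1)
x_synthetic = x_i + lam * (x_nn - x_i)   # new synthetic point between the two
print(x_synthetic)
```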
```
# Oversample the training dataset
sm = SMOTE(random_state=2)
x_train_s, y_train_s = sm.fit_resample(x_train, y_train.ravel())
print('After OverSampling, the shape of train_x: {}'.format(x_train_s.shape))
print('After OverSampling, the shape of train_y: {} \n'.format(y_train_s.shape))
print("After OverSampling, counts of label '1', %: {}".format(sum(y_train_s==1)/len(y_train_s)*100.0,2))
print("After OverSampling, counts of label '0', %: {}".format(sum(y_train_s==0)/len(y_train_s)*100.0,2))
sns.countplot(x=y_train_s, data=df, palette='CMRmap')
# Feature selection using Variance Threshold with threshold of 0.5
var = VarianceThreshold(threshold=.5)
var.fit(x_train_s,y_train_s)
x_train_var=var.transform(x_train_s)
x_test_var=var.transform(x_test)
x_train_var.shape
# Alternate way to perform feature selection and display the features
def variance_threshold_selector(data, threshold=0.5):
    selector = VarianceThreshold(threshold)
    selector.fit(data)
    return data[data.columns[selector.get_support(indices=True)]]
variance_threshold_selector(x_train_s, 0.5)
varth_features=var.get_support()
varth_features
```
The variance threshold considers only the spread of each feature. If a feature takes (nearly) the same value for 95% or more of the samples, its variance is very close to zero; such a feature is unlikely to help the model's predictions and can be removed.
The entries marked True above are the features retained by the variance-threshold technique; the columns from V23 to V28 are removed.
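A toy illustration of the same idea (made-up numbers, separate from the pipeline above): a nearly constant column has variance below the threshold and is dropped.

```
import numpy as np
from sklearn.feature_selection import VarianceThreshold

X_toy = np.array([[1, 10],
                  [1, 20],
                  [1, 30],
                  [2, 40]])           # column 0 is nearly constant, column 1 varies a lot

sel = VarianceThreshold(threshold=0.5)
X_reduced = sel.fit_transform(X_toy)  # drops columns whose variance falls below 0.5
print(sel.variances_)                 # [0.1875, 125.0]
print(sel.get_support())              # [False, True]
print(X_reduced)                      # only the second column survives
```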
```
# Feature selection using SelectKBest feature selection
skbest = SelectKBest(k=10)
skbest.fit(x_train_s,y_train_s)
x_train_skbest=skbest.transform(x_train_s)
x_test_skbest=skbest.transform(x_test)
x_train_skbest.shape
kbest_features=skbest.get_support()
kbest_features
# Determine 10 best features using SelectKBest
best_features = SelectKBest(score_func=f_classif, k=10)
fit = best_features.fit(x_train_s,y_train_s)
df_scores = pd.DataFrame(fit.scores_)
df_columns = pd.DataFrame(x_train_s.columns)
# concatenate dataframes
feature_scores = pd.concat([df_columns, df_scores],axis=1)
feature_scores.columns = ['Feature_Name','Score'] # name output columns
print(feature_scores.nlargest(29,'Score')) # print 29 best features
# Bar plot showing features in the order of score
tmp = feature_scores.sort_values(by='Score',ascending=False)
plt.title('Features importance',fontsize=14)
s = sns.barplot(x='Feature_Name',y='Score',data=tmp)
s.set_xticklabels(s.get_xticklabels(),rotation=90)
plt.show()
```
The entries marked True above are the features selected by the SelectKBest technique; the 10 most relevant features are kept. The selected features can be tested by running them through the model.
```
# calculate precision recall area under curve
def preci_auc(y_true, pred_prob):
    # calculate precision-recall curve
    p, r, _ = precision_recall_curve(y_true, pred_prob)
    # calculate area under curve
    return auc(r, p)
# Evaluate a model
def evaluate_model(x, y, model):
    # Define evaluation procedure
    CV = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
    # Define a precision-recall AUC scorer (note: the cross-validation below scores with ROC AUC)
    metric = make_scorer(preci_auc, needs_proba=True)
    # Evaluate model
    scores = cross_val_score(model, x, y, scoring='roc_auc', cv=CV, n_jobs=-1)
    return scores
# Define reference model
model = DummyClassifier(strategy='constant', constant=1)
```
SelectKBest returned the top 10 features, and we will use this reduced training and test data in the remaining steps.
```
# define the reference model
model = DummyClassifier(strategy='constant', constant=1)
# Evaluate the model
scores = evaluate_model(x_train_skbest, y_train_s, model)
# summarize performance
print('Mean area under curve: %.3f (%.3f)' % (mean(scores), std(scores)))
```
From above, the baseline score is 0.50, so any model we select should score at least above this value.
Since the feature values come from a PCA transformation, it is better to normalize the data, as unscaled inputs could affect the performance of the model.
```
# Normalize the input
scaler = StandardScaler()
scaler.fit(x_train_skbest)
x_train_norm = scaler.transform(x_train_skbest)
x_test_norm = scaler.transform(x_test_skbest)
```
Model selection - Logistic regression is one of the most common models for this kind of problem; a few other models are compared as well to see how the results differ. Cross-validation is used throughout.
```
def model_val(x, y, classifier, scor, show):
    x = np.array(x)
    y = np.array(y)
    scores = cross_val_score(classifier, x, y, scoring=scor)
    if show == True:
        print("Score: {:.2f} (+/- {:.2f})".format(scores.mean(), scores.std()))
    return scores.mean()
# List of models
rfc = RandomForestClassifier()
ctc = DecisionTreeClassifier()
sglc = SGDClassifier()
lr = LogisticRegression()
model = []
score = []
# Check model score
for classifier in (rfc, ctc, sglc, lr):
    model.append(classifier.__class__.__name__)
    score.append(model_val(x_train_norm, y_train_s, classifier, scor='roc_auc', show=True))
pd.DataFrame(data=score, index=model, columns=['roc_auc'])
```
The table here shows an aggregate measure of performance across the four classification models that we chose.
We used our training dataset to compute this score.
The roc_auc scores of all the models are very good, and we will perform model evaluation for the Random Forest classifier and Logistic Regression.
### Random Forest Model Evaluation
```
pipeline_rf = Pipeline([
('model', RandomForestClassifier(n_jobs=-1, random_state=1))
])
```
Because processing this large dataset takes considerable time, we specify only the number of estimators instead of running a full hyperparameter grid search.
```
parm_gridscv_rf = {'model__n_estimators': [75]}
grid_rf = GridSearchCV(estimator=pipeline_rf, param_grid=parm_gridscv_rf, scoring='roc_auc', n_jobs=-1,
pre_dispatch='2*n_jobs', cv=5, verbose=1, return_train_score=False)
grid_rf.fit(x_train_norm, y_train_s)
pd.DataFrame(grid_rf.cv_results_)
grid_rf.best_score_, grid_rf.best_params_
```
### Test Random Forest model
```
y_pred = grid_rf.predict(x_test_norm)
# Decimal places based on number of samples
dec = np.int64(np.ceil(np.log10(len(y_test))))
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred), '\n')
print('Classification report')
print(classification_report(y_test, y_pred, digits=dec))
print('Scalar Metrics')
format_str = '%%13s = %%.%if' % dec
if y_test.nunique() <= 2: # metrics for binary classification
    try:
        y_score = grid_rf.predict_proba(x_test_norm)[:,1]
    except:
        y_score = grid_rf.decision_function(x_test_norm)
    print(format_str % ('AUROC', roc_auc_score(y_test, y_score)))
# Plot confusion matrix
skplt.metrics.plot_confusion_matrix(y_test, y_pred)
log_loss(y_test, y_pred)
```
### Logistic Regression Model Evaluation
```
# Logistic regression model with different C values
parameters = {
'tol': [0.00001, 0.0001, 0.001],
'C': [1, 50, 100]
}
lgr = GridSearchCV(LogisticRegression(random_state=101, n_jobs=1, max_iter=1000),
param_grid=parameters,
cv=3,
n_jobs=1,
scoring='roc_auc'
)
lgr.fit(x_train_norm, y_train_s)
clf = lgr.best_estimator_
print(lgr.best_estimator_)
print("The best classifier score:",lgr.best_score_)
```
### Test Logistic Regression Model
```
y_pred1 = clf.predict(x_test_norm)
# Decimal places based on number of samples
dec = np.int64(np.ceil(np.log10(len(y_test))))
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred1), '\n')
print('Classification report')
print(classification_report(y_test, y_pred1, digits=dec))
print('Scalar Metrics')
format_str = '%%13s = %%.%if' % dec
if y_test.nunique() <= 2: # metrics for binary classification
    try:
        y_score1 = clf.predict_proba(x_test_norm)[:,1]
    except:
        y_score1 = clf.decision_function(x_test_norm)
    print(format_str % ('AUROC', roc_auc_score(y_test, y_score1)))
# Plot confusion matrix
skplt.metrics.plot_confusion_matrix(y_test, y_pred1)
log_loss(y_test, y_pred1)
```
# `MaxwellVacuumID`: An Einstein Toolkit thorn for generating initial data for Maxwell's equations
## Authors: Terrence Pierre Jacques, Patrick Nelson, & Zach Etienne
### Formatting improvements courtesy Brandon Clark
### NRPy+ Source Code for this module: [Maxwell/InitialData.py](../edit/Maxwell/InitialData.py) [\[**tutorial**\]](Tutorial-VacuumMaxwell_InitialData.ipynb) Constructs the SymPy expressions for toroidal dipole field initial data
## Introduction:
In this part of the tutorial, we will construct an Einstein Toolkit (ETK) thorn (module) that will set up *initial data* for two formulations of Maxwell's equations. In a [previous tutorial notebook](Tutorial-VacuumMaxwell_InitialData.ipynb), we used NRPy+ to construct the SymPy expressions for toroidal dipole initial data. This thorn is largely based on and should function similarly to the NRPy+ generated [`IDScalarWaveNRPy`](Tutorial-ETK_thorn-IDScalarWaveNRPy.ipynb) thorn.
We will construct this thorn in two steps.
1. Call on NRPy+ to convert the SymPy expressions for the initial data into one C-code kernel.
1. Write the C code and linkages to the Einstein Toolkit infrastructure (i.e., the .ccl files) to complete this Einstein Toolkit module.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Initialize needed Python/NRPy+ modules
1. [Step 2](#toroidal_id): NRPy+-generated C code kernels for toroidal dipole field initial data
1. [Step 3](#cclfiles): CCL files - Define how this module interacts and interfaces with the wider Einstein Toolkit infrastructure
1. [Step 3.a](#paramccl): `param.ccl`: specify free parameters within `MaxwellVacuumID`
1. [Step 3.b](#interfaceccl): `interface.ccl`: define needed gridfunctions; provide keywords denoting what this thorn provides and what it should inherit from other thorns
    1. [Step 3.c](#scheduleccl): `schedule.ccl`: schedule all functions used within `MaxwellVacuumID`, specify data dependencies within said functions, and allocate memory for gridfunctions
1. [Step 4](#cdrivers): C driver functions for ETK registration & NRPy+-generated kernels
1. [Step 4.a](#etkfunctions): Initial data function
1. [Step 4.b](#makecodedefn): `make.code.defn`: List of all C driver functions needed to compile `MaxwellVacuumID`
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Initialize needed Python/NRPy+ modules \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
```
# Step 1: Import needed core NRPy+ modules
from outputC import lhrh # NRPy+: Core C code output module
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import loop as lp # NRPy+: Generate C code loops
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
import os, sys # Standard Python modules for multiplatform OS-level functions
# Step 1a: Create directories for the thorn if they don't exist.
# Create directory for MaxwellVacuumID thorn & subdirectories in case they don't exist.
outrootdir = "MaxwellVacuumID/"
cmd.mkdir(os.path.join(outrootdir))
outdir = os.path.join(outrootdir,"src") # Main C code output directory
cmd.mkdir(outdir)
# Step 1b: This is an Einstein Toolkit (ETK) thorn. Here we
# tell NRPy+ that gridfunction memory access will
# therefore be in the "ETK" style.
par.set_parval_from_str("grid::GridFuncMemAccess","ETK")
```
<a id='toroidal_id'></a>
# Step 2: Constructing the Einstein Toolkit C-code calling functions that include the C code kernels \[Back to [top](#toc)\]
$$\label{toroidal_id}$$
Using SymPy, we construct the exact expressions for the toroidal dipole field initial data currently supported in NRPy+, documented in [Tutorial-VacuumMaxwell_InitialData.ipynb](Tutorial-VacuumMaxwell_InitialData.ipynb). We write the generated C code into different C files, corresponding to the type of initial data the user may want to choose at run time. Note that the code below can easily be extended to include other types of initial data.
```
import Maxwell.InitialData as mwid
# Set coordinate system. ETK only supports cartesian coordinates
CoordSystem = "Cartesian"
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
# set up ID sympy expressions - System I
mwid.InitialData()
# x,y,z = gri.register_gridfunctions("AUX",["x","y","z"])
AIU = ixp.register_gridfunctions_for_single_rank1("EVOL","AIU")
EIU = ixp.register_gridfunctions_for_single_rank1("EVOL","EIU")
psiI = gri.register_gridfunctions("EVOL","psiI")
# Set which system to use, which are defined in Maxwell/VacuumMaxwell_Flat_Cartesian_ID.py
par.set_parval_from_str("Maxwell.InitialData::System_to_use","System_II")
# set up ID sympy expressions - System II
mwid.InitialData()
AIIU = ixp.register_gridfunctions_for_single_rank1("EVOL","AIIU")
EIIU = ixp.register_gridfunctions_for_single_rank1("EVOL","EIIU")
psiII = gri.register_gridfunctions("EVOL","psiII")
GammaII = gri.register_gridfunctions("EVOL","GammaII")
Maxwell_ID_SymbExpressions = [\
lhrh(lhs=gri.gfaccess("out_gfs","AIU0"),rhs=mwid.AidU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","AIU1"),rhs=mwid.AidU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","AIU2"),rhs=mwid.AidU[2]),\
lhrh(lhs=gri.gfaccess("out_gfs","EIU0"),rhs=mwid.EidU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","EIU1"),rhs=mwid.EidU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","EIU2"),rhs=mwid.EidU[2]),\
lhrh(lhs=gri.gfaccess("out_gfs","psiI"),rhs=mwid.psi_ID),\
lhrh(lhs=gri.gfaccess("out_gfs","AIIU0"),rhs=mwid.AidU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","AIIU1"),rhs=mwid.AidU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","AIIU2"),rhs=mwid.AidU[2]),\
lhrh(lhs=gri.gfaccess("out_gfs","EIIU0"),rhs=mwid.EidU[0]),\
lhrh(lhs=gri.gfaccess("out_gfs","EIIU1"),rhs=mwid.EidU[1]),\
lhrh(lhs=gri.gfaccess("out_gfs","EIIU2"),rhs=mwid.EidU[2]),\
lhrh(lhs=gri.gfaccess("out_gfs","psiII"),rhs=mwid.psi_ID),\
lhrh(lhs=gri.gfaccess("out_gfs","GammaII"),rhs=mwid.Gamma_ID)]
declare_string = """
const double x = xGF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];
const double y = yGF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];
const double z = zGF[CCTK_GFINDEX3D(cctkGH, i0,i1,i2)];
"""
Maxwell_ID_CcodeKernel = fin.FD_outputC("returnstring",
Maxwell_ID_SymbExpressions,\
params="outCverbose=True")
Maxwell_ID_looped = lp.loop(["i2","i1","i0"],["0","0","0"],["cctk_lsh[2]","cctk_lsh[1]","cctk_lsh[0]"],\
["1","1","1"],["#pragma omp parallel for","",""],"",\
declare_string+Maxwell_ID_CcodeKernel).replace("time","cctk_time")\
.replace("xx0", "x")\
.replace("xx1", "y")\
.replace("xx2", "z")
# Write the C code kernel to file.
with open(os.path.join(outdir,"Maxwell_ID.h"), "w") as file:
file.write(str(Maxwell_ID_looped))
```
<a id='cclfiles'></a>
# Step 3: ETK `ccl` file generation \[Back to [top](#toc)\]
$$\label{cclfiles}$$
<a id='paramccl'></a>
## Step 3.a: `param.ccl`: specify free parameters within `MaxwellVacuumID` \[Back to [top](#toc)\]
$$\label{paramccl}$$
All parameters necessary for the computation of the initial data expressions are registered within NRPy+; we use this information to automatically generate `param.ccl`. NRPy+ also specifies default values for each parameter.
More information on `param.ccl` syntax can be found in the [official Einstein Toolkit documentation](https://einsteintoolkit.org/usersguide/UsersGuide.html#x1-184000D2.3).
```
def keep_param__return_type(paramtuple):
keep_param = True # We'll not set some parameters in param.ccl;
# e.g., those that should be #define'd like M_PI.
typestring = ""
# Separate thorns within the ETK take care of grid/coordinate parameters;
# thus we ignore NRPy+ grid/coordinate parameters:
if paramtuple.module == "grid" or paramtuple.module == "reference_metric":
keep_param = False
partype = paramtuple.type
if partype == "bool":
typestring += "BOOLEAN "
elif partype == "REAL":
if paramtuple.defaultval != 1e300: # 1e300 is a magic value indicating that the C parameter should be mutable
typestring += "CCTK_REAL "
else:
keep_param = False
elif partype == "int":
typestring += "CCTK_INT "
elif partype == "#define":
keep_param = False
elif partype == "char":
print("Error: parameter "+paramtuple.module+"::"+paramtuple.parname+
" has unsupported type: \""+ paramtuple.type + "\"")
sys.exit(1)
else:
print("Error: parameter "+paramtuple.module+"::"+paramtuple.parname+
" has unsupported type: \""+ paramtuple.type + "\"")
sys.exit(1)
return keep_param, typestring
with open(os.path.join(outrootdir,"param.ccl"), "w") as file:
file.write("""
# This param.ccl file was automatically generated by NRPy+.
# You are advised against modifying it directly; instead
# modify the Python code that generates it.
shares: grid
USES KEYWORD type
CCTK_KEYWORD initial_data "Type of initial data"
{
"toroid" :: "Toroidal Dipole field"
} "toroid"
restricted:
""")
paramccl_str = ""
for i in range(len(par.glb_Cparams_list)):
# keep_param is a boolean indicating whether we should accept or reject
# the parameter. singleparstring will contain the string indicating
# the variable type.
keep_param, singleparstring = keep_param__return_type(par.glb_Cparams_list[i])
if keep_param:
parname = par.glb_Cparams_list[i].parname
partype = par.glb_Cparams_list[i].type
singleparstring += parname + " \""+ parname +" (see NRPy+ for parameter definition)\"\n"
singleparstring += "{\n"
if partype != "bool":
singleparstring += " *:* :: \"All values accepted. NRPy+ does not restrict the allowed ranges of parameters yet.\"\n"
singleparstring += "} "+str(par.glb_Cparams_list[i].defaultval)+"\n\n"
paramccl_str += singleparstring
file.write(paramccl_str)
```
<a id='interfaceccl'></a>
## Step 3.b: `interface.ccl`: define needed gridfunctions; provide keywords denoting what this thorn provides and what it should inherit from other thorns \[Back to [top](#toc)\]
$$\label{interfaceccl}$$
`interface.ccl` declares all gridfunctions and determines how `MaxwellVacuumID` interacts with other Einstein Toolkit thorns.
The [official Einstein Toolkit documentation](https://einsteintoolkit.org/usersguide/UsersGuide.html#x1-179000D2.2) defines what must/should be included in an `interface.ccl` file.
```
evol_gfs_list = []
for i in range(len(gri.glb_gridfcs_list)):
if gri.glb_gridfcs_list[i].gftype == "EVOL":
evol_gfs_list.append( gri.glb_gridfcs_list[i].name+"GF")
# NRPy+'s finite-difference code generator assumes gridfunctions
# are alphabetized; not sorting may result in unnecessary
# cache misses.
evol_gfs_list.sort()
with open(os.path.join(outrootdir,"interface.ccl"), "w") as file:
file.write("""
# With "implements", we give our thorn its unique name.
implements: MaxwellVacuumID
# By "inheriting" other thorns, we tell the Toolkit that we
# will rely on variables/functions that exist within those
# thorns.
inherits: MaxwellVacuum grid
""")
```
<a id='scheduleccl'></a>
## Step 3.c: `schedule.ccl`: schedule all functions used within `MaxwellVacuumID`, specify data dependencies within said functions, and allocate memory for gridfunctions \[Back to [top](#toc)\]
$$\label{scheduleccl}$$
Official documentation on constructing ETK `schedule.ccl` files is found [here](https://einsteintoolkit.org/usersguide/UsersGuide.html#x1-187000D2.4).
```
with open(os.path.join(outrootdir,"schedule.ccl"), "w") as file:
file.write("""
# This schedule.ccl file was automatically generated by NRPy+.
# You are advised against modifying it directly; instead
# modify the Python code that generates it.
schedule Maxwell_InitialData at CCTK_INITIAL as Maxwell_InitialData
{
STORAGE: MaxwellVacuum::evol_variables[3]
LANG: C
} "Initial data for Maxwell's equations"
""")
```
<a id='cdrivers'></a>
# Step 4: C driver functions for ETK registration & NRPy+-generated kernels \[Back to [top](#toc)\]
$$\label{cdrivers}$$
Now that we have constructed the basic C code kernels and the needed Einstein Toolkit `ccl` files, we next write the driver functions for registering `MaxwellVacuumID` within the Toolkit and the C code kernels. Each of these driver functions is called directly from [`schedule.ccl`](#scheduleccl).
```
make_code_defn_list = []
def append_to_make_code_defn_list(filename):
if filename not in make_code_defn_list:
make_code_defn_list.append(filename)
return os.path.join(outdir,filename)
```
<a id='etkfunctions'></a>
## Step 4.a: Initial data function \[Back to [top](#toc)\]
$$\label{etkfunctions}$$
Here we define the initial data function and how it is called by the ETK, as specified in the `schedule.ccl` file.
```
with open(append_to_make_code_defn_list("InitialData.c"),"w") as file:
file.write("""
#include <math.h>
#include <stdio.h>
#include "cctk.h"
#include "cctk_Parameters.h"
#include "cctk_Arguments.h"
void Maxwell_InitialData(CCTK_ARGUMENTS)
{
DECLARE_CCTK_ARGUMENTS
DECLARE_CCTK_PARAMETERS
const CCTK_REAL *xGF = x;
const CCTK_REAL *yGF = y;
const CCTK_REAL *zGF = z;
#include "Maxwell_ID.h"
}
""")
```
<a id='makecodedefn'></a>
## Step 4.b: `make.code.defn`: List of all C driver functions needed to compile `MaxwellVacuumID` \[Back to [top](#toc)\]
$$\label{makecodedefn}$$
When constructing each C code driver function above, we called the `append_to_make_code_defn_list()` function, which built a list of each C code driver file. We'll now add each of those files to the `make.code.defn` file, used by the Einstein Toolkit's build system.
```
with open(os.path.join(outdir,"make.code.defn"), "w") as file:
file.write("""
# Main make.code.defn file for thorn MaxwellVacuumID
# Source files in this directory
SRCS =""")
filestring = ""
for i in range(len(make_code_defn_list)):
filestring += " "+make_code_defn_list[i]
if i != len(make_code_defn_list)-1:
filestring += " \\\n"
else:
filestring += "\n"
file.write(filestring)
```
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-ETK_thorn-MaxwellVacuumID.pdf](Tutorial-ETK_thorn-MaxwellVacuumID.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-ETK_thorn-MaxwellVacuumID")
```
# WorkFlow
### Imports
### Load the data
### Cleaning
### FE
### Data.corr()
### Analytics
### Preprocessing
### Decomposition
### Feature Selection
### Modelling
### Random Search
### Grid Search
## Imports
```
import random
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn
import torch,torchvision
from torch.nn import *
from torch.optim import *
# Preprocessing
from sklearn.preprocessing import (
StandardScaler,
RobustScaler,
MinMaxScaler,
MaxAbsScaler,
OneHotEncoder,
Normalizer,
Binarizer
)
# Decomposition
from sklearn.decomposition import PCA
from sklearn.decomposition import KernelPCA
# Feature Selection
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import chi2  # used by SelectKBest in feature_selection_prep_data
# Model Eval
from sklearn.compose import make_column_transformer
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_val_score,train_test_split
from sklearn.metrics import mean_absolute_error,mean_squared_error,accuracy_score,precision_score,f1_score,recall_score
# Models
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LogisticRegression,LogisticRegressionCV
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor,AdaBoostRegressor,VotingRegressor,BaggingRegressor,RandomForestRegressor
from sklearn.svm import SVR
from sklearn.ensemble import BaggingRegressor
from sklearn.ensemble import ExtraTreesRegressor
from catboost import CatBoost,CatBoostRegressor
from xgboost import XGBRegressor,XGBRFRegressor
from flaml import AutoML
# Other
import pickle
import wandb
PROJECT_NAME = 'House-Prices-Advanced-Regression-Techniques-V9'
device = 'cuda'
np.random.seed(21)
random.seed(21)
torch.manual_seed(21)
```
### Functions
```
def make_submission(model):
pass
def valid(model,X,y,valid=False):
preds = model.predict(X)
if valid:
results = {
'val mean_absolute_error':mean_absolute_error(y_true=y,y_pred=preds),
'val mean_squared_error':mean_squared_error(y_true=y,y_pred=preds),
}
else:
results = {
'mean_absolute_error':mean_absolute_error(y_true=y,y_pred=preds),
'mean_squared_error':mean_squared_error(y_true=y,y_pred=preds),
}
return results
def train(model,X_train,X_test,y_train,y_test,name):
wandb.init(project=PROJECT_NAME,name=name)
model.fit(X_train,y_train)
wandb.log(valid(model,X_train,y_train))
wandb.log(valid(model,X_test,y_test,True))
make_submission(model)
return model
def object_to_int(data,col):
data_col = data[col].to_dict()
idx = -1
labels_and_int_index = {}
for data_col_vals in data_col.values():
if data_col_vals not in labels_and_int_index.keys():
idx += 1
labels_and_int_index[data_col_vals] = idx
new_data = []
for data_col_vals in data_col.values():
new_data.append(labels_and_int_index[data_col_vals])
data[col] = new_data
return data,idx,labels_and_int_index,new_data
def fe(data,col,quantile_max_num=0.99,quantile_min_num=0.05):
max_num = data[col].quantile(quantile_max_num)
min_num = data[col].quantile(quantile_min_num)
print(max_num)
print(min_num)
data = data[data[col] < max_num]
data = data[data[col] > min_num]
return data
def decomposition(X,pca=False,kernal_pca=False):
if pca:
pca = PCA()
X = pca.fit_transform(X)
if kernal_pca:
kernal_pca = KernelPCA()
X = kernal_pca.fit_transform(X)
return X
def feature_selection_prep_data(model,X,y,select_from_model=False,variance_threshold=False,select_k_best=False,rfecv=False):
if select_from_model:
transform = SelectFromModel(estimator=model.fit(X, y))
X = transform.transform(X)
if variance_threshold:
transform = VarianceThreshold()
X = transform.fit_transform(X)
if select_k_best:
X = SelectKBest(chi2, k='all').fit_transform(X, y)
    if rfecv:
        selector = RFECV(model, step=1, cv=5).fit(X, y)
        X = selector.transform(X)
return X
def prep_data(X,transformer):
mct = make_column_transformer(
(transformer,list(X.columns)),
remainder='passthrough'
)
X = mct.fit_transform(X)
return X
```
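As a quick illustration (not part of the original workflow), the sketch below shows what two of the helpers above do: `object_to_int` replaces each unique string in a column with an integer index, and `fe` keeps only the rows that fall strictly inside the chosen quantile band. The toy DataFrame and its column names are made up for this example.
```
# Hypothetical toy data to exercise the helper functions defined above
toy = pd.DataFrame({
    'Street': ['Pave', 'Grvl', 'Pave', 'Pave'],
    'LotArea': [8450, 9600, 250000, 11250],
})

# Label-encode the string column: the first unseen value gets 0, the next gets 1, ...
toy, idx, mapping, _ = object_to_int(toy, 'Street')
print(mapping)                 # {'Pave': 0, 'Grvl': 1}
print(toy['Street'].tolist())  # [0, 1, 0, 0]

# Keep rows with LotArea strictly between the 5th and 99th percentiles
toy_clipped = fe(toy, 'LotArea', quantile_max_num=0.99, quantile_min_num=0.05)
print(toy_clipped.shape)
```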
## Load the data
```
data = pd.read_csv('./data/train.csv')
preproccessings = [StandardScaler,RobustScaler,MinMaxScaler,MaxAbsScaler,OneHotEncoder,Normalizer,Binarizer]
models = [
['KNeighborsRegressor',KNeighborsRegressor],
['LogisticRegression',LogisticRegression],
['LogisticRegressionCV',LogisticRegressionCV],
['DecisionTreeRegressor',DecisionTreeRegressor],
['GradientBoostingRegressor',GradientBoostingRegressor],
['AdaBoostRegressor',AdaBoostRegressor],
['RandomForestRegressor',RandomForestRegressor],
['BaggingRegressor',BaggingRegressor],
['GaussianNB',GaussianNB],
['ExtraTreesRegressor',ExtraTreesRegressor],
['CatBoost',CatBoost],
['CatBoostRegressor',CatBoostRegressor],
['XGBRegressor',XGBRegressor],
['XGBRFRegressor',XGBRFRegressor],
['ExtraTreesRegressor',ExtraTreesRegressor],
]
```
## Cleaning the data
```
X = data.drop('SalePrice',axis=1)
y = data['SalePrice']
str_cols = []
int_cols = []
for col_name,num_of_missing_rows,dtype in zip(list(X.columns),X.isna().sum(),X.dtypes):
if dtype == object:
str_cols.append(col_name)
else:
int_cols.append(col_name)
for str_col in str_cols:
X,idx,labels_and_int_index,new_data = object_to_int(X,str_col)
X.head()
nan_cols = []
for col_name,num_of_missing_rows,dtype in zip(list(X.columns),X.isna().sum(),X.dtypes):
if num_of_missing_rows > 0:
nan_cols.append(col_name)
for nan_col in nan_cols:
X[nan_col].fillna(X[nan_col].median(),inplace=True)
nan_cols = []
for col_name,num_of_missing_rows,dtype in zip(list(X.columns),X.isna().sum(),X.dtypes):
if num_of_missing_rows > 0:
nan_cols.append(col_name)
train(GradientBoostingRegressor(),X,X,y,y,name='baseline-without-fe')
X_old = X.copy()
for col_name in list(X.columns):
try:
X = X_old.copy()
X = fe(X,col_name)
train(GradientBoostingRegressor(),X,X,y,y,name=f'baseline-with-fe-{col_name}')
except:
print('*'*50)
print('*'*50)
X = X_old.copy()
X_corr = X_old.corr()
plt.figure(figsize=(12,7))
sns.heatmap(X_corr)
```
# <span style="color:Maroon">Trade Strategy
__Summary:__ <span style="color:Blue">In this code we shall test the results of the given model
```
# Import required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
np.random.seed(0)
import warnings
warnings.filterwarnings('ignore')
# User defined names
index = "S&P500"
filename_whole = "whole_dataset"+index+"_rf_model.csv"
filename_trending = "Trending_dataset"+index+"_rf_model.csv"
filename_meanreverting = "MeanReverting_dataset"+index+"_rf_model.csv"
date_col = "Date"
Rf = 0.01 #Risk free rate of return
# Get current working directory
mycwd = os.getcwd()
print(mycwd)
# Change to data directory
os.chdir("..")
os.chdir(str(os.getcwd()) + "\\Data")
# Read the datasets
df_whole = pd.read_csv(filename_whole, index_col=date_col)
df_trending = pd.read_csv(filename_trending, index_col=date_col)
df_meanreverting = pd.read_csv(filename_meanreverting, index_col=date_col)
# Convert index to datetime
df_whole.index = pd.to_datetime(df_whole.index)
df_trending.index = pd.to_datetime(df_trending.index)
df_meanreverting.index = pd.to_datetime(df_meanreverting.index)
# Head for whole dataset
df_whole.head()
df_whole.shape
# Head for Trending dataset
df_trending.head()
df_trending.shape
# Head for Mean Reverting dataset
df_meanreverting.head()
df_meanreverting.shape
# Merge results from both models to one
df_model = df_trending.append(df_meanreverting)
df_model.sort_index(inplace=True)
df_model.head()
df_model.shape
```
## <span style="color:Maroon">Functions
```
def initialize(df):
days, Action1, Action2, current_status, Money, Shares = ([] for i in range(6))
Open_price = list(df['Open'])
Close_price = list(df['Adj Close'])
Predicted = list(df['Predicted'])
Action1.append(Predicted[0])
Action2.append(0)
current_status.append(Predicted[0])
if(Predicted[0] != 0):
days.append(1)
if(Predicted[0] == 1):
Money.append(0)
else:
Money.append(200)
Shares.append(Predicted[0] * (100/Open_price[0]))
else:
days.append(0)
Money.append(100)
Shares.append(0)
return days, Action1, Action2, current_status, Predicted, Money, Shares, Open_price, Close_price
def Action_SA_SA(days, Action1, Action2, current_status, i):
if(current_status[i-1] != 0):
days.append(1)
else:
days.append(0)
current_status.append(current_status[i-1])
Action1.append(0)
Action2.append(0)
return days, Action1, Action2, current_status
def Action_ZE_NZE(days, Action1, Action2, current_status, i):
if(days[i-1] < 5):
days.append(days[i-1] + 1)
Action1.append(0)
Action2.append(0)
current_status.append(current_status[i-1])
else:
days.append(0)
Action1.append(current_status[i-1] * (-1))
Action2.append(0)
current_status.append(0)
return days, Action1, Action2, current_status
def Action_NZE_ZE(days, Action1, Action2, current_status, Predicted, i):
current_status.append(Predicted[i])
Action1.append(Predicted[i])
Action2.append(0)
days.append(days[i-1] + 1)
return days, Action1, Action2, current_status
def Action_NZE_NZE(days, Action1, Action2, current_status, Predicted, i):
current_status.append(Predicted[i])
Action1.append(Predicted[i])
Action2.append(Predicted[i])
days.append(1)
return days, Action1, Action2, current_status
def get_df(df, Action1, Action2, days, current_status, Money, Shares):
df['Action1'] = Action1
df['Action2'] = Action2
df['days'] = days
df['current_status'] = current_status
df['Money'] = Money
df['Shares'] = Shares
return df
def Get_TradeSignal(Predicted, days, Action1, Action2, current_status):
# Loop over 1 to N
for i in range(1, len(Predicted)):
# When model predicts no action..
if(Predicted[i] == 0):
if(current_status[i-1] != 0):
days, Action1, Action2, current_status = Action_ZE_NZE(days, Action1, Action2, current_status, i)
else:
days, Action1, Action2, current_status = Action_SA_SA(days, Action1, Action2, current_status, i)
# When Model predicts sell
elif(Predicted[i] == -1):
if(current_status[i-1] == -1):
days, Action1, Action2, current_status = Action_SA_SA(days, Action1, Action2, current_status, i)
elif(current_status[i-1] == 0):
days, Action1, Action2, current_status = Action_NZE_ZE(days, Action1, Action2, current_status, Predicted,
i)
else:
days, Action1, Action2, current_status = Action_NZE_NZE(days, Action1, Action2, current_status, Predicted,
i)
# When model predicts Buy
elif(Predicted[i] == 1):
if(current_status[i-1] == 1):
days, Action1, Action2, current_status = Action_SA_SA(days, Action1, Action2, current_status, i)
elif(current_status[i-1] == 0):
days, Action1, Action2, current_status = Action_NZE_ZE(days, Action1, Action2, current_status, Predicted,
i)
else:
days, Action1, Action2, current_status = Action_NZE_NZE(days, Action1, Action2, current_status, Predicted,
i)
return days, Action1, Action2, current_status
def Get_FinancialSignal(Open_price, Action1, Action2, Money, Shares, Close_price):
for i in range(1, len(Open_price)):
if(Action1[i] == 0):
Money.append(Money[i-1])
Shares.append(Shares[i-1])
else:
if(Action2[i] == 0):
# Enter new position
if(Shares[i-1] == 0):
Shares.append(Action1[i] * (Money[i-1]/Open_price[i]))
Money.append(Money[i-1] - Action1[i] * Money[i-1])
# Exit the current position
else:
Shares.append(0)
Money.append(Money[i-1] - Action1[i] * np.abs(Shares[i-1]) * Open_price[i])
else:
Money.append(Money[i-1] -1 *Action1[i] *np.abs(Shares[i-1]) * Open_price[i])
Shares.append(Action2[i] * (Money[i]/Open_price[i]))
Money[i] = Money[i] - 1 * Action2[i] * np.abs(Shares[i]) * Open_price[i]
return Money, Shares
def Get_TradeData(df):
# Initialize the variables
days,Action1,Action2,current_status,Predicted,Money,Shares,Open_price,Close_price = initialize(df)
# Get Buy/Sell trade signal
days, Action1, Action2, current_status = Get_TradeSignal(Predicted, days, Action1, Action2, current_status)
Money, Shares = Get_FinancialSignal(Open_price, Action1, Action2, Money, Shares, Close_price)
df = get_df(df, Action1, Action2, days, current_status, Money, Shares)
df['CurrentVal'] = df['Money'] + df['current_status'] * np.abs(df['Shares']) * df['Adj Close']
return df
def Print_Fromated_PL(active_days, number_of_trades, drawdown, annual_returns, std_dev, sharpe_ratio, year):
"""
Prints the metrics
"""
print("++++++++++++++++++++++++++++++++++++++++++++++++++++")
print(" Year: {0}".format(year))
print(" Number of Trades Executed: {0}".format(number_of_trades))
print("Number of days with Active Position: {}".format(active_days))
print(" Annual Return: {:.6f} %".format(annual_returns*100))
print(" Sharpe Ratio: {:.2f}".format(sharpe_ratio))
print(" Maximum Drawdown (Daily basis): {:.2f} %".format(drawdown*100))
print("----------------------------------------------------")
return
def Get_results_PL_metrics(df, Rf, year):
    # Days on which a position (long or short) was open
    df['tmp'] = np.where(df['current_status'] == 0, 0, 1)
    active_days = df['tmp'].sum()
    number_of_trades = np.abs(df['Action1']).sum()+np.abs(df['Action2']).sum()
    # Approximate maximum drawdown from a rolling 20-day window of portfolio value
    df['tmp_max'] = df['CurrentVal'].rolling(window=20).max()
    df['tmp_min'] = df['CurrentVal'].rolling(window=20).min()
    df['tmp'] = np.where(df['tmp_max'] > 0, (df['tmp_max'] - df['tmp_min'])/df['tmp_max'], 0)
    drawdown = df['tmp'].max()
    # Portfolio starts at a notional 100, so final value / 100 - 1 is the period return
    annual_returns = (df['CurrentVal'].iloc[-1]/100 - 1)
    std_dev = df['CurrentVal'].pct_change(1).std()
    sharpe_ratio = (annual_returns - Rf)/std_dev
    Print_Fromated_PL(active_days, number_of_trades, drawdown, annual_returns, std_dev, sharpe_ratio, year)
    return
```
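To make the position state machine above concrete, here is a minimal sketch (not part of the original notebook) that runs `Get_TradeData` on a tiny synthetic price series; the prices and predictions below are invented purely for illustration.
```
# Hypothetical mini-dataset: a long entry, a forced exit once the signal has been idle, then a short entry
toy = pd.DataFrame({
    'Open':      [100.0, 102.0, 101.0, 103.0, 104.0, 105.0, 106.0],
    'Adj Close': [101.0, 101.5, 102.5, 103.5, 104.5, 105.5, 106.5],
    'Predicted': [1, 0, 0, 0, 0, 0, -1],
}, index=pd.date_range('2020-01-01', periods=7))

toy_out = Get_TradeData(toy.copy())
# 'current_status' tracks the open position (+1 long, -1 short, 0 flat),
# 'days' counts how long the current state has persisted without a fresh signal,
# and 'CurrentVal' marks the portfolio to market starting from a notional 100.
print(toy_out[['Predicted', 'current_status', 'days', 'Money', 'Shares', 'CurrentVal']])
```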
```
# Change to Images directory
os.chdir("..")
os.chdir(str(os.getcwd()) + "\\Images")
```
## <span style="color:Maroon">Whole Dataset
```
df_whole_train = df_whole[df_whole["Sample"] == "Train"]
df_whole_test = df_whole[df_whole["Sample"] == "Test"]
df_whole_test_2019 = df_whole_test[df_whole_test.index.year == 2019]
df_whole_test_2020 = df_whole_test[df_whole_test.index.year == 2020]
output_train_whole = Get_TradeData(df_whole_train)
output_test_whole = Get_TradeData(df_whole_test)
output_test_whole_2019 = Get_TradeData(df_whole_test_2019)
output_test_whole_2020 = Get_TradeData(df_whole_test_2020)
output_train_whole["BuyandHold"] = (100 * output_train_whole["Adj Close"])/(output_train_whole.iloc[0]["Adj Close"])
output_test_whole["BuyandHold"] = (100*output_test_whole["Adj Close"])/(output_test_whole.iloc[0]["Adj Close"])
output_test_whole_2019["BuyandHold"] = (100 * output_test_whole_2019["Adj Close"])/(output_test_whole_2019.iloc[0]
["Adj Close"])
output_test_whole_2020["BuyandHold"] = (100 * output_test_whole_2020["Adj Close"])/(output_test_whole_2020.iloc[0]
["Adj Close"])
Get_results_PL_metrics(output_test_whole_2019, Rf, 2019)
Get_results_PL_metrics(output_test_whole_2020, Rf, 2020)
# Scatter plot to save fig
plt.figure(figsize=(10,5))
plt.plot(output_train_whole["CurrentVal"], 'b-', label="Value (Model)")
plt.plot(output_train_whole["BuyandHold"], 'r--', alpha=0.5, label="Buy and Hold")
plt.xlabel("Date", fontsize=12)
plt.ylabel("Value", fontsize=12)
plt.legend()
plt.title("Train Sample "+ str(index) + " RF Whole Dataset", fontsize=16)
plt.savefig("Train Sample Whole Dataset RF Model" + str(index) +'.png')
plt.show()
plt.close()
# Scatter plot to save fig
plt.figure(figsize=(10,5))
plt.plot(output_test_whole["CurrentVal"], 'b-', label="Value (Model)")
plt.plot(output_test_whole["BuyandHold"], 'r--', alpha=0.5, label="Buy and Hold")
plt.xlabel("Date", fontsize=12)
plt.ylabel("Value", fontsize=12)
plt.legend()
plt.title("Test Sample "+ str(index) + " RF Whole Dataset", fontsize=16)
plt.savefig("Test Sample Whole Dataset RF Model" + str(index) +'.png')
plt.show()
plt.close()
```
__Comments:__ <span style="color:Blue"> Based on its performance on the Train Sample, the model has clearly learnt the pattern rather than merely over-fitting. However, its performance on the Test Sample is very poor.
## <span style="color:Maroon">Segment Model
```
df_model_train = df_model[df_model["Sample"] == "Train"]
df_model_test = df_model[df_model["Sample"] == "Test"]
df_model_test_2019 = df_model_test[df_model_test.index.year == 2019]
df_model_test_2020 = df_model_test[df_model_test.index.year == 2020]
output_train_model = Get_TradeData(df_model_train)
output_test_model = Get_TradeData(df_model_test)
output_test_model_2019 = Get_TradeData(df_model_test_2019)
output_test_model_2020 = Get_TradeData(df_model_test_2020)
output_train_model["BuyandHold"] = (100 * output_train_model["Adj Close"])/(output_train_model.iloc[0]["Adj Close"])
output_test_model["BuyandHold"] = (100 * output_test_model["Adj Close"])/(output_test_model.iloc[0]["Adj Close"])
output_test_model_2019["BuyandHold"] = (100 * output_test_model_2019["Adj Close"])/(output_test_model_2019.iloc[0]
["Adj Close"])
output_test_model_2020["BuyandHold"] = (100 * output_test_model_2020["Adj Close"])/(output_test_model_2020.iloc[0]
["Adj Close"])
Get_results_PL_metrics(output_test_model_2019, Rf, 2019)
Get_results_PL_metrics(output_test_model_2020, Rf, 2020)
# Scatter plot to save fig
plt.figure(figsize=(10,5))
plt.plot(output_train_model["CurrentVal"], 'b-', label="Value (Model)")
plt.plot(output_train_model["BuyandHold"], 'r--', alpha=0.5, label="Buy and Hold")
plt.xlabel("Date", fontsize=12)
plt.ylabel("Value", fontsize=12)
plt.legend()
plt.title("Train Sample Hurst Segment RF Models "+ str(index), fontsize=16)
plt.savefig("Train Sample Hurst Segment RF Models" + str(index) +'.png')
plt.show()
plt.close()
# Scatter plot to save fig
plt.figure(figsize=(10,5))
plt.plot(output_test_model["CurrentVal"], 'b-', label="Value (Model)")
plt.plot(output_test_model["BuyandHold"], 'r--', alpha=0.5, label="Buy and Hold")
plt.xlabel("Date", fontsize=12)
plt.ylabel("Value", fontsize=12)
plt.legend()
plt.title("Test Sample Hurst Segment RF Models" + str(index), fontsize=16)
plt.savefig("Test Sample Hurst Segment RF Models" + str(index) +'.png')
plt.show()
plt.close()
```
__Comments:__ <span style="color:Blue"> Based on its performance on the Train Sample, the model has clearly learnt the pattern rather than merely over-fitting. On the Test Sample it also performs better than the single whole-dataset model (though it still does not beat the Buy and Hold strategy). Hurst Exponent based segmentation has definitely added value to the model.
# Titanic Survival
Titanic EDA and visualizations, with scikit-learn Pipelines for feature engineering, training, evaluation, and inference.
## Challenge Description
On April 15, 1912, during her maiden voyage, the widely considered “unsinkable” RMS Titanic sank after colliding with an iceberg. Unfortunately, there weren’t enough lifeboats for everyone onboard, resulting in the death of 1502 out of 2224 passengers and crew.
While there was some element of luck involved in surviving, it seems some groups of people were more likely to survive than others.
In this challenge, we ask you to build a predictive model that answers the question: “what sorts of people were more likely to survive?” using passenger data (ie name, age, gender, socio-economic class, etc).
### Data Dictionary
|**Variable**|**Definition**|**Values**|**Null Count**|
|-|-|-|-|
|PassengerId|Integer - unique id|892-1309|0|
|Survived|Integer - target|0=No, 1=Yes|0|
|Pclass|Integer - ticket class|1=1st, 2=2nd, 3=3rd|0|
|Name|String - passenger name|418 unique values|0|
|Sex|String - passenger sex|male 64%, female, 36%|0|
|Age|Float - passenger age|0.17 - 76|177|
|SibSp|Integer - # of siblings/spouses aboard|0-8|0|
|Parch|Integer - # of parents/children aboard|0-9|0|
|Ticket|String - ticket number|409 unique values|0|
|Fare|Float - price of ticket|0 - 512|0|
|Cabin|String - cabin number|89 unique values|687|
|Embarked|Enum - port of embarkation|C=Cherbourg, Q=Queenstown, S=Southampton|2|
**Train Record Count:** 891
**Test Record Count:** 418
**Variable Notes**
**pclass:** A proxy for socio-economic status (SES)
* 1st = Upper
* 2nd = Middle
* 3rd = Lower
**Age:**
* Age is fractional if less than 1
* If the age is estimated, it is in the form of xx.5 (see the short sketch after these notes)
**sibsp:**
The dataset defines family relations in this way:
* Sibling = brother, sister, stepbrother, stepsister
* Spouse = husband, wife (mistresses and fiancés were ignored)
**parch:**
The dataset defines family relations in this way...
* Parent = mother, father
* Child = daughter, son, stepdaughter, stepson
* Some children travelled only with a nanny, therefore parch=0 for them.
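For instance (a hedged sketch, not part of the official challenge description), the xx.5 convention mentioned in the age notes means estimated ages can be flagged directly:
```
import pandas as pd

# Hypothetical ages: a fractional infant age, two exact ages, and two estimated (xx.5) ages
ages = pd.Series([0.75, 22.0, 28.5, 40.0, 45.5])
estimated = (ages >= 1) & (ages % 1 == 0.5)
print(estimated.tolist())  # [False, False, True, False, True]
```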
### Imports and Environment Settings
```
import boto3
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import warnings
import pickle
import io
import re
from sagemaker import get_execution_role
from sklearn.compose import ColumnTransformer
from sklearn.model_selection import StratifiedShuffleSplit, GridSearchCV, KFold
from sklearn.preprocessing import Normalizer, OneHotEncoder, StandardScaler, OrdinalEncoder
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from io import StringIO
role = get_execution_role()
warnings.filterwarnings("ignore")
pd.options.display.max_rows = 10
pd.options.display.max_columns = 20
print("numpy version: {}".format(np.__version__))
print("pandas version: {}".format(pd.__version__))
print("seaborn version: {}\n".format(sns.__version__))
sns.set_style("whitegrid")
flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
sns.set_palette(flatui)
```
## Load the Data
```
# Read the CSV
BUCKET = 'bf-titanic-data'
MODEL_BUCKET = 'bf-titanic-model'
train_key = 'train.csv'
test_key = 'test.csv'
train_path = 's3://{}/{}'.format(BUCKET, train_key)
test_path = 's3://{}/{}'.format(BUCKET, test_key)
train_data = pd.read_csv(train_path)
test_data = pd.read_csv(test_path)
# Keep a clean copy of train
clean_copy = train_data.copy()
target_col='Survived'
```
## Basic Train Set Exploration
```
print('Train data shape: {}'.format(train_data.shape))
print('Test data shape: {}'.format(test_data.shape))
train_data.head()
train_data.info()
train_data.describe()
```
### Target Distribution
```
target_value_counts=train_data.Survived.value_counts()
print(target_value_counts)
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
clrs = ['#e74c3c', '#2ecc71']
fig = sns.barplot(x=target_value_counts.index,
y=target_value_counts,
capsize=.3,
palette=clrs)
plt.xlabel('0=No or 1=Yes')
plt.ylabel('Number of Passengers')
plt.title('Survival Distribution in Training Data')
plt.show(fig)
```
## Exploratory Data Analysis
### Survival By Passenger Fare
```
plt.figure(figsize=(12,8))
sns.distplot(train_data[train_data.Survived == 0]['Fare'],
             bins=10, color='#e74c3c', label='Did not survive')
sns.distplot(train_data[train_data.Survived == 1]['Fare'],
             bins=10, color='#2ecc71', label='Survived')
plt.title('Fares by Survival', fontsize=20)
plt.xlabel('Fare')
plt.ylabel('Density')
plt.legend()
plt.show()
```
### Survival by Passenger Sex
```
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.violinplot(x="Sex",
y="Survived",
data=train_data)
plt.xlabel('Passenger Sex')
plt.ylabel('Survived')
plt.title('Survival by Sex')
plt.show(fig)
```
### Survival by Pclass
```
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.violinplot(x="Pclass",
y="Survived",
data=train_data)
plt.xlabel('Passenger Class')
plt.title('Survival by Passenger Class')
plt.show(fig)
```
## Feature Engineering
### Title Feature
#### Create Title Feature
```
train_data['Title'] = train_data.Name.apply(lambda x: re.search(' ([A-Z][a-z]+)\.', x).group(1))
title_dict = {
'Capt': 'Officer',
'Col': 'Officer',
'Major': 'Officer',
'Dr': 'Professional',
'Rev': 'Professional',
'Jonkheer': 'Elite',
'Don': 'Elite',
'Sir' : 'Elite',
'the Countess':'Elite',
'Dona': 'Elite',
'Lady' : 'Elite',
'Mme': 'Elite',
"Ms": 'Mrs',
"Mrs" : 'Mrs',
"Mlle": 'Miss',
"Miss" : 'Miss',
"Mr" : 'Mr',
"Master" : 'Master'
}
train_data['Title'] = train_data.Title.map(title_dict)
train_data['Title'] = train_data.Title.fillna('Unknown')
```
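Any raw title that is not a key in `title_dict` ends up as `'Unknown'` via the `fillna` above; a quick check of which passengers (if any) fell through:
```
# Passengers whose extracted title was not covered by title_dict
print(train_data.loc[train_data['Title'] == 'Unknown', 'Name'])
```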
#### Distribution of Titles
```
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
fig = sns.countplot(x='Title',
capsize=.3,
palette=flatui,
data=train_data)
plt.xlabel('Passenger Title')
plt.ylabel('Number of Passengers')
plt.title('Title of Passengers')
plt.show(fig)
```
#### Survival by Title
```
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.violinplot(x="Title",
y="Survived",
data=train_data)
plt.xlabel('Passenger Title')
plt.ylabel('Survived')
plt.title('Survival of Titles')
plt.show(fig)
```
### Family Size
#### Create a Family Size Feature
```
train_data['FamilySize'] = train_data['SibSp'] + train_data['Parch'] + 1
```
#### Distribution of Family Size
```
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
sns.countplot(x='FamilySize',
capsize=.3,
palette=flatui,
data=train_data)
plt.xlabel('Family Size')
plt.ylabel('Number of Passengers')
plt.title('Family Sizes')
plt.show(fig)
```
#### Survival By Family Size
```
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.violinplot(x="FamilySize",
y="Survived",
data=train_data)
plt.xlabel('Family Size')
plt.ylabel('Survived')
plt.title('Survival of Family Size')
plt.show(fig)
```
### Fare Feature
#### Create Fare Category
```
#Filling missing fares
train_data.Fare = train_data.Fare.fillna(-0.1)
fare_intervals = (-1, 0, 8, 15, 31, 600)
fare_labels = ['None', 'Group1', 'Group2', 'Group3', 'Group4']
train_data["FareCat"] = pd.cut(train_data.Fare, fare_intervals, labels=fare_labels)
```
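Because `pd.cut` uses half-open intervals `(left, right]`, the `-0.1` placeholder for missing fares falls into the first bin and gets the `'None'` label; a tiny illustration using the variables defined above:
```
# -0.1 and 0.0 both land in (-1, 0] -> 'None'; 7.5 lands in (0, 8] -> 'Group1'
print(pd.cut([-0.1, 0.0, 7.5], fare_intervals, labels=fare_labels))
```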
#### Fare Category Distribution
```
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
sns.countplot(x='FareCat',
capsize=.3,
palette=flatui,
data=train_data)
plt.xlabel('Fare Category')
plt.title('Fare Category Distribution')
plt.show(fig)
```
#### Survival by Fare Category
```
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.violinplot(x="FareCat",
y="Survived",
data=train_data)
plt.xlabel('Fare Category')
plt.title('Survival by Fare Category')
plt.show(fig)
```
### Age Feature
```
# Impute missing ages by sex, class, and title.
train_data.loc[train_data.Age.isnull(), 'Age'] = train_data.groupby(['Sex','Pclass','Title']).Age.transform('median')
print('Null count: {}'.format(train_data['Age'].isnull().sum()))
```
#### Age Distribution Density
```
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.distplot(train_data['Age'],
bins=20)
plt.xlabel('Age')
plt.title('Distribution of Age')
plt.show(fig)
```
#### Age Density by Survival
```
plt.figure(figsize=(16,8))
g = sns.FacetGrid(train_data, col='Survived',size=7)
g = g.map(sns.distplot, "Age")
plt.show()
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.violinplot(x="FamilySize",
y="Survived",
data=train_data)
plt.xlabel('Family Size')
plt.ylabel('Survived')
plt.title('Survival of Family Size')
plt.show(fig)
```
#### Bin The Ages
```
train_data['AgeGroup'] = None
train_data.loc[((train_data['Sex'] == 'male') & (train_data['Age'] <= 15)), 'AgeGroup'] = 'Boy'
train_data.loc[((train_data['Sex'] == 'female') & (train_data['Age'] <= 15)), 'AgeGroup'] = 'Girl'
train_data.loc[((train_data['Sex'] == 'male') & (train_data['Age'] > 15)), 'AgeGroup'] = 'AdultMale'
train_data.loc[((train_data['Sex'] == 'female') & (train_data['Age'] > 15)), 'AgeGroup'] = 'AdultFemale'
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
sns.countplot(x='AgeGroup',
capsize=.3,
palette=flatui,
data=train_data)
plt.xlabel('Age Group')
plt.ylabel('Number of Passengers')
plt.title('Distribution of Age Groups')
plt.show(fig)
```
#### Survival Rate for Age Groups
```
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.violinplot(x="AgeGroup",
y="Survived",
data=train_data)
plt.xlabel('Age Group')
plt.ylabel('Survived')
plt.title('Survival of Age Groups')
plt.show(fig)
```
### Cabin Feature
#### Create Deck Feature
```
train_data['Deck'] = train_data['Cabin'].apply(lambda s: s[0] if pd.notnull(s) else 'M')
# Passengers in T deck changed to A deck
idx = train_data[train_data['Deck'] == 'T'].index
train_data.loc[idx, 'Deck'] = 'A'
# Group decks
train_data['Deck'] = train_data['Deck'].replace(['A', 'B', 'C'], 'ABC')
train_data['Deck'] = train_data['Deck'].replace(['D', 'E'], 'DE')
train_data['Deck'] = train_data['Deck'].replace(['F', 'G'], 'FG')
```
#### Passenger Distribution by Deck
```
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
sns.countplot(x='Deck',
capsize=.3,
palette=flatui,
data=train_data)
plt.xlabel('Deck')
plt.title('Distribution of Passengers by Deck')
plt.show(fig)
```
#### Survival by Deck
```
fig, ax = plt.subplots()
fig.set_size_inches(16, 8)
fig = sns.violinplot(x="Deck",
y="Survived",
data=train_data)
plt.xlabel('Deck')
plt.title('Survival by Deck')
plt.show(fig)
```
## Preprocessing
### Feature Engineering Method
```
# Revert to clean copy
train_data = clean_copy.copy()
def get_title(name):
title_search = re.search(r'([A-Za-z]+)\.', name)
if title_search:
return title_search.group(1)
return ''
def replace_titles(title):
    # 'Dr' and 'Countess' are grouped with the other rare/honorific titles,
    # so they need no separate handling here.
    if title in ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona']:
        return 'Rare'
    elif title == 'Mme':
        return 'Mrs'
    elif title in ['Mlle', 'Ms']:
        return 'Miss'
    else:
        return title
train_data['Parch'].value_counts()
test_data['Parch'].value_counts()
def feature_eng(X):
X['Title'] = X['Name'].apply(get_title)
X['Title'] = X['Title'].fillna('Miss')
X['Title'] = X['Title'].apply(replace_titles)
X.loc[X.Age.isnull(), 'Age'] = X.groupby(['Sex','Pclass','Title']).Age.transform('median')
X['Pclass'] = X['Pclass'].apply(lambda x: 'first' if x==1 else 'second' if x==2 else 'third')
binner = KBinsDiscretizer(encode='ordinal')
binner.fit(X[['Age']])
X['AgeBins'] = binner.transform(X[['Age']])
X['FamilySize'] = X['SibSp'] + X['Parch'] + 1
family_map = {1: 'Alone', 2: 'Small', 3: 'Small', 4: 'Small',
5: 'Large', 6: 'Large', 7: 'Large', 8: 'Large', 11: 'Large'}
X['GroupSize'] = X['FamilySize'].map(family_map)
X['WithFamily'] = (X['FamilySize']>1)
X['WithFamily'] = X['WithFamily'].apply(lambda x: 'yes' if x==1 else 'no')
X.loc[(X.Fare.isnull()), 'Fare'] = X.Fare.median()
X.loc[(X.Fare==0), 'Fare'] = X.Fare.median()
binner.fit(X[['Fare']])
X['FareBins'] = binner.transform(X[['Fare']])
X["Deck"] = X["Cabin"].str.slice(0,1)
X["Deck"] = X["Deck"].fillna("N")
idx = X[X['Deck'] == 'T'].index
X.loc[idx, 'Deck'] = 'A'
X['Embarked'].fillna(X['Embarked'].mode()[0], inplace=True)
X.drop('PassengerId', axis=1, inplace=True)
X.drop('Ticket', axis=1, inplace=True)
X.drop('Name', axis=1, inplace=True)
return X
# Test feature eng method
train_data_proc = feature_eng(train_data)
train_data_proc.head()
```
### Train-Validation Split
```
# Constants
SEED = 37
TRAIN_SPLIT = .15
scores = ['precision', 'recall']
train_data = clean_copy.copy()
# Split off some testing data
strat_split = StratifiedShuffleSplit(n_splits=1,
test_size=TRAIN_SPLIT,
random_state=SEED)
for train_index, val_index in strat_split.split(train_data, train_data[['Sex', 'Pclass']]):
X_train = train_data.loc[train_index]
X_val = train_data.loc[val_index]
X_test = test_data.copy()
y_train = X_train['Survived'].copy()
y_val = X_val['Survived'].copy()
X_train.drop('Survived', axis=1, inplace=True)
X_val.drop('Survived', axis=1, inplace=True)
print('X_train shape: {}'.format(X_train.shape))
print('X_val shape: {}'.format(X_val.shape))
print('{} training records'.format(len(X_train)))
print('{} training labels'.format(len(y_train)))
print('{} validation records'.format(len(X_val)))
print('{} validation labels'.format(len(y_val)))
X_train['dataset'] = 1
X_val['dataset'] = 2
X_test['dataset'] = 3
combined = pd.concat([X_train, X_val, X_test])
combined.reset_index(drop=True, inplace=True)
# call feature engineering method
X_eng = feature_eng(combined)
print('combined shape: {}'.format(X_eng.shape))
X_train_eng = X_eng[X_eng['dataset'] == 1]
X_val_eng = X_eng[X_eng['dataset'] == 2]
X_test_eng = X_eng[X_eng['dataset'] == 3]
X_train_eng.drop('dataset', axis=1, inplace=True)
X_val_eng.drop('dataset', axis=1, inplace=True)
X_test_eng.drop('dataset', axis=1, inplace=True)
X_train_eng.head()
```
### Build Feature Processing Pipeline
```
eng_feature_columns = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked',
'Title', 'AgeBins', 'FamilySize', 'GroupSize', 'WithFamily',
'FareBins', 'Deck']
numeric_features = ['Age', 'SibSp', 'Parch', 'Fare', 'FamilySize']
numeric_transformer = Pipeline([
('scaler', StandardScaler())
])
categorical_features = ['Pclass', 'Sex', 'Embarked', 'Title', 'Deck', 'GroupSize']
categorical_transformer = Pipeline([
('onehot', OneHotEncoder(handle_unknown='ignore'))
])
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)
])
```
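Before wiring the preprocessor into a classifier, it can be fitted on its own to sanity-check the transformed output; a minimal sketch (the exact width depends on the categories present in the data):
```
# Scaled numeric columns followed by the one-hot encoded categoricals
Xt = preprocessor.fit_transform(X_train_eng)
print(type(Xt), Xt.shape)
# On recent scikit-learn versions the expanded column names can be listed with
# preprocessor.get_feature_names_out()
```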
## Build and Evaluate a Logistic Regression Classifier
```
lr_params = [{'C': [1, 3, 5]}]
for score in scores:
print('Tuning hyper-parameters for %s \n' % score)
print ('Creating pipeline instance.')
train_pipeline = Pipeline([
('preprocessor', preprocessor),
('classifier', GridSearchCV(LogisticRegression(random_state=SEED),
lr_params,
scoring='%s_macro' % score,
verbose=10,
n_jobs=-1,
cv=8))])
print('Fitting the model.')
train_pipeline.fit(X_train_eng, y_train)
print('Tuning hyper-parameters for %s \n' % score)
print('Best parameters set found on development set: \n')
print(train_pipeline.named_steps['classifier'].best_params_, '\n')
print("Grid scores on development set:\n")
means = train_pipeline.named_steps['classifier'].cv_results_['mean_test_score']
stds = train_pipeline.named_steps['classifier'].cv_results_['std_test_score']
for mean, std, params in zip(means, stds, train_pipeline.named_steps['classifier'].cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print('Detailed classification report:\n')
print('The model is trained on the full development set.')
print('The scores are computed on the full evaluation set.\n')
y_true, y_pred = y_val, train_pipeline.predict(X_val_eng)
print(classification_report(y_val, y_pred), '\n')
```
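Since `GridSearchCV` refits on the full training data by default (`refit=True`), the tuned model is also directly accessible from the fitted pipeline; a small illustrative check using the names defined above:
```
# The refitted best LogisticRegression from the last grid search above
best_lr = train_pipeline.named_steps['classifier'].best_estimator_
print(best_lr)
```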
### Logistic Regression Confusion Matrix
```
cm = confusion_matrix(y_val, y_pred)
fig = plt.figure(figsize = (10,7))
ax = fig.add_subplot(111)
cax = ax.matshow(cm)
plt.title('Confusion matrix')
fig.colorbar(cax)
labels = ['Dead', 'Alive']
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
```
## Retrain and Predict
```
train_data = pd.concat([X_train_eng, X_val_eng])
y_train = pd.concat([y_train, y_val])
train_pipeline = Pipeline([
('preprocessor', preprocessor),
('classifier', LogisticRegression(random_state=SEED, C=3))
])
print('Fitting the model.')
train_pipeline.fit(train_data, y_train)
test_ids = test_data['PassengerId'].copy()
y_pred_test = train_pipeline.predict(X_test_eng)
final_preds = pd.concat([test_ids, pd.Series(y_pred_test, name='Survived')], axis=1)
```
#### Save Predictions and Trained Model
```
# Save final predictions
csv_buffer = StringIO()
final_preds.to_csv(csv_buffer, index=False)
s3_resource = boto3.resource('s3')
s3_resource.Object(MODEL_BUCKET, 'v1/prediction/finl_preds.csv').put(Body=csv_buffer.getvalue())
# Save the trained model pipeline
trained_model = pickle.dumps(train_pipeline)
s3_resource.Object(MODEL_BUCKET, 'v1/model/trained_model.pickle').put(Body=trained_model)
```
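For inference later on, the pickled pipeline can be pulled back from S3 and applied to already-engineered passenger records; a minimal sketch, assuming the same bucket and key as above:
```
# Load the trained pipeline back from S3 and score a few engineered test rows
obj = boto3.resource('s3').Object(MODEL_BUCKET, 'v1/model/trained_model.pickle').get()
loaded_pipeline = pickle.loads(obj['Body'].read())
print(loaded_pipeline.predict(X_test_eng.head()))
```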
```
from DEVDANmainloop import DEVDANmain, DEVDANmainID
from DEVDANbasic import DEVDAN
from utilsDEVDAN import dataLoader, plotPerformance
import random
import numpy as np
import torch
# random seed control
np.random.seed(0)
torch.manual_seed(0)
random.seed(0)
# load data
dataStreams = dataLoader('../dataset/rmnist2.mat')
print('Without Generative Phase')
allMetrics = []
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet0, performanceHistory0, allPerformance = DEVDANmain(DevdanNet,dataStreams, generative = False)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory0[0],performanceHistory0[1],performanceHistory0[2],
performanceHistory0[3],performanceHistory0[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet1, performanceHistory1, allPerformance = DEVDANmain(DevdanNet,dataStreams, generative = False)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory1[0],performanceHistory1[1],performanceHistory1[2],
performanceHistory1[3],performanceHistory1[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet2, performanceHistory2, allPerformance = DEVDANmain(DevdanNet,dataStreams, generative = False)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory2[0],performanceHistory2[1],performanceHistory2[2],
performanceHistory2[3],performanceHistory2[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet3, performanceHistory3, allPerformance = DEVDANmain(DevdanNet,dataStreams, generative = False)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory3[0],performanceHistory3[1],performanceHistory3[2],
performanceHistory3[3],performanceHistory3[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet4, performanceHistory4, allPerformance = DEVDANmain(DevdanNet,dataStreams, generative = False)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory4[0],performanceHistory4[1],performanceHistory4[2],
performanceHistory4[3],performanceHistory4[4])
# all results
# 0: accuracy
# 1: f1_score
# 2: precision_score
# 3: recall_score
# 4: training_time
# 5: testingTime
# 6: nHiddenLayer
# 7: nHiddenNode
meanResults = np.round_(np.mean(allMetrics,0), decimals=2)
stdResults = np.round_(np.std(allMetrics,0), decimals=2)
print('\n')
print('========== Performance ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)',stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)',stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)',stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)',stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)',stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)',stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)',stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)',stdResults[7].item())
print('Without Node Growing')
allMetrics = []
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnGrowing = False
DevdanNet0, performanceHistory0, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory0[0],performanceHistory0[1],performanceHistory0[2],
performanceHistory0[3],performanceHistory0[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnGrowing = False
DevdanNet1, performanceHistory1, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory1[0],performanceHistory1[1],performanceHistory1[2],
performanceHistory1[3],performanceHistory1[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnGrowing = False
DevdanNet2, performanceHistory2, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory2[0],performanceHistory2[1],performanceHistory2[2],
performanceHistory2[3],performanceHistory2[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnGrowing = False
DevdanNet3, performanceHistory3, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory3[0],performanceHistory3[1],performanceHistory3[2],
performanceHistory3[3],performanceHistory3[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnGrowing = False
DevdanNet4, performanceHistory4, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory4[0],performanceHistory4[1],performanceHistory4[2],
performanceHistory4[3],performanceHistory4[4])
# all results
# 0: accuracy
# 1: f1_score
# 2: precision_score
# 3: recall_score
# 4: training_time
# 5: testingTime
# 6: nHiddenLayer
# 7: nHiddenNode
meanResults = np.round_(np.mean(allMetrics,0), decimals=2)
stdResults = np.round_(np.std(allMetrics,0), decimals=2)
print('\n')
print('========== Performance ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)',stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)',stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)',stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)',stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)',stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)',stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)',stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)',stdResults[7].item())
print('Without Node Pruning')
allMetrics = []
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnPruning = False
DevdanNet0, performanceHistory0, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory0[0],performanceHistory0[1],performanceHistory0[2],
performanceHistory0[3],performanceHistory0[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnPruning = False
DevdanNet1, performanceHistory1, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory1[0],performanceHistory1[1],performanceHistory1[2],
performanceHistory1[3],performanceHistory1[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnPruning = False
DevdanNet2, performanceHistory2, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory2[0],performanceHistory2[1],performanceHistory2[2],
performanceHistory2[3],performanceHistory2[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnPruning = False
DevdanNet3, performanceHistory3, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory3[0],performanceHistory3[1],performanceHistory3[2],
performanceHistory3[3],performanceHistory3[4])
# initialization
DevdanNet = DEVDAN(dataStreams.nInput,dataStreams.nOutput)
DevdanNet.hnPruning = False
DevdanNet4, performanceHistory4, allPerformance = DEVDANmain(DevdanNet,dataStreams)
allMetrics.append(allPerformance)
plotPerformance(performanceHistory4[0],performanceHistory4[1],performanceHistory4[2],
performanceHistory4[3],performanceHistory4[4])
# all results
# 0: accuracy
# 1: f1_score
# 2: precision_score
# 3: recall_score
# 4: training_time
# 5: testingTime
# 6: nHiddenLayer
# 7: nHiddenNode
meanResults = np.round_(np.mean(allMetrics,0), decimals=2)
stdResults = np.round_(np.std(allMetrics,0), decimals=2)
print('\n')
print('========== Performance ==========')
print('Preq Accuracy: ', meanResults[0].item(), '(+/-)',stdResults[0].item())
print('F1 score: ', meanResults[1].item(), '(+/-)',stdResults[1].item())
print('Precision: ', meanResults[2].item(), '(+/-)',stdResults[2].item())
print('Recall: ', meanResults[3].item(), '(+/-)',stdResults[3].item())
print('Training time: ', meanResults[4].item(), '(+/-)',stdResults[4].item())
print('Testing time: ', meanResults[5].item(), '(+/-)',stdResults[5].item())
print('\n')
print('========== Network ==========')
print('Number of hidden layers: ', meanResults[6].item(), '(+/-)',stdResults[6].item())
print('Number of features: ', meanResults[7].item(), '(+/-)',stdResults[7].item())
```
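The three experiments above repeat the same initialise–run–plot pattern five times each; for additional runs the bookkeeping can be wrapped in a small helper. This is only a sketch that reuses the `DEVDAN`, `DEVDANmain`, and `plotPerformance` calls exactly as above, mirroring the keyword handling of those runs rather than assuming any defaults:
```
def run_ablation(n_runs=5, generative=True, growing=True, pruning=True):
    """Run DEVDAN n_runs times with the given switches and return mean/std metrics."""
    metrics = []
    for _ in range(n_runs):
        net = DEVDAN(dataStreams.nInput, dataStreams.nOutput)
        if not growing:
            net.hnGrowing = False
        if not pruning:
            net.hnPruning = False
        if generative:
            net, history, performance = DEVDANmain(net, dataStreams)
        else:
            net, history, performance = DEVDANmain(net, dataStreams, generative=False)
        metrics.append(performance)
        plotPerformance(history[0], history[1], history[2], history[3], history[4])
    mean = np.round_(np.mean(metrics, 0), decimals=2)
    std = np.round_(np.std(metrics, 0), decimals=2)
    return mean, std

# e.g. the "Without Node Growing" experiment:
meanResults, stdResults = run_ablation(growing=False)
```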
# Benchmarking and Testing
This notebook contains the generation, analysis, and visualisation of the data accompanying Project 1 in FYS4411. Several tests and sanity checks for the various methods are also included.
```
import sys
sys.path.append("../analysis/")
import numpy as np
import matplotlib.pyplot as plt
import analysis as src
from multiprocessing import Process, Queue
import pandas as pd
import time
from tqdm import tqdm
plt.style.use("../lib/rapport.mplstyle")
%load_ext autoreload
%autoreload 2
def saveto(fig, path, ncol=2):
    lgd = fig.legend(loc='lower left',  # mode='expand'
ncol=ncol,
bbox_to_anchor=(0.1, 1.02, 1, 0.2))
fig.savefig(f"../latex/figures/{path}.pdf", bbox_inches='tight')
```
### Testing of blocking method:
For testing, we generate a time series of random variables as $X_i = 0.5X_{i-1} + 0.5z_i$, with $z_i \sim N(0,1)$. Each $X_i$ is then normally distributed with mean 0 and stationary variance $1/3$, but is highly correlated with the previous values; the variance of the sample mean of $N$ such samples is, to a very good approximation, $1/N$, which is the analytical reference used below.
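The reference value follows directly from the process definition: with $X_i = \phi X_{i-1} + 0.5\,z_i$ and $\phi = 0.5$,

$$\mathrm{Var}(X) = \phi^2\,\mathrm{Var}(X) + 0.25 \;\Rightarrow\; \mathrm{Var}(X) = \tfrac{1}{3}, \qquad \mathrm{Cov}(X_i, X_{i+k}) = \phi^{k}\,\mathrm{Var}(X),$$

so for large $N$

$$\mathrm{Var}(\bar X) \approx \frac{\mathrm{Var}(X)}{N}\Big(1 + 2\sum_{k\geq 1}\phi^{k}\Big) = \frac{1}{3N}\cdot 3 = \frac{1}{N}.$$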
```
np.random.seed(42)
N= 2**18
X = [np.random.normal(0,1)]
for i in range(1, N):
X.append(0.5*X[-1] + 0.5*np.random.normal(0,1))
X = np.array(X)
plt.plot(X[:100])
plt.xlabel("t")
plt.ylabel("Amplitude")
plt.grid()
plt.show()
estimatedVar = src.blocking(X, degree = 10)
plt.plot(estimatedVar)
plt.plot((0,10), (1/N,1/N))
plt.xlabel("Degree of blocking")
plt.ylabel(r"$V(\bar{X})$")
plt.legend(["Estimated variance of mean", "Analytical variance of mean"])
plt.grid()
plt.show()
```
# Table of energies for varying particle number
```
conf = src.config()
conf["numPart"] = 1
conf["numDim"] = 3
conf["numSteps"] = 2
conf["stepLength"] = 0.5
conf["importanceSampling"] = 0 #using bruteforce sampling
conf["alpha"] = 0.5
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussianNumerical"
conf["Hamiltonian"] = "HarmonicOscillator"
Ns = [1, 2, 10, 100]
energies = []
variance = []
for N in Ns:
conf["NumPart"] = N
src.runner(conf)
localEnergy, _, _, _ = src.readData(conf)
energies.append(N*np.mean(localEnergy))
variance.append(np.var(localEnergy))
print(energies)
print(variance)
plt.plot(Ns, energies)
```
### One-body density for 1 particle, 1 dimension. Comparing Brute Force Sampling with Importance Sampling
```
conf = src.config()
conf["numPart"] = 1
conf["numDim"] = 1
conf["numSteps"] = 1000000
conf["stepLength"] = 0.5
conf["importanceSampling"] = 0 #using bruteforce sampling
conf["alpha"] = 0.5
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussian"
conf["Hamiltonian"] = "HarmonicOscillator"
src.runner(conf)
_, posBruteForce, _, _ = src.readData(conf)
conf["importanceSampling"] = 1 #switching to importance sampling
src.runner(conf)
_, posImportance, _, _ = src.readData(conf)
```
Calculating and plotting the densities:
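The analytical curve is the single-particle density of the trial wavefunction, $|\psi_\alpha(x)|^2 = \sqrt{2\alpha/\pi}\,e^{-2\alpha x^2}$, which for $\alpha = 0.5$ reduces to $e^{-x^2}/\sqrt{\pi}$.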
```
bins = np.linspace(-3, 3, 200)
dx = bins[1] - bins[0]
densityBruteForce = src.oneBodyDensity(posBruteForce[0], bins, mode = "1D")/conf["numSteps"]
densityImportance = src.oneBodyDensity(posImportance[0], bins, mode = "1D")/conf["numSteps"]
half = len(bins)//2
bins_ = bins[half:]
fig, ax = plt.subplots()
ax.plot(bins_, 1/np.sqrt(np.pi)*np.exp(-bins_**2), "--", label="Analytical")
ax.plot(bins_, densityBruteForce[half:], label="Brute Force")
ax.set_xlabel("Radial distance $r$")
ax.set_ylabel(r"$\rho(r)$")
ax.plot(bins_, densityImportance[half:], linestyle='dotted', label="Importance")
plt.grid()
plt.show()
saveto(fig, "density1", ncol=2)
```
### Radial one-body density for 2 particles, 3 dimensions. Comparing Brute Force Sampling with Importance Sampling
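In three dimensions the corresponding radial density per particle is $\rho(r) = 4\pi r^2\,(2\alpha/\pi)^{3/2} e^{-2\alpha r^2}$, i.e. $4 r^2 e^{-r^2}/\sqrt{\pi}$ at $\alpha = 0.5$; since the histogrammed density below is accumulated over both particles, it is compared against twice this curve.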
```
conf = src.config()
conf["numPart"] = 2
conf["numDim"] = 3
conf["numSteps"] = 1000000
conf["stepLength"] = 0.5
conf["importanceSampling"] = 0 #using bruteforce sampling
conf["alpha"] = 0.5
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussian"
conf["Hamiltonian"] = "HarmonicOscillator"
src.runner(conf)
_, posBruteForce, _, _ = src.readData(conf)
conf["importanceSampling"] = 1 #switching to importance sampling
src.runner(conf)
_, posImportance, _, _ = src.readData(conf)
bins = np.linspace(0, 3, 200)
dx = bins[1] - bins[0]
densityBruteForce = src.oneBodyDensity(posBruteForce[0].reshape(-1,conf["numDim"]), bins)/conf["numSteps"]
densityImportance = src.oneBodyDensity(posImportance[0].reshape(-1,conf["numDim"]), bins)/conf["numSteps"]
fig, ax = plt.subplots()
ax.plot(bins, 2*4/np.sqrt(np.pi)*np.exp(-bins**2)*bins**2, "--", label="Analytical")
ax.plot(bins, densityBruteForce, label="Brute Force")
ax.set_xlabel("Radial distance $r$")
ax.set_ylabel(r"$\rho(r)$")
ax.plot(bins, densityImportance, linestyle='dotted', label="Importance")
plt.grid()
plt.show()
saveto(fig, "density2", ncol=2)
```
### Local energy for various $\alpha$ for 2 particles in 3 dimensions in the harmonic oscillator. Brute force and importance sampling are used and checked against analytical results to verify that the methods work correctly in higher dimensions.
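For this non-interacting system the trial wavefunction gives the closed-form reference (with $\hbar = m = \omega = 1$)

$$\langle E_L\rangle = dN\left(\frac{\alpha}{2} + \frac{1}{8\alpha}\right),$$

which is minimal, with zero variance, at $\alpha = 0.5$; the dashed analytical curves in the plots below are this expression and the corresponding variance of $E_L$.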
#### Brute force sampling:
```
conf = src.config()
conf["numPart"] = 2
conf["numDim"] = 3
conf["numSteps"] = 2**17
conf["stepLength"] = 0.8
conf["importanceSampling"] = 0 #using bruteforce sampling
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussian"
conf["Hamiltonian"] = "HarmonicOscillator"
x = np.linspace(0.3, 0.8, 11)
E = []
E_var = []
Var = []
for alpha in tqdm(x):
conf["alpha"] = alpha
src.runner(conf)
localEnergies, _, _, acceptanceRate = src.readData(conf)
E.append(np.mean(localEnergies[0]))
E_var.append(np.std(localEnergies[0])**2)
Var.append(src.blocking(localEnergies[0], degree=15))
print("Done!")
print(f"Acceptance Rate: {acceptanceRate[0]}")
```
#### Checking appropriate degree of blocking
```
fig, ax = plt.subplots()
ax.plot(Var[-1])
ax.plot(9, Var[-1][9], "ko")
ax.set_xlabel("Blocking Strength")
ax.set_ylabel(r"$Var\langle E \rangle$")
plt.grid()
plt.show()
saveto(fig, "blocking1")
```
#### Plotting estimated energy as a function of alpha for 2 particles in 3 dimensions, harmonic oscillator:
```
E = np.array(E)
E_var = np.array(E_var)
Var = np.array(Var)
std = np.sqrt(Var[:,9])
fig = plt.figure()
plt.errorbar(x, E, std)
plt.plot(x, 6*(1/(8*x) + 1/2*x), "--")
plt.xlabel("Alpha")
plt.ylabel("<E>")
plt.legend([ "Analytical","Brute Force Sampling"])
plt.grid()
plt.show()
fig.savefig("figures/energy_bruteforce1.pdf", bbox_inches = "tight")
fig = plt.figure()
plt.plot(x, E_var)
plt.plot(x, 6*(0.25 + 3*(1/(8*x) - 0.5*x)**2 - (1/(8*x) + 0.5*x)**2), "--")
plt.xlabel("Alpha")
plt.ylabel("V(E)")
plt.legend(["Brute Force Sampling", "Analytical"])
plt.grid()
plt.show()
fig.savefig("figures/variance_bruteforce1.pdf", bbox_inches = "tight")
```
#### Importance Sampling:
```
conf = src.config()
conf["numPart"] = 2
conf["numDim"] = 3
conf["numSteps"] = 2**17
conf["stepLength"] = 0.5
conf["importanceSampling"] = 1 #using importance sampling
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussian"
conf["Hamiltonian"] = "HarmonicOscillator"
x = np.linspace(0.3, 0.8, 11)
ImE = []
ImE_var = []
ImVar = []
for alpha in tqdm(x):
conf["alpha"] = alpha
src.runner(conf)
localEnergies, _, _, acceptanceRate = src.readData(conf)
ImE.append(np.mean(localEnergies[0]))
ImE_var.append(np.std(localEnergies[0])**2)
ImVar.append(src.blocking(localEnergies[0], degree=15))
print("Done!")
print(f"Acceptance Rate: {acceptanceRate[0]}")
fig, ax = plt.subplots()
ax.plot(Var[-1], label="Brute Force")
ax.plot(9, Var[-1][9], "ko")
ax.plot(ImVar[-1], label="Importance")
ax.plot(6, ImVar[-1][6], "ko")
ax.set_xlabel("Blocking Strength")
ax.set_ylabel(r"$Var\langle E \rangle$")
plt.grid()
plt.show()
saveto(fig, "blocking1")
ImE = np.array(ImE)
ImE_var = np.array(ImE_var)
ImVar = np.array(ImVar)
Imstd = np.sqrt(ImVar[:,8])
E = np.array(E)
E_var = np.array(E_var)
Var = np.array(Var)
std = np.sqrt(Var[:,9])
fig = plt.figure()
plt.plot(x, 6*(1/(8*x) + 1/2*x), "--", label="Analytical")
plt.errorbar(x, E, std, label="Brute Force")
plt.errorbar(x, ImE, Imstd, label="Importance")
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$\langle E_{L}\rangle$")
plt.grid()
saveto(fig, "energy_importance1", ncol=2)
fig = plt.figure()
plt.plot(x, 6*(0.25 + 3*(1/(8*x) - 0.5*x)**2 - (1/(8*x) + 0.5*x)**2), "--", label="Analytical")
plt.plot(x, E_var, label="Brute Force")
plt.plot(x, ImE_var, label="Importance")
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$Var(\langle E_{L}\rangle)$")
plt.grid()
plt.show()
saveto(fig, "variance_importance1")
```
### Repeated analysis for 40 particles
#### Brute Force
```
conf = src.config()
cutoff = 2000
conf["numPart"] = 40
conf["numDim"] = 3
conf["numSteps"] = 2**20 + cutoff
conf["stepLength"] = 1.2
conf["importanceSampling"] = 0 #using bruteforce sampling
conf["alpha"] = 0.3
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussian"
conf["Hamiltonian"] = "HarmonicOscillator"
src.runner(conf, verbose = True)
localEnergies, _, _, acceptanceRate = src.readData(conf)
E = np.mean(localEnergies[0][cutoff:])
Var = src.blocking(localEnergies[0][cutoff:], degree=18)
print(f"Acceptance Rate: {acceptanceRate[0]}")
plt.plot(Var)
plt.grid()
plt.show()
std = np.sqrt(Var[13])
a = conf["alpha"]
E_analytical = 3*40*(1/(8*a) + 1/2*a)
print(f"Numerical: <E> = {E} +- {std}")
print(f"Analytical: <E> = {E_analytical}")
```
#### Importance Sampling
```
conf = src.config()
cutoff = 2000
conf["numPart"] = 40
conf["numDim"] = 3
conf["numSteps"] = 2**20 + cutoff
conf["importanceSampling"] = 1 #importance sampling
conf["alpha"] = 0.3
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussian"
conf["Hamiltonian"] = "HarmonicOscillator"
conf["stepLength"] = 0.5
src.runner(conf)
localEnergies1, _, _, acceptanceRate1 = src.readData(conf)
E1 = np.mean(localEnergies1[0][cutoff:])
Var1 = src.blocking(localEnergies1[0][cutoff:], degree=18)
print(f"Step length: {conf['stepLength']}. Acceptance rate: {acceptanceRate1}")
conf["stepLength"] = 1
src.runner(conf)
localEnergies2, _, _, acceptanceRate2 = src.readData(conf)
E2 = np.mean(localEnergies2[0][cutoff:])
Var2 = src.blocking(localEnergies2[0][cutoff:], degree=18)
print(f"Step length: {conf['stepLength']}. Acceptance rate: {acceptanceRate2}")
conf["stepLength"] = 2
src.runner(conf)
localEnergies3, _, _, acceptanceRate3 = src.readData(conf)
E3 = np.mean(localEnergies3[0][cutoff:])
Var3 = src.blocking(localEnergies3[0][cutoff:], degree=18)
print(f"Step length: {conf['stepLength']}. Acceptance rate: {acceptanceRate3}")
fig = plt.figure()
plt.plot(Var1, label=r"$\delta = 0.5$")
plt.plot(Var2, label=r"$\delta = 1$")
plt.plot(Var3, label=r"$\delta = 2$")
plt.plot(11, Var1[11], "ko")
plt.plot(10, Var2[10], "ko")
plt.plot(11, Var3[11], "ko")
plt.xlabel("Blocking Strength")
plt.ylabel(r"$ Var(\langle E\rangle)$")
plt.grid()
plt.show()
saveto(fig, "blocking3", ncol=3)
std1 = np.sqrt(Var1[11])
std2 = np.sqrt(Var2[10])
std3 = np.sqrt(Var3[11])
a = conf["alpha"]
E_analytical = 3*40*(1/(8*a) + 1/2*a)
print(f"dt = 0.5: <E> = {E1} +- {std1}")
print(f"dt = 1: <E> = {E2} +- {std2}")
print(f"dt = 2: <E> = {E3} +- {std3}")
print(f"Analytical: <E> = {E_analytical}")
```
### Repeated analysis for numerically calculated laplacian
```
conf = src.config()
conf["numPart"] = 2
conf["numDim"] = 3
conf["numSteps"] = 2**17
conf["stepLength"] = 0.5
conf["importanceSampling"] = 0
conf["alpha"] = 0.5
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussianNumerical" #numerical laplacian
conf["Hamiltonian"] = "HarmonicOscillator"
x = np.linspace(0.3, 0.8, 21)
E = []
Var = []
for alpha in x:
conf["alpha"] = alpha
src.runner(conf)
localEnergies, _, _, acceptanceRate = src.readData(conf)
E.append(np.mean(localEnergies[0]))
Var.append(src.blocking(localEnergies[0], degree=15))
print("Done!")
print(f"Acceptance Rate: {acceptanceRate[0]}")
E = np.array(E)
Var = np.array(Var)
std = np.sqrt(Var[:,13])
fig = plt.figure()
plt.plot(x, 6*(1/(8*x) + 1/2*x), "--", label="Analytical")
plt.errorbar(x, E, std, label="Numerical")
plt.xlabel(r"$\alpha$")
plt.ylabel(r"$\langle E_{L}\rangle$")
plt.grid()
saveto(fig, "numericalLap", ncol=2)
```
#### CPU-time differences between analytical and numerical laplacian
```
conf = src.config()
conf["numDim"] = 3
conf["stepLength"] = 1
conf["importanceSampling"] = 0
conf["alpha"] = 0.4
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Hamiltonian"] = "HarmonicOscillator"
timeAnalytic = []
timeNumeric = []
timeInteractive = []
cycles = 4000000
N = np.array([1, 3, 10, 30, 100])
for n in tqdm(N):
conf["InitialState"] = "RandomUniform"
conf["numPart"] = n
conf["numSteps"] = cycles/n
conf["Wavefunction"] = "SimpleGaussian"
start = time.time()
src.runner(conf)
end = time.time()
timeAnalytic.append((end - start)/conf["numSteps"])
conf["Wavefunction"] = "SimpleGaussianNumerical"
start = time.time()
src.runner(conf)
end = time.time()
timeNumeric.append((end - start)/conf["numSteps"])
#conf["InitialState"] = "HardshellInitial"
#conf["Wavefunction"] = "HardshellWavefunction"
#start = time.time()
#src.runner(conf)
#end = time.time()
#timeInteractive.append((end - start)/conf["numSteps"])
fig = plt.figure()
plt.plot(N,timeAnalytic, '.-', label="Analytical")
plt.plot(N, timeNumeric, '.-', label="Numerical")
plt.xscale("log")
plt.yscale("log")
plt.xlabel("number of particles")
plt.ylabel("time per cycle")
plt.grid(which="both")
plt.show()
saveto(fig, "numericalTime")
#fig.savefig("figures/numericalTime.pdf", bbox_inches = "tight")
```
### Thermalization of many particles, using Brute Force Sampling and Importance Sampling
```
conf = src.config()
conf["numDim"] = 3
conf["numSteps"] = 10000
conf["stepLength"] = 1
conf["importanceSampling"] = 0 #importance sampling
conf["alpha"] = 0.3
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussian"
conf["Hamiltonian"] = "HarmonicOscillator"
conf["numPart"] = 1
src.runner(conf)
localEnergies1, _, _, acceptanceRate = src.readData(conf)
print(acceptanceRate[0])
conf["numPart"] = 10
src.runner(conf)
localEnergies2, _, _, acceptanceRate = src.readData(conf)
print(acceptanceRate[0])
conf["numPart"] = 20
src.runner(conf)
localEnergies3, _, _, acceptanceRate = src.readData(conf)
print(acceptanceRate[0])
conf["numPart"] = 100
src.runner(conf)
localEnergies4, _, _, acceptanceRate = src.readData(conf)
print(acceptanceRate[0])
plt.plot(localEnergies1[0][:300])
plt.xlabel("cycles")
plt.ylabel("$E_L$")
plt.grid()
plt.show()
plt.plot(localEnergies2[0][:2000])
plt.xlabel("cycles")
plt.ylabel("$E_L$")
plt.grid()
plt.show()
plt.plot(localEnergies3[0][:5000])
plt.xlabel("cycles")
plt.ylabel("$E_L$")
plt.grid()
plt.show()
plt.plot(localEnergies4[0])
plt.xlabel("cycles")
plt.ylabel("$E_L$")
plt.grid()
plt.show()
conf["numPart"] = 100
conf["importanceSampling"] = 1
conf["stepLength"] = 1.6
src.runner(conf, verbose = True)
localEnergies, _, _, acceptanceRate = src.readData(conf)
print(f"Acceptance Rate: {acceptanceRate}")
plt.plot(localEnergies[0])
plt.xlabel("cycles")
plt.ylabel("$E_L$")
plt.grid()
plt.show()
```
### One-body density for two interacting particles in 1D, harmonic oscillator
```
conf = src.config()
conf["numPart"] = 2
conf["numDim"] = 1
conf["numSteps"] = 4000000
conf["stepLength"] = 0.1
conf["importanceSampling"] = 1
conf["alpha"] = 0.5
conf["omega"] = 1
conf["InitialState"] = "HardshellInitial"
conf["Wavefunction"] = "HardshellWavefunction"
conf["Hamiltonian"] = "HarmonicOscillator"
conf["a"] = 0
src.runner(conf)
_, posNoninteracting, _, acceptanceRate = src.readData(conf)
print(f"Acceptance Rate: {acceptanceRate}")
conf["a"] = 1
src.runner(conf)
_, posInteracting, _, acceptanceRate = src.readData(conf)
print(f"Acceptance Rate: {acceptanceRate}")
conf["numPart"] = 3
src.runner(conf)
_, posInteracting3Part, _, acceptanceRate = src.readData(conf)
print(f"Acceptance Rate: {acceptanceRate}")
bins = np.linspace(-3, 3, 200)
densityNoninteracting = src.oneBodyDensity(posNoninteracting[0], bins, mode = "1D")/conf["numSteps"]
densityInteracting = src.oneBodyDensity(posInteracting[0], bins, mode = "1D")/conf["numSteps"]
densityInteracting3Part = src.oneBodyDensity(posInteracting3Part[0], bins, mode = "1D")/conf["numSteps"]
```
#### Comparison of non-interacting and interacting densities
```
fig = plt.figure()
plt.plot(bins, densityNoninteracting, label="Non-interacting")
plt.plot(bins, densityInteracting, label="N=2")
plt.plot(bins, densityInteracting3Part, label="N=3")
#plt.plot(bins, 2/np.sqrt(np.pi)*np.exp(-bins**2), "--")
plt.xlabel("Radial distance $r$")
plt.ylabel(r"$\rho(r)$")
plt.grid()
plt.show()
saveto(fig, "interactingDensity")
```
#### Interacting
```
plt.plot(bins, densityInteracting)
plt.plot(bins, 2/np.sqrt(np.pi)*np.exp(-bins**2), "--")
plt.xlabel("L")
plt.ylabel("number of particles per L")
plt.grid()
plt.show()
```
#### Three interacting particles
```
plt.plot(bins, densityInteracting3Part)
plt.plot(bins, 2/np.sqrt(np.pi)*np.exp(-bins**2), "--")
plt.xlabel("L")
plt.ylabel("number of particles per L")
plt.grid()
plt.show()
```
### One-body density of many interacting particles in 2D
#### Non-interacting
```
conf = src.config()
conf["directory"] = "data1"
conf["threads"] = 8
conf["numPart"] = 10
conf["numDim"] = 2
conf["numSteps"] = 1000000
conf["stepLength"] = 0.5
conf["alpha"] = 0.4
conf["a"] = 1
conf["omega"] = 1
src.runner(conf, verbose = True)
r = np.linspace(-5, 5, 200)
def f(q,i):
pos = pd.read_csv(f"data1/configuration_{i}.txt", sep ="\n", header = None).values.reshape(-1,2)
density = src.oneBodyDensity(pos, r, mode = "2D")
q.put(density)
q = Queue()
processes = [Process(target = f, args=(q,i)) for i in range(conf["threads"])]
for p in processes:
p.start()
density = q.get()
for i in range(conf["threads"]-1):
density += q.get()
density /= conf["threads"]
fig = plt.figure(figsize = (8,6))
ax = fig.add_subplot(111)
ax.set_aspect("equal")
ax.pcolormesh(density)
plt.show()
```
#### Interacting
```
conf = src.config()
conf["directory"] = "data2"
conf["threads"] = 8
conf["numPart"] = 10
conf["numDim"] = 2
conf["numSteps"] = 1000000
conf["stepLength"] = 0.01
conf["importanceSampling"] = 1
conf["alpha"] = 0.5
conf["a"] = 0.7
conf["omega"] = 1
conf["InitialState"] = "HardshellInitial"
conf["Wavefunction"] = "HardshellWavefunction"
conf["Hamiltonian"] = "HarmonicOscillator"
src.runner(conf, verbose = True)
r = np.linspace(-5, 5, 200)
def f(q,i):
pos = pd.read_csv(f"data2/configuration_{i}.txt", sep ="\n", header = None).values.reshape(-1,2)
density = src.oneBodyDensity(pos, r, mode = "2D")
q.put(density)
q = Queue()
processes = [Process(target = f, args=(q,i)) for i in range(conf["threads"])]
for p in processes:
p.start()
density = q.get()
for i in range(conf["threads"]-1):
density += q.get()
density /= conf["threads"]
fig = plt.figure(figsize = (8,6))
ax = fig.add_subplot(111)
ax.set_aspect("equal")
ax.pcolormesh(density)
plt.show()
```
### Gradient descent
```
conf = src.config()
conf["numPart"] = 2
conf["numDim"] = 3
conf["numSteps"] = 10000
conf["stepLength"] = 1
conf["importanceSampling"] = 1
conf["omega"] = 1
conf["InitialState"] = "RandomUniform"
conf["Wavefunction"] = "SimpleGaussian"
conf["Hamiltonian"] = "HarmonicOscillator"
alphas = []
mu = 0.01
a0s = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 1.5]
for a0 in a0s:
conf["alpha"] = a0
alphaArray = []
for i in range(50):
src.runner(conf)
localEnergies, _, psiGrad, _ = src.readData(conf)
grad = src.calculateGradient(localEnergies, psiGrad)
conf["alpha"] -= mu*grad
alphaArray.append(conf["alpha"])
alphas.append(alphaArray)
fig = plt.figure()
for (a0, alpha) in zip(a0s, alphas):
plt.plot(alpha, c="skyblue")
plt.xlabel("step")
plt.ylabel(r"$\alpha$")
plt.grid()
plt.show()
saveto(fig, "gd")
```
### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
```
## Player Count
* Display the total number of players
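One possible sketch of this step is shown below. The column name `SN` (the player's screen name) is an assumption about the CSV layout, since the columns are not shown in this notebook:

```
# Count each player once (the "SN" column name is assumed).
total_players = purchase_data["SN"].nunique()
player_count_df = pd.DataFrame({"Total Players": [total_players]})
player_count_df
```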
## Purchasing Analysis (Total)
* Run basic calculations to obtain number of unique items, average price, etc.
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
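A possible sketch, again assuming column names such as `Item ID` and `Price` that are not confirmed by this notebook:

```
# Basic purchasing summary (column names are assumed).
purchasing_summary = pd.DataFrame({
    "Number of Unique Items": [purchase_data["Item ID"].nunique()],
    "Average Price": [purchase_data["Price"].mean()],
    "Number of Purchases": [len(purchase_data)],
    "Total Revenue": [purchase_data["Price"].sum()],
})
purchasing_summary.style.format({"Average Price": "${:,.2f}", "Total Revenue": "${:,.2f}"})
```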
## Gender Demographics
* Percentage and Count of Male Players
* Percentage and Count of Female Players
* Percentage and Count of Other / Non-Disclosed
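One way to sketch this, assuming `SN` and `Gender` columns:

```
# Count each player once before computing the gender split (assumed columns).
unique_players = purchase_data.drop_duplicates(subset="SN")
gender_counts = unique_players["Gender"].value_counts()
gender_demographics = pd.DataFrame({
    "Total Count": gender_counts,
    "Percentage of Players": (100 * gender_counts / len(unique_players)).round(2),
})
gender_demographics
```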
## Purchasing Analysis (Gender)
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. by gender
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
## Age Demographics
* Establish bins for ages
* Categorize the existing players using the age bins. Hint: use pd.cut()
* Calculate the numbers and percentages by age group
* Create a summary data frame to hold the results
* Optional: round the percentage column to two decimal points
* Display Age Demographics Table
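A sketch using `pd.cut()` as hinted above; the bin edges and the `Age`/`SN` column names are assumptions:

```
# Bin players by age, counting each player once (column names and bins are assumed).
bins = [0, 9, 14, 19, 24, 29, 34, 39, 200]
labels = ["<10", "10-14", "15-19", "20-24", "25-29", "30-34", "35-39", "40+"]
unique_players = purchase_data.drop_duplicates(subset="SN").copy()
unique_players["Age Group"] = pd.cut(unique_players["Age"], bins=bins, labels=labels)
age_counts = unique_players["Age Group"].value_counts().sort_index()
age_demographics = pd.DataFrame({
    "Total Count": age_counts,
    "Percentage of Players": (100 * age_counts / len(unique_players)).round(2),
})
age_demographics
```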
## Purchasing Analysis (Age)
* Bin the purchase_data data frame by age
* Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
* Create a summary data frame to hold the results
* Optional: give the displayed data cleaner formatting
* Display the summary data frame
## Top Spenders
* Run basic calculations to obtain the results in the table below
* Create a summary data frame to hold the results
* Sort the total purchase value column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
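A sketch of the per-player aggregation (column names assumed as before):

```
# Aggregate spending per player and sort by total spend (assumed columns).
grouped = purchase_data.groupby("SN")["Price"]
top_spenders = pd.DataFrame({
    "Purchase Count": grouped.count(),
    "Average Purchase Price": grouped.mean(),
    "Total Purchase Value": grouped.sum(),
})
top_spenders.sort_values("Total Purchase Value", ascending=False).head()
```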
## Most Popular Items
* Retrieve the Item ID, Item Name, and Item Price columns
* Group by Item ID and Item Name. Perform calculations to obtain purchase count, item price, and total purchase value
* Create a summary data frame to hold the results
* Sort the purchase count column in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the summary data frame
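A sketch of the item-level aggregation (column names assumed):

```
# Aggregate per item and rank by purchase count (assumed columns).
items = purchase_data[["Item ID", "Item Name", "Price"]]
grouped_items = items.groupby(["Item ID", "Item Name"])["Price"]
popular_items = pd.DataFrame({
    "Purchase Count": grouped_items.count(),
    "Item Price": grouped_items.mean(),
    "Total Purchase Value": grouped_items.sum(),
})
popular_items.sort_values("Purchase Count", ascending=False).head()
```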
## Most Profitable Items
* Sort the above table by total purchase value in descending order
* Optional: give the displayed data cleaner formatting
* Display a preview of the data frame
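Reusing the `popular_items` table sketched above, this step is just a different sort:

```
# Rank the same item summary by total purchase value instead of purchase count.
popular_items.sort_values("Total Purchase Value", ascending=False).head()
```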
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
<i>Licensed under the MIT License.</i>
# Benchmark with Movielens dataset
This illustrative comparison covers collaborative filtering algorithms available in this repository, such as Spark ALS, Surprise SVD and SAR, evaluated on the Movielens dataset. These algorithms are usable in a variety of recommendation tasks, including product or news recommendations.
The main purpose of this notebook is not to produce comprehensive benchmarking results on multiple datasets. Rather, it is intended to illustrate how one could evaluate different recommender algorithms using tools in this repository.
## Experimentation setup:
* Objective
* To compare how each collaborative filtering algorithm performs in predicting ratings and recommending relevant items.
* Environment
* The comparison is run on an [Azure Data Science Virtual Machine](https://azure.microsoft.com/en-us/services/virtual-machines/data-science-virtual-machines/).
* The virtual machine size is Standard NC6s_v2 (6 vCPUs, 112 GB memory, 1 P100 GPU).
* Note that a single-node DSVM is not intended for scalable benchmarking; scaling the compute up or out would be necessary to run the benchmark in a run-time-efficient way without memory issues.
* **NOTE ABOUT THE DEPENDENCIES TO INSTALL**: This notebook uses CPU, GPU and PySpark algorithms, so make sure you install the `full environment` as detailed in the [SETUP.md](../SETUP.md).
* Datasets
* [Movielens 100K](https://grouplens.org/datasets/movielens/100k/).
* [Movielens 1M](https://grouplens.org/datasets/movielens/1m/).
* Data split
* The data is split into train and test sets.
* The split ratios are 75-25 for train and test datasets.
* The splitting is stratified based on items.
* Model training
* A recommendation model is trained by using each of the collaborative filtering algorithms.
* Empirical parameter values reported [here](http://mymedialite.net/examples/datasets.html) are used in this notebook. More exhaustive hyperparameter tuning would be required to further optimize results.
* Evaluation metrics
* Ranking metrics:
* Precision@k.
* Recall@k.
* Normalized discounted cumulative gain@k (NDCG@k).
* Mean-average-precision (MAP).
* In the evaluation metrics above, k = 10.
* Rating metrics:
* Root mean squared error (RMSE).
* Mean absolute error (MAE).
* R squared.
* Explained variance.
* Run time performance
* Elapsed time for training a model and for using the model to predict/recommend k items.
* The time may vary across different machines.
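For reference, the conventional definitions of the main metrics above, for observed ratings $r_{ui}$, predictions $\hat{r}_{ui}$ over a test set $T$, and a top-$k$ list per user (the notebook itself relies on the helper functions imported from `benchmark_utils` for the actual computation), are:

$$
\mathrm{RMSE}=\sqrt{\frac{1}{|T|}\sum_{(u,i)\in T}\left(r_{ui}-\hat{r}_{ui}\right)^2},\qquad
\mathrm{MAE}=\frac{1}{|T|}\sum_{(u,i)\in T}\left|r_{ui}-\hat{r}_{ui}\right|
$$

$$
\mathrm{Precision@}k=\frac{\#\{\text{relevant items in the top-}k\}}{k},\qquad
\mathrm{Recall@}k=\frac{\#\{\text{relevant items in the top-}k\}}{\#\{\text{relevant items}\}}
$$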
## 0 Global settings
```
import sys
sys.path.append("../")
import os
import json
import pandas as pd
import numpy as np
import seaborn as sns
import pyspark
import torch
import fastai
import tensorflow as tf
import surprise
from reco_utils.common.general_utils import get_number_processors
from reco_utils.common.gpu_utils import get_cuda_version, get_cudnn_version
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_stratified_split
from benchmark_utils import *
print("System version: {}".format(sys.version))
print("Pandas version: {}".format(pd.__version__))
print("PySpark version: {}".format(pyspark.__version__))
print("Surprise version: {}".format(surprise.__version__))
print("PyTorch version: {}".format(torch.__version__))
print("Fast AI version: {}".format(fastai.__version__))
print("Tensorflow version: {}".format(tf.__version__))
print("CUDA version: {}".format(get_cuda_version()))
print("CuDNN version: {}".format(get_cudnn_version()))
n_cores = get_number_processors()
print("Number of cores: {}".format(n_cores))
%load_ext autoreload
%autoreload 2
```
## Parameters
```
# Run parameters
EPOCHS = 15
# Hide fastai progress bar
hide_fastai_progress_bar()
# fix random seeds to make sure our runs are reproducible
np.random.seed(SEED)
torch.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
environments = {
"als": "pyspark",
"sar": "python_cpu",
"svd": "python_cpu",
"fastai": "python_gpu",
"ncf": "python_gpu",
}
metrics = {
"als": ["rating", "ranking"],
"sar": ["ranking"],
"svd": ["rating", "ranking"],
"fastai": ["rating", "ranking"],
"ncf": ["ranking"],
}
```
Algorithm parameters
```
als_params = {
"rank": 10,
"maxIter": EPOCHS,
"implicitPrefs": False,
"alpha": 0.1,
"regParam": 0.05,
"coldStartStrategy": "drop",
"nonnegative": False,
"userCol": DEFAULT_USER_COL,
"itemCol": DEFAULT_ITEM_COL,
"ratingCol": DEFAULT_RATING_COL,
}
sar_params = {
"similarity_type": "jaccard",
"time_decay_coefficient": 30,
"time_now": None,
"timedecay_formula": True,
"col_user": DEFAULT_USER_COL,
"col_item": DEFAULT_ITEM_COL,
"col_rating": DEFAULT_RATING_COL,
"col_timestamp": DEFAULT_TIMESTAMP_COL,
}
svd_params = {
"n_factors": 150,
"n_epochs": EPOCHS,
"lr_all": 0.005,
"reg_all": 0.02,
"random_state": SEED,
"verbose": False
}
fastai_params = {
"n_factors": 40,
"y_range": [0,5.5],
"wd": 1e-1,
"max_lr": 5e-3,
"epochs": EPOCHS
}
ncf_params = {
"model_type": "NeuMF",
"n_factors": 4,
"layer_sizes": [16, 8, 4],
"n_epochs": EPOCHS,
"batch_size": 1024,
"learning_rate": 1e-3,
"verbose": 10
}
params = {
"als": als_params,
"sar": sar_params,
"svd": svd_params,
"fastai": fastai_params,
"ncf": ncf_params,
}
prepare_training_data = {
"als": prepare_training_als,
"svd": prepare_training_svd,
"fastai": prepare_training_fastai,
"ncf": prepare_training_ncf,
}
prepare_metrics_data = {
"als": lambda train, test: prepare_metrics_als(train, test),
"fastai": lambda train, test: prepare_metrics_fastai(train, test),
}
trainer = {
"als": lambda params, data: train_als(params, data),
"svd": lambda params, data: train_svd(params, data),
"sar": lambda params, data: train_sar(params, data),
"fastai": lambda params, data: train_fastai(params, data),
"ncf": lambda params, data: train_ncf(params, data),
}
rating_predictor = {
"als": lambda model, test: predict_als(model, test),
"svd": lambda model, test: predict_svd(model, test),
"fastai": lambda model, test: predict_fastai(model, test),
}
ranking_predictor = {
"als": lambda model, test, train: recommend_k_als(model, test, train),
"sar": lambda model, test, train: recommend_k_sar(model, test, train),
"svd": lambda model, test, train: recommend_k_svd(model, test, train),
"fastai": lambda model, test, train: recommend_k_fastai(model, test, train),
"ncf": lambda model, test, train: recommend_k_ncf(model, test, train),
}
rating_evaluator = {
"als": lambda test, predictions: rating_metrics_pyspark(test, predictions),
"svd": lambda test, predictions: rating_metrics_python(test, predictions),
"fastai": lambda test, predictions: rating_metrics_python(test, predictions)
}
ranking_evaluator = {
"als": lambda test, predictions, k: ranking_metrics_pyspark(test, predictions, k),
"sar": lambda test, predictions, k: ranking_metrics_python(test, predictions, k),
"svd": lambda test, predictions, k: ranking_metrics_python(test, predictions, k),
"fastai": lambda test, predictions, k: ranking_metrics_python(test, predictions, k),
"ncf": lambda test, predictions, k: ranking_metrics_python(test, predictions, k),
}
def generate_summary(data, algo, k, train_time, time_rating, rating_metrics, time_ranking, ranking_metrics):
summary = {"Data": data, "Algo": algo, "K": k, "Train time (s)": train_time, "Predicting time (s)": time_rating, "Recommending time (s)": time_ranking}
if rating_metrics is None:
rating_metrics = {
"RMSE": np.nan,
"MAE": np.nan,
"R2": np.nan,
"Explained Variance": np.nan,
}
if ranking_metrics is None:
ranking_metrics = {
"MAP": np.nan,
"nDCG@k": np.nan,
"Precision@k": np.nan,
"Recall@k": np.nan,
}
summary.update(rating_metrics)
summary.update(ranking_metrics)
return summary
```
## Benchmark loop
```
data_sizes = ["100k", "1m"] # Movielens data size: 100k, 1m, 10m, or 20m
algorithms = ["als", "svd", "sar", "ncf", "fastai"]
%%time
# For each data size and each algorithm, a recommender is evaluated.
cols = ["Data", "Algo", "K", "Train time (s)", "Predicting time (s)", "RMSE", "MAE", "R2", "Explained Variance", "Recommending time (s)", "MAP", "nDCG@k", "Precision@k", "Recall@k"]
df_results = pd.DataFrame(columns=cols)
for data_size in data_sizes:
# Load the dataset
df = movielens.load_pandas_df(
size=data_size,
header=[DEFAULT_USER_COL, DEFAULT_ITEM_COL, DEFAULT_RATING_COL, DEFAULT_TIMESTAMP_COL]
)
print("Size of Movielens {}: {}".format(data_size, df.shape))
# Split the dataset
df_train, df_test = python_stratified_split(df,
ratio=0.75,
min_rating=1,
filter_by="item",
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL
)
# Loop through the algos
for algo in algorithms:
print("\nComputing {} algorithm on Movielens {}".format(algo, data_size))
# Data prep for training set
train = prepare_training_data.get(algo, lambda x:x)(df_train)
# Get model parameters
model_params = params[algo]
# Train the model
model, time_train = trainer[algo](model_params, train)
# Predict and evaluate
train, test = prepare_metrics_data.get(algo, lambda x,y:(x,y))(df_train, df_test)
if "rating" in metrics[algo]:
# Predict for rating
preds, time_rating = rating_predictor[algo](model, test)
# Evaluate for rating
ratings = rating_evaluator[algo](test, preds)
else:
ratings = None
time_rating = np.nan
if "ranking" in metrics[algo]:
# Predict for ranking
top_k_scores, time_ranking = ranking_predictor[algo](model, test, train)
            # Evaluate for ranking
rankings = ranking_evaluator[algo](test, top_k_scores, DEFAULT_K)
else:
rankings = None
time_ranking = np.nan
# Record results
summary = generate_summary(data_size, algo, DEFAULT_K, time_train, time_rating, ratings, time_ranking, rankings)
df_results.loc[df_results.shape[0] + 1] = summary
```
## Results
```
df_results
```
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/ImageCollection/filtering_by_calendar_range.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/ImageCollection/filtering_by_calendar_range.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=ImageCollection/filtering_by_calendar_range.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/ImageCollection/filtering_by_calendar_range.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell. Uncomment these lines if you are running this notebook for the first time.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize the Earth Engine API. You only need to authenticate once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
```
# ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
roi = ee.Geometry.Point([-99.2182, 46.7824])
# find images acquired during June and July
collection = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
.filterBounds(roi) \
.filter(ee.Filter.calendarRange(6, 7, 'month')) \
.sort('DATE_ACQUIRED')
print(collection.size().getInfo())
first = collection.first()
propertyNames = first.propertyNames()
print(propertyNames.getInfo())
time_start = ee.Date(first.get('system:time_start')).format("YYYY-MM-dd")
print(time_start.getInfo())
```
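To look at the imagery itself rather than just the metadata, one could also add a composite of the filtered collection to the map with `Map.addLayer()`, which geehydro provides as noted above. This is a minimal sketch; the band names and stretch are typical choices for Landsat 8 TOA reflectance and are assumptions, not values taken from this notebook:

```
# Median June/July composite displayed as a true-colour layer
# (visualization parameters are illustrative assumptions).
median = collection.median()
vis_params = {'bands': ['B4', 'B3', 'B2'], 'min': 0, 'max': 0.3}
Map.addLayer(median, vis_params, 'June-July median')
Map.setCenter(-99.2182, 46.7824, 8)
```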
## Display Earth Engine data layers
```
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
```
from lecture import *
```
# Introduction to programming in Python
### [Gerard Gorman](http://www.imperial.ac.uk/people/g.gorman)
# Lecture 6: Reading files
Learning objectives: You will learn how to:
* Parse strings to extract specific data of interest.
* Use dictionaries to index data using any type of key.
## Reading data from a plain text file
We can read text from a [text file](http://en.wikipedia.org/wiki/Text_file) into strings in a program. This is a common (and simple) way for a program to get input data. The basic recipe is:
```python
# Open text file
infile = open("myfile.dat", "r")
# Read next line:
line = infile.readline()
# Read the lines in a loop one by one:
for line in infile:
<process line>
# Load all lines into a list of strings:
lines = infile.readlines()
for line in lines:
    <process line>
```
Let's look at the file [./data/data1.txt](./data/data1.txt) (all of the data files in this lecture are stored in the sub-folder *data/* of this notebook library). The file has a column of numbers:
```
21.8
18.1
19
23
26
17.8
```
The goal is to read this file and calculate the mean:
```
# Open data file
infile = open("data/data1.txt", "r")
# Initialise values
mean = 0
n=0
# Loop to perform sum
for number in infile:
number = float(number)
mean = mean + number
n += 1
# It is good practice to close a file when you are finished.
infile.close()
# Calculate the mean.
mean = mean/n
print(mean)
```
Let's make this example more interesting. There is a **lot** of data out there for you to discover all kinds of interesting facts - you just need to be interested in learning a little analysis. For this case I have downloaded tidal gauge data for the port of Avonmouth from the [BODC](http://www.bodc.ac.uk/). Take some time now to open the file [data/2012AVO.txt](data/2012AVO.txt) and have a look through it; at the top you will see the [metadata](http://en.wikipedia.org/wiki/Metadata):
```
Port: P060
Site: Avonmouth
Latitude: 51.51089
Longitude: -2.71497
Start Date: 01JAN2012-00.00.00
End Date: 30APR2012-23.45.00
Contributor: National Oceanography Centre, Liverpool
Datum information: The data refer to Admiralty Chart Datum (ACD)
Parameter code: ASLVTD02 = Surface elevation (unspecified datum) of the water body by fixed in-situ pressure sensor
```
Let's read the column ASLVTD02 (the surface elevation) and plot it:
```
import matplotlib.pyplot as plt
import pendulum
import numpy as np
tide_file = open("data/2012AVO.txt", "r")
# Initialise an empty list to store the elevation
elevation = []
time = []
for line in tide_file:
# Here we use a try/except block to try to read the data and
# raise an exception if we fail to parse the data in a line
# for some reason. This is a neat trick to skip over all the
# header information.
try:
# Split this line into words.
words = line.split()
# If we do not have 5 words then the line must be part of the header.
if len(words)!=5:
raise ValueError
# The elevation data is on the 4th column. However, the BODC
# appends a "M" when a value is improbable and an "N" when
# data is missing (maybe a ship dumped into it during rough weather!)
# As we are in a try/except block, an error will be raised
# in the float conversion when this situation arises.
level = float(words[3])
elevation.append(level)
# Form a single string with the date and time.
date_time = ' '.join(words[1:3])
# Dealing with dates and time is a major pain as there are
# several different formats. Luckily there are lots of people
        # out there writing libraries that are making your life easier.
# At the moment the Python library *pendulum* seems to be the
# best out there for parsing various different date and time
# formats and is pretty easy to use.
date_time = pendulum.parse(date_time)
# So that we can plot this we are going to convert this date
# and time into a POSIX timestamp (aka UNIX Epoch time):
# https://en.wikipedia.org/wiki/Unix_time
time.append(date_time.timestamp())
except:
pass
# For plotting lets convert the list to a NumPy array.
elevation = np.array(elevation)
time = np.array(time)
plt.plot(time, elevation)
plt.xlabel("timestamp")
plt.ylabel("Elevation (meters)")
plt.show()
```
Quiz time:
* What tidal constituents can you identify by looking at this plot?
* Is this primarily a diurnal or semi-diurnal tidal region? (hint - change the x-axis range on the plot above).
You will notice in the above example that we used the *split()* string member function. This is a very useful function for grabbing individual words on a line. When called without any arguments it assumes that the [delimiter](http://en.wikipedia.org/wiki/Delimiter) is a blank space. However, you can use this to split a string with any delimiter, *e.g.*, *line.split(';')*, *line.split(':')*.
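For example (the strings below are made up purely for illustration):

```python
# Splitting on whitespace versus splitting on an explicit delimiter.
header = "Site: Avonmouth"
key, value = header.split(":")
print(key.strip(), "->", value.strip())

record = "2012/01/01;00:15:00;7.406"
fields = record.split(";")
print(fields)
```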
## <span style="color:blue">Exercise 6.1: Read a two-column data file</span>
The file [data/xy.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/xy.dat) contains two columns of numbers, corresponding to *x* and *y* coordinates on a curve. The start of the file looks like this:
```
-1.0000 -0.0000
-0.9933 -0.0087
-0.9867 -0.0179
-0.9800 -0.0274
-0.9733 -0.0374
```
Make a program that reads the first column into a list `xlist_61` and the second column into a list `ylist_61`. Then convert the lists to arrays named `xarray_61` and `yarray_61`, and plot the curve. Store the minimum and maximum y coordinates in two variables named `ymin_61` and `ymax_61`. (Hint: Read the file line by line, split each line into words, convert to float, and append to `xlist_61` and `ylist_61`.)
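One possible sketch of a solution, following the hint above (the variable names come from the exercise statement):

```python
xlist_61 = []
ylist_61 = []
with open("data/xy.dat", "r") as infile:
    for line in infile:
        words = line.split()
        if len(words) != 2:
            continue  # skip blank or malformed lines
        xlist_61.append(float(words[0]))
        ylist_61.append(float(words[1]))

xarray_61 = np.array(xlist_61)
yarray_61 = np.array(ylist_61)
ymin_61 = yarray_61.min()
ymax_61 = yarray_61.max()

plt.plot(xarray_61, yarray_61)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
```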
```
grade = ok.grade('question-6_1')
```
## <span style="color:blue">Exercise 6.2: Read a data file</span>
The files [data/density_water.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/density_water.dat) and [data/density_air.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/density_air.dat) contain data about the density of water and air (respectively) for different temperatures. The data files have some comment lines starting with # and some lines are blank. The rest of the lines contain density data: the temperature in the first column and the corresponding density in the second column. The goal of this exercise is to read the data in such a file, discard commented or blank lines, and plot the density versus the temperature as distinct (small) circles for each data point. Write a function `readTempDenFile` that takes a filename as argument and returns two lists containing respectively the temperature and the density. Call this function on both files, and store the temperature and density in lists called `temp_air_list`, `dens_air_list`, `temp_water_list` and `dens_water_list`.
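A possible sketch, skipping blank and commented lines as described:

```python
# Read a temperature/density file, ignoring blank lines and lines starting with '#'.
def readTempDenFile(filename):
    temperatures = []
    densities = []
    with open(filename, "r") as infile:
        for line in infile:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            words = line.split()
            temperatures.append(float(words[0]))
            densities.append(float(words[1]))
    return temperatures, densities

temp_air_list, dens_air_list = readTempDenFile("data/density_air.dat")
temp_water_list, dens_water_list = readTempDenFile("data/density_water.dat")

plt.plot(temp_air_list, dens_air_list, "o", markersize=3, label="air")
plt.plot(temp_water_list, dens_water_list, "o", markersize=3, label="water")
plt.xlabel("Temperature")
plt.ylabel("Density")
plt.legend()
plt.show()
```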
```
grade = ok.grade("question-6_2")
```
## <span style="color:blue">Exercise 6.3: Read acceleration data and find velocities</span>
A file [data/acc.dat](./data/acc.dat) contains measurements $a_0, a_1, \ldots, a_{n-1}$ of the acceleration of an object moving along a straight line. The measurement $a_k$ is taken at time point $t_k = k\Delta t$, where $\Delta t$ is the time spacing between the measurements. The purpose of the exercise is to load the acceleration data into a program and compute the velocity $v(t)$ of the object at some time $t$.
In general, the acceleration $a(t)$ is related to the velocity $v(t)$ through $v^\prime(t) = a(t)$. This means that
$$
v(t) = v(0) + \int_0^t{a(\tau)d\tau}
$$
If $a(t)$ is only known at some discrete, equally spaced points in time, $a_0, \ldots, a_{n-1}$ (which is the case in this exercise), we must compute the integral above numerically, for example by the Trapezoidal rule:
$$
v(t_k) \approx v(0) + \Delta t \left(\frac{1}{2}a_0 + \frac{1}{2}a_k + \sum_{i=1}^{k-1}a_i \right), \ \ 1 \leq k \leq n-1.
$$
We assume $v(0) = 0$ so that also $v_0 = 0$.
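As a quick sanity check of the formula (not a substitute for the exercise below), note that with a constant acceleration $a_i = a$ the trapezoidal sum gives $v(t_k) = a\,k\,\Delta t = a\,t_k$, exactly as expected. A minimal sketch with made-up numbers:
```python
dt = 0.5
a = [2.0] * 6   # constant acceleration of 2 m/s^2 (made-up data)
k = 4
v_k = dt * (0.5 * a[0] + 0.5 * a[k] + sum(a[1:k]))
print(v_k)      # 4.0, i.e. a * t_k with t_k = k * dt = 2.0
```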
Read the values $a_0, \ldots, a_{n-1}$ from file into an array `acc_array_63` and plot the acceleration versus time for $\Delta t = 0.5$. The time should be stored in an array named `time_array_63`.
Then write a function `compute_velocity(dt, k, a)` that takes as arguments a time interval $\Delta t$ `dt`, an index `k` and a list of accelerations `a`, uses the Trapezoidal rule to compute one $v(t_k)$ value, and returns this value. Experiment with different values of $\Delta t$ and $k$.
```
grade = ok.grade('question-6_3')
```
## File writing
Writing a file in Python is simple. You just collect the text you want to write in one or more strings and, for each string, use a statement along the lines of
```python
outfile.write(string)
```
The write function does not add a newline character so you may have to do that explicitly:
```python
outfile.write(string + '\n')
```
That’s it! Compose the strings and write! Let's do an example. Write a nested list (table) to a file:
```
# Let's define some table of data
data = [[ 0.75, 0.29619813, -0.29619813, -0.75 ],
[ 0.29619813, 0.11697778, -0.11697778, -0.29619813],
[-0.29619813, -0.11697778, 0.11697778, 0.29619813],
[-0.75, -0.29619813, 0.29619813, 0.75 ]]
# Open the file for writing. Notice the "w" indicates we are writing!
outfile = open("tmp_table.dat", "w")
for row in data:
for column in row:
outfile.write("%14.8f" % column)
outfile.write("\n") # ensure newline
outfile.close()
```
And that's it - run the above cell and take a look at the file that was generated in your Azure library clone.
## Exercise 6.4: Write function data to a file
We want to dump $x$ and $f(x)$ values to a file named function_data.dat, where the $x$ values appear in the first column and the $f(x)$ values appear in the second. Choose $n$ equally spaced $x$ values in the interval [-4, 4]. Here, the function $f(x)$ is given by:
$f(x) = \frac{1}{\sqrt{2\pi}}\exp(-0.5x^2)$
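As a sanity check for your output (this is not the full solution), the peak value of this Gaussian is $f(0) = 1/\sqrt{2\pi} \approx 0.3989$:
```python
from math import exp, pi, sqrt
f = lambda x: 1.0 / sqrt(2.0 * pi) * exp(-0.5 * x**2)
print(f(0.0))   # approximately 0.3989
```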
```
ok.grade('question-6_4')
ok.score()
```
|
github_jupyter
|
from lecture import *
# Open text file
infile = open("myfile.dat", "r")
# Read next line:
line = infile.readline()
# Read the lines in a loop one by one:
for line in infile:
print(line)  # placeholder: process each line here
# Load all lines into a list of strings:
lines = infile.readlines()
for line in lines:
print(line)  # placeholder: process each line here
Let's look at the file [./data/data1.txt](./data/data1.txt) (all of the data files in this lecture are stored in the sub-folder *data/* of this notebook library). The file has a single column of numbers:
The goal is to read this file and calculate the mean:
Let's make this example more interesting. There is a **lot** of data out there for you to discover all kinds of interesting facts - you just need to be interested in learning a little analysis. For this case I have downloaded tidal gauge data for the port of Avonmouth from the [BODC](http://www.bodc.ac.uk/). Take some time now to open the file [data/2012AVO.txt](data/2012AVO.txt) and have a look through it; you will see the [metadata](http://en.wikipedia.org/wiki/Metadata):
Let's read the column ASLVTD02 (the surface elevation) and plot it:
Quiz time:
* What tidal constituents can you identify by looking at this plot?
* Is this primarily a diurnal or semi-diurnal tidal region? (hint - change the x-axis range on the plot above).
You will notice in the above example that we used the *split()* string member function. This is a very useful function for grabbing individual words on a line. When called without any arguments it assumes that the [delimiter](http://en.wikipedia.org/wiki/Delimiter) is a blank space. However, you can use this to split a string with any delimiter, *e.g.*, *line.split(';')*, *line.split(':')*.
## <span style="color:blue">Exercise 6.1: Read a two-column data file</span>
The file [data/xy.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/xy.dat) contains two columns of numbers, corresponding to *x* and *y* coordinates on a curve. The start of the file looks like this:
-1.0000 -0.0000<br>
-0.9933 -0.0087<br>
-0.9867 -0.0179<br>
-0.9800 -0.0274<br>
-0.9733 -0.0374<br>
Make a program that reads the first column into a list `xlist_61` and the second column into a list `ylist_61`. Then convert the lists to arrays named `xarray_61` and `yarray_61`, and plot the curve. Store the minimum and maximum y coordinates in two variables named `ymin_61` and `ymax_61`. (Hint: Read the file line by line, split each line into words, convert to float, and append to `xlist_61` and `ylist_61`.)<br>
## <span style="color:blue">Exercise 6.2: Read a data file</span>
The files [data/density_water.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/density_water.dat) and [data/density_air.dat](https://raw.githubusercontent.com/ggorman/Introduction-to-programming-for-geoscientists/master/notebook/data/density_air.dat) contain data about the density of water and air (respectively) for different temperatures. The data files have some comment lines starting with # and some lines are blank. The rest of the lines contain density data: the temperature in the first column and the corresponding density in the second column. The goal of this exercise is to read the data in such a file, discard commented or blank lines, and plot the density versus the temperature as distinct (small) circles for each data point. Write a function `readTempDenFile` that takes a filename as argument and returns two lists containing respectively the temperature and the density. Call this function on both files, and store the temperature and density in lists called `temp_air_list`, `dens_air_list`, `temp_water_list` and `dens_water_list`.
## <span style="color:blue">Exercise 6.3: Read acceleration data and find velocities</span>
A file [data/acc.dat](./data/acc.dat) contains measurements $a_0, a_1, \ldots, a_{n-1}$ of the acceleration of an object moving along a straight line. The measurement $a_k$ is taken at time point $t_k = k\Delta t$, where $\Delta t$ is the time spacing between the measurements. The purpose of the exercise is to load the acceleration data into a program and compute the velocity $v(t)$ of the object at some time $t$.
In general, the acceleration $a(t)$ is related to the velocity $v(t)$ through $v^\prime(t) = a(t)$. This means that
$$
v(t) = v(0) + \int_0^t{a(\tau)d\tau}
$$
If $a(t)$ is only known at some discrete, equally spaced points in time, $a_0, \ldots, a_{n-1}$ (which is the case in this exercise), we must compute the integral above numerically, for example by the Trapezoidal rule:
$$
v(t_k) \approx v(0) + \Delta t \left(\frac{1}{2}a_0 + \frac{1}{2}a_k + \sum_{i=1}^{k-1}a_i \right), \ \ 1 \leq k \leq n-1.
$$
We assume $v(0) = 0$ so that also $v_0 = 0$.
Read the values $a_0, \ldots, a_{n-1}$ from file into an array `acc_array_63` and plot the acceleration versus time for $\Delta t = 0.5$. The time should be stored in an array named `time_array_63`.
Then write a function `compute_velocity(dt, k, a)` that takes as arguments a time interval $\Delta t$ `dt`, an index `k` and a list of accelerations `a`, uses the Trapezoidal rule to compute one $v(t_k)$ value, and returns this value. Experiment with different values of $\Delta t$ and $k$.
## File writing
Writing a file in Python is simple. You just collect the text you want to write in one or more strings and, for each string, use a statement along the lines of
The write function does not add a newline character so you may have to do that explicitly:
That’s it! Compose the strings and write! Let's do an example. Write a nested list (table) to a file:
And that's it - run the above cell and take a look at the file that was generated in your Azure library clone.
## Exercise 6.4: Write function data to a file
We want to dump $x$ and $f(x)$ values to a file named function_data.dat, where the $x$ values appear in the first column and the $f(x)$ values appear in the second. Choose $n$ equally spaced $x$ values in the interval [-4, 4]. Here, the function $f(x)$ is given by:
$f(x) = \frac{1}{\sqrt{2\pi}}\exp(-0.5x^2)$
| 0.83545 | 0.973266 |
## Generate more image training data using data augmentation
"Data augmentation" means you create more virtual training data from existing image data.
you can leverage keras' image data generation
### We will use a picture of a dog as an example
Because TensorFlow can be configured with either `channels_last` (the default) or `channels_first` (sometimes used with NVIDIA GPUs), I am adding routines to handle both situations.
```
import numpy as np
from tensorflow import keras
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
# print some info
print('backend:', keras.backend.backend() ,', version:', keras.__version__, ', image_data_format:' , keras.backend.image_data_format())
is_channels_first = (keras.backend.image_data_format() == 'channels_first')
```
#### load the source image
```
# this is the name of the dog file
dog_file_name = 'dog300.png'
# load the image
img = load_img(dog_file_name)
print('the image:', dog_file_name, ', image type:', img)
# we need to change the image to numpy array for processing and normalize the value (0.0 to 1.0)
ar = (img_to_array(img).astype(float)) / 255.0
#show the shape
print('converted the image to np array of shape:', ar.shape)
# plt format must be x,y,channel
if is_channels_first:
mar = np.rollaxis(ar,0,3)
else:
mar = ar
print('the array to be displayed in plt has the shape:', mar.shape)
```
#### show the source image in matplotlib
```
# display the image in matplotlib
%matplotlib inline
plt.figure()
plt.xticks([])
plt.yticks([])
plt.imshow(mar)
plt.show()
plt.close()
```
#### define how you will "transform" the images
```
dog_datagen = ImageDataGenerator(
rotation_range=45,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=0.3,
horizontal_flip=True,
fill_mode='nearest')
# must convert the source to a 4-tuple (add a leading batch dimension)
ar4 = ar.reshape((1,) + ar.shape)
print('the shape of source image in 4-tuple:', ar4.shape)
```
#### let's generate 10 transformed images from the source
```
# let's generate 10 modified images
nimg = 10
images = []
i = 0
for generated_image in dog_datagen.flow(ar4 ):
# drop the 1 in front so we have image that can be shown in matplotlib
if is_channels_first:
new_image = generated_image.reshape( 3,300,300)
new_image = np.rollaxis(new_image,0,3)
else:
new_image = generated_image.reshape( 300,300,3)
images.append(new_image)
i +=1
if( i >= nimg):
break
#plot the images
fig = plt.figure( figsize = (11,2) ,dpi=96)
for i in range(nimg):
plt.subplot(1,10,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(images[i])
plt.show()
plt.close()
```
|
github_jupyter
|
import numpy as np
from tensorflow import keras
from matplotlib import pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
# print some info
print('backend:', keras.backend.backend() ,', version:', keras.__version__, ', image_data_format:' , keras.backend.image_data_format())
is_channels_first = (keras.backend.image_data_format() == 'channels_first')
# this is the name of the dog file
dog_file_name = 'dog300.png'
# load the image
img = load_img(dog_file_name)
print('the image:', dog_file_name, ', image type:', img)
# we need to change the image to numpy array for processing and normalize the value (0.0 to 1.0)
ar = (img_to_array(img).astype(float)) / 255.0
#show the shape
print('converted the image to np array of shape:', ar.shape)
# plt format must be x,y,channel
if is_channels_first:
mar = np.rollaxis(ar,0,3)
else:
mar = ar
print('the array to be displayed in plt has the shape:', mar.shape)
# display the image in matplotlib
%matplotlib inline
plt.figure()
plt.xticks([])
plt.yticks([])
plt.imshow(mar)
plt.show()
plt.close()
dog_datagen = ImageDataGenerator(
rotation_range=45,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=0.3,
horizontal_flip=True,
fill_mode='nearest')
# must convert the source to a 4-tuple (add a leading batch dimension)
ar4 = ar.reshape((1,) + ar.shape)
print('the shape of source image in 4-tuple:', ar4.shape)
# let's generate 10 modified images
nimg = 10
images = []
i = 0
for generated_image in dog_datagen.flow(ar4 ):
# drop the 1 in front so we have image that can be shown in matplotlib
if is_channels_first:
new_image = generated_image.reshape( 3,300,300)
new_image = np.rollaxis(new_image,0,3)
else:
new_image = generated_image.reshape( 300,300,3)
images.append(new_image)
i +=1
if( i >= nimg):
break
#plot the images
fig = plt.figure( figsize = (11,2) ,dpi=96)
for i in range(nimg):
plt.subplot(1,10,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(images[i])
plt.show()
plt.close()
| 0.318379 | 0.958148 |
## Node classification on a paper citation network
In this tutorial, we show how GraphScope combines graph analytics, graph queries, and graph learning to tackle a node classification task on a paper citation network.
The example uses the [ogbn-mag](https://ogb.stanford.edu/docs/nodeprop/#ogbn-mag) dataset. ogbn-mag is a heterogeneous graph composed of a subset of the Microsoft Academic Graph. It contains four types of entities (papers, authors, institutions, and fields of study) and four types of directed relation edges connecting pairs of these entities.
Given the heterogeneous ogbn-mag data, the task is to predict the class of each paper. This node classification task categorizes papers into different venues, subjects, or research groups by combining the paper attributes with the structural information of the citation graph. In this dataset, every paper node carries a 128-dimensional word2vec vector extracted from its title and abstract, obtained from a pre-trained model, while the structural features are computed on the fly.
This tutorial proceeds in the following steps:
- interactive graph queries with Gremlin;
- graph analytics with graph algorithms;
- graph-based machine learning.
```
# Install graphscope package if you are NOT in the Playground
!pip3 install graphscope
# Import the graphscope module
import graphscope
graphscope.set_option(show_log=True) # enable logging
# Load the ogbn_mag dataset as a graph
from graphscope.dataset import load_ogbn_mag
graph = load_ogbn_mag()
```
## Interactive query with gremlin
In this example, we launch an interactive query engine and then use a graph traversal to count the number of papers co-authored by two given authors. To simplify the query, we assume the two authors are uniquely identified by IDs 2 and 4307.
```
# Get the entrypoint for submitting Gremlin queries on graph g.
interactive = graphscope.gremlin(graph)
# Count the number of papers two authors (with id 2 and 4307) have co-authored.
papers = interactive.execute(
"g.V().has('author', 'id', 2).out('writes').where(__.in('writes').has('id', 4307)).count()").one()
print("result", papers)
```
## Graph analytics with analytical engine
Continuing the example, we now run graph analytics on the data to generate structural features for the nodes. We first derive a subgraph by extracting papers within a specific time range from the whole graph (using Gremlin!), and then run k-core decomposition and triangle counting to generate structural features for each paper node.
```
# Extract a subgraph of publications within a time range.
sub_graph = interactive.subgraph(
"g.V().has('year', inside(2014, 2020)).outE('cites')"
)
# Project the subgraph to simple graph by selecting papers and their citations.
simple_g = sub_graph.project(vertices={"paper": []}, edges={"cites": []})
# compute the kcore and triangle-counting.
kc_result = graphscope.k_core(simple_g, k=5)
tc_result = graphscope.triangles(simple_g)
# Add the results as new columns to the citation graph.
sub_graph = sub_graph.add_column(kc_result, {"kcore": "r"})
sub_graph = sub_graph.add_column(tc_result, {"tc": "r"})
```
## Graph neural networks (GNNs)
Next, we use the generated structural features together with the original features to train a learning model with GraphScope's learning engine.
In this example, we train a GCN model to classify the nodes (papers) into 349 categories, each representing a venue (e.g., a preprint server or a conference).
```
# Define the features for learning,
# we chose original 128-dimension feature and k-core, triangle count result as new features.
paper_features = []
for i in range(128):
paper_features.append("feat_" + str(i))
paper_features.append("kcore")
paper_features.append("tc")
# Launch a learning engine. here we split the dataset, 75% as train, 10% as validation and 15% as test.
lg = graphscope.graphlearn(sub_graph, nodes=[("paper", paper_features)],
edges=[("paper", "cites", "paper")],
gen_labels=[
("train", "paper", 100, (0, 75)),
("val", "paper", 100, (75, 85)),
("test", "paper", 100, (85, 100))
])
# Then we define the training process, use internal GCN model.
from graphscope.learning.examples import GCN
from graphscope.learning.graphlearn.python.model.tf.trainer import LocalTFTrainer
from graphscope.learning.graphlearn.python.model.tf.optimizer import get_tf_optimizer
def train(config, graph):
def model_fn():
return GCN(graph,
config["class_num"],
config["features_num"],
config["batch_size"],
val_batch_size=config["val_batch_size"],
test_batch_size=config["test_batch_size"],
categorical_attrs_desc=config["categorical_attrs_desc"],
hidden_dim=config["hidden_dim"],
in_drop_rate=config["in_drop_rate"],
neighs_num=config["neighs_num"],
hops_num=config["hops_num"],
node_type=config["node_type"],
edge_type=config["edge_type"],
full_graph_mode=config["full_graph_mode"])
trainer = LocalTFTrainer(model_fn,
epoch=config["epoch"],
optimizer=get_tf_optimizer(
config["learning_algo"],
config["learning_rate"],
config["weight_decay"]))
trainer.train_and_evaluate()
# hyperparameters config.
config = {"class_num": 349, # output dimension
"features_num": 130, # 128 dimension + kcore + triangle count
"batch_size": 500,
"val_batch_size": 100,
"test_batch_size":100,
"categorical_attrs_desc": "",
"hidden_dim": 256,
"in_drop_rate": 0.5,
"hops_num": 2,
"neighs_num": [5, 10],
"full_graph_mode": False,
"agg_type": "gcn", # mean, sum
"learning_algo": "adam",
"learning_rate": 0.01,
"weight_decay": 0.0005,
"epoch": 5,
"node_type": "paper",
"edge_type": "cites"}
# Start training and evaluating
train(config, lg)
```
|
github_jupyter
|
# Install graphscope package if you are NOT in the Playground
!pip3 install graphscope
# Import the graphscope module
import graphscope
graphscope.set_option(show_log=True) # enable logging
# Load the ogbn_mag dataset as a graph
from graphscope.dataset import load_ogbn_mag
graph = load_ogbn_mag()
# Get the entrypoint for submitting Gremlin queries on graph g.
interactive = graphscope.gremlin(graph)
# Count the number of papers two authors (with id 2 and 4307) have co-authored.
papers = interactive.execute(
"g.V().has('author', 'id', 2).out('writes').where(__.in('writes').has('id', 4307)).count()").one()
print("result", papers)
# Extract a subgraph of publications within a time range.
sub_graph = interactive.subgraph(
"g.V().has('year', inside(2014, 2020)).outE('cites')"
)
# Project the subgraph to simple graph by selecting papers and their citations.
simple_g = sub_graph.project(vertices={"paper": []}, edges={"cites": []})
# compute the kcore and triangle-counting.
kc_result = graphscope.k_core(simple_g, k=5)
tc_result = graphscope.triangles(simple_g)
# Add the results as new columns to the citation graph.
sub_graph = sub_graph.add_column(kc_result, {"kcore": "r"})
sub_graph = sub_graph.add_column(tc_result, {"tc": "r"})
# Define the features for learning,
# we chose original 128-dimension feature and k-core, triangle count result as new features.
paper_features = []
for i in range(128):
paper_features.append("feat_" + str(i))
paper_features.append("kcore")
paper_features.append("tc")
# Launch a learning engine. here we split the dataset, 75% as train, 10% as validation and 15% as test.
lg = graphscope.graphlearn(sub_graph, nodes=[("paper", paper_features)],
edges=[("paper", "cites", "paper")],
gen_labels=[
("train", "paper", 100, (0, 75)),
("val", "paper", 100, (75, 85)),
("test", "paper", 100, (85, 100))
])
# Then we define the training process, use internal GCN model.
from graphscope.learning.examples import GCN
from graphscope.learning.graphlearn.python.model.tf.trainer import LocalTFTrainer
from graphscope.learning.graphlearn.python.model.tf.optimizer import get_tf_optimizer
def train(config, graph):
def model_fn():
return GCN(graph,
config["class_num"],
config["features_num"],
config["batch_size"],
val_batch_size=config["val_batch_size"],
test_batch_size=config["test_batch_size"],
categorical_attrs_desc=config["categorical_attrs_desc"],
hidden_dim=config["hidden_dim"],
in_drop_rate=config["in_drop_rate"],
neighs_num=config["neighs_num"],
hops_num=config["hops_num"],
node_type=config["node_type"],
edge_type=config["edge_type"],
full_graph_mode=config["full_graph_mode"])
trainer = LocalTFTrainer(model_fn,
epoch=config["epoch"],
optimizer=get_tf_optimizer(
config["learning_algo"],
config["learning_rate"],
config["weight_decay"]))
trainer.train_and_evaluate()
# hyperparameters config.
config = {"class_num": 349, # output dimension
"features_num": 130, # 128 dimension + kcore + triangle count
"batch_size": 500,
"val_batch_size": 100,
"test_batch_size":100,
"categorical_attrs_desc": "",
"hidden_dim": 256,
"in_drop_rate": 0.5,
"hops_num": 2,
"neighs_num": [5, 10],
"full_graph_mode": False,
"agg_type": "gcn", # mean, sum
"learning_algo": "adam",
"learning_rate": 0.01,
"weight_decay": 0.0005,
"epoch": 5,
"node_type": "paper",
"edge_type": "cites"}
# Start training and evaluating
train(config, lg)
| 0.791902 | 0.907681 |
```
import pandas as pd, json, numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
cluster=json.loads(file('../json/cluster.json','r').read())
citysave=json.loads(file('../json/citysave.json','r').read())
import wolframalpha
app_id='RV7XH4-KUYXP36YXX'
#[email protected]
client = wolframalpha.Client(app_id)
unicities={}
for i in cluster:
if cluster[i] not in unicities:
unicities[cluster[i]]=citysave[i]['country']
G={}
error=[]
uk=unicities.keys()
for h in range(1800,2700):
c=uk[h]
if c not in G.keys()+error:
print h,
ys={"pop":0,"nearby":{}}
q='population of '+c+', '+unicities[c]
try:
res = client.query(q)
for i in range(len(res['pod'])):
if res['pod'][i]['@title']=="Result":
x=res['pod'][i]['subpod']['plaintext']
popul=x[:x.find('people')-1]
if 'mill' in popul:
popul=float(popul[:popul.find('mill')-1])*1000000.0
ys['pop']=int(popul)
if res['pod'][i]['@title']=="Nearby cities":
x=res['pod'][i]['subpod']['plaintext'].split('\n')
for y in x[:-1]:
people=y[y.rfind('|')+2:y.find('people')-1]
if 'mill' in people:
people=float(people[:people.find('mill')-1])*1000000.0
km=float(y[y.find('|')+2:y.find('km')-1])
ys['nearby'][y.split('|')[0].split(',')[0].strip()]={"km":km,"people":int(people)}
G[c]=ys
except: error.append(c)
file("../json/pop3.json",'w').write(json.dumps(G))
file("../json/pop3e.json",'w').write(json.dumps(error))
```
Postprocessing
```
G=json.loads(file('../json/pop3.json','r').read())
error=json.loads(file('../json/pop3e.json','r').read())
error2=[]
import unicodedata
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def remove_accents(input_str):
nfkd_form = unicodedata.normalize('NFKD', input_str)
only_ascii = nfkd_form.encode('ASCII', 'ignore')
return only_ascii
for c in error:
if c not in G.keys()+error2:
ys={"pop":0,"nearby":{}}
q=remove_accents(strip_accents('population of '+c.split('/')[0]+', '+unicities[c]))
res = client.query(q)
if 'pod' in res:
for i in range(len(res['pod'])):
if res['pod'][i]['@title']=="Result":
x=res['pod'][i]['subpod']['plaintext']
if 'available' not in x:
popul=x[:x.find('people')-1]
if 'mill' in popul:
popul=popul[:popul.find('mill')-1]
if '|' in popul:popul=popul.split('|')[1].strip()
ys['pop']=int(float(popul)*1000000.0)
if res['pod'][i]['@title']=="Nearby cities":
x=res['pod'][i]['subpod']['plaintext'].split('\n')
if 'available' not in x:
for y in x[:-1]:
people=y[y.rfind('|')+2:y.find('people')-1]
if 'mill' in people:
people=float(people[:people.find('mill')-1])*1000000.0
km=float(y[y.find('|')+2:y.find(' km ')])
ys['nearby'][y.split('|')[0].split(',')[0].strip()]={"km":km,"people":int(people)}
G[c]=ys
print 'success',c
else:
print 'error',c
error2.append(c)
file("../json/pop3b.json",'w').write(json.dumps(G))
file("../json/pop3eb.json",'w').write(json.dumps(error2))
print len(G),len(error),len(error2)
```
Re-parse the remaining errors
```
G=json.loads(file('../json/pop3b.json','r').read())
error=json.loads(file('../json/pop3eb.json','r').read())
error3=[]
error4=[]
uk=unicities.keys()
for h in range(1800,2700):
c=uk[h]
if c not in G:
error3.append(c)
print len(error3)
for c in error3:
if c not in G.keys()+error4:
ys={"pop":0,"nearby":{}}
q=remove_accents(strip_accents('population of '+c.split('/')[0]+', '+unicities[c]))
res = client.query(q)
good=True
if 'pod' in res:
for i in range(len(res['pod'])):
try:
if res['pod'][i]['@title']=="Result":
x=res['pod'][i]['subpod']['plaintext']
if 'available' not in x:
popul=x[:x.find('people')-1]
if 'mill' in popul:
popul=popul[:popul.find('mill')-1]
if '|' in popul:popul=popul.split('|')[1].strip()
ys['pop']=int(float(popul)*1000000.0)
if res['pod'][i]['@title']=="Nearby cities":
x=res['pod'][i]['subpod']['plaintext'].split('\n')
if 'available' not in x:
for y in x[:-1]:
people=y[y.rfind('|')+2:y.find('people')-1]
if 'mill' in people:
people=float(people[:people.find('mill')-1])*1000000.0
km=float(y[y.find('|')+2:y.find(' km ')])
ys['nearby'][y.split('|')[0].split(',')[0].strip()]={"km":km,"people":int(people)}
G[c]=ys
print 'success',c
good=False
except: pass
if good:
print 'error',c
error4.append(c)
file("../json/pop3c.json",'w').write(json.dumps(G))
file("../json/pop3ec.json",'w').write(json.dumps(error4))
print len(G),len(error4)
```
|
github_jupyter
|
import pandas as pd, json, numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
cluster=json.loads(file('../json/cluster.json','r').read())
citysave=json.loads(file('../json/citysave.json','r').read())
import wolframalpha
app_id='RV7XH4-KUYXP36YXX'
#[email protected]
client = wolframalpha.Client(app_id)
unicities={}
for i in cluster:
if cluster[i] not in unicities:
unicities[cluster[i]]=citysave[i]['country']
G={}
error=[]
uk=unicities.keys()
for h in range(1800,2700):
c=uk[h]
if c not in G.keys()+error:
print h,
ys={"pop":0,"nearby":{}}
q='population of '+c+', '+unicities[c]
try:
res = client.query(q)
for i in range(len(res['pod'])):
if res['pod'][i]['@title']=="Result":
x=res['pod'][i]['subpod']['plaintext']
popul=x[:x.find('people')-1]
if 'mill' in popul:
popul=float(popul[:popul.find('mill')-1])*1000000.0
ys['pop']=int(popul)
if res['pod'][i]['@title']=="Nearby cities":
x=res['pod'][i]['subpod']['plaintext'].split('\n')
for y in x[:-1]:
people=y[y.rfind('|')+2:y.find('people')-1]
if 'mill' in people:
people=float(people[:people.find('mill')-1])*1000000.0
km=float(y[y.find('|')+2:y.find('km')-1])
ys['nearby'][y.split('|')[0].split(',')[0].strip()]={"km":km,"people":int(people)}
G[c]=ys
except: error.append(c)
file("../json/pop3.json",'w').write(json.dumps(G))
file("../json/pop3e.json",'w').write(json.dumps(error))
G=json.loads(file('../json/pop3.json','r').read())
error=json.loads(file('../json/pop3e.json','r').read())
error2=[]
import unicodedata
def strip_accents(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def remove_accents(input_str):
nfkd_form = unicodedata.normalize('NFKD', input_str)
only_ascii = nfkd_form.encode('ASCII', 'ignore')
return only_ascii
for c in error:
if c not in G.keys()+error2:
ys={"pop":0,"nearby":{}}
q=remove_accents(strip_accents('population of '+c.split('/')[0]+', '+unicities[c]))
res = client.query(q)
if 'pod' in res:
for i in range(len(res['pod'])):
if res['pod'][i]['@title']=="Result":
x=res['pod'][i]['subpod']['plaintext']
if 'available' not in x:
popul=x[:x.find('people')-1]
if 'mill' in popul:
popul=popul[:popul.find('mill')-1]
if '|' in popul:popul=popul.split('|')[1].strip()
ys['pop']=int(float(popul)*1000000.0)
if res['pod'][i]['@title']=="Nearby cities":
x=res['pod'][i]['subpod']['plaintext'].split('\n')
if 'available' not in x:
for y in x[:-1]:
people=y[y.rfind('|')+2:y.find('people')-1]
if 'mill' in people:
people=float(people[:people.find('mill')-1])*1000000.0
km=float(y[y.find('|')+2:y.find(' km ')])
ys['nearby'][y.split('|')[0].split(',')[0].strip()]={"km":km,"people":int(people)}
G[c]=ys
print 'success',c
else:
print 'error',c
error2.append(c)
file("../json/pop3b.json",'w').write(json.dumps(G))
file("../json/pop3eb.json",'w').write(json.dumps(error2))
print len(G),len(error),len(error2)
G=json.loads(file('../json/pop3b.json','r').read())
error=json.loads(file('../json/pop3eb.json','r').read())
error3=[]
error4=[]
uk=unicities.keys()
for h in range(1800,2700):
c=uk[h]
if c not in G:
error3.append(c)
print len(error3)
for c in error3:
if c not in G.keys()+error4:
ys={"pop":0,"nearby":{}}
q=remove_accents(strip_accents('population of '+c.split('/')[0]+', '+unicities[c]))
res = client.query(q)
good=True
if 'pod' in res:
for i in range(len(res['pod'])):
try:
if res['pod'][i]['@title']=="Result":
x=res['pod'][i]['subpod']['plaintext']
if 'available' not in x:
popul=x[:x.find('people')-1]
if 'mill' in popul:
popul=popul[:popul.find('mill')-1]
if '|' in popul:popul=popul.split('|')[1].strip()
ys['pop']=int(float(popul)*1000000.0)
if res['pod'][i]['@title']=="Nearby cities":
x=res['pod'][i]['subpod']['plaintext'].split('\n')
if 'available' not in x:
for y in x[:-1]:
people=y[y.rfind('|')+2:y.find('people')-1]
if 'mill' in people:
people=float(people[:people.find('mill')-1])*1000000.0
km=float(y[y.find('|')+2:y.find(' km ')])
ys['nearby'][y.split('|')[0].split(',')[0].strip()]={"km":km,"people":int(people)}
G[c]=ys
print 'success',c
good=False
except: pass
if good:
print 'error',c
error4.append(c)
file("../json/pop3c.json",'w').write(json.dumps(G))
file("../json/pop3ec.json",'w').write(json.dumps(error4))
print len(G),len(error4)
| 0.04788 | 0.30832 |
## Step 0: Latent Dirichlet Allocation ##
LDA is used to assign the text in a document to particular topics. It builds a topic-per-document model and a words-per-topic model, both modeled as Dirichlet distributions (a tiny generative sketch follows the list below).
* Each document is modeled as a multinomial distribution of topics and each topic is modeled as a multinomial distribution of words.
* LDA assumes that every chunk of text we feed into it will contain words that are somehow related. Therefore, choosing the right corpus of data is crucial.
* It also assumes documents are produced from a mixture of topics. Those topics then generate words based on their probability distribution.
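To make these generative assumptions concrete, here is a tiny illustrative sketch (not part of the original notebook) that samples a document-topic mixture and topic-word distributions from Dirichlet priors with NumPy; the toy vocabulary, topic count, and concentration values below are arbitrary choices for illustration:
```python
import numpy as np

rng = np.random.default_rng(0)
vocab = ["rain", "storm", "coast", "election", "vote", "minister"]   # toy vocabulary
num_topics, alpha, eta = 2, 0.5, 0.5                                 # assumed values

topic_word = rng.dirichlet([eta] * len(vocab), size=num_topics)   # words-per-topic distributions
doc_topic = rng.dirichlet([alpha] * num_topics)                   # topics-per-document mixture

# Generate a short "document": pick a topic for each word, then a word from that topic.
words = []
for _ in range(8):
    z = rng.choice(num_topics, p=doc_topic)                      # topic assignment for this word
    words.append(vocab[rng.choice(len(vocab), p=topic_word[z])])
print(doc_topic, words)
```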
## Step 1: Load the dataset
The dataset we'll use is a list of over one million news headlines published over a period of 15 years. We'll start by loading it from the `abcnews-date-text.csv` file.
```
'''
Load the dataset from the CSV and save it to 'data_text'
'''
import pandas as pd
data = pd.read_csv('abcnews-date-text.csv', error_bad_lines=False);
# We only need the Headlines text column from the data
data_text = data[:300000][['headline_text']];
data_text['index'] = data_text.index
documents = data_text
```
Let's glance at the dataset:
```
'''
Get the total number of documents
'''
print(len(documents))
documents[:5]
```
## Step 2: Data Preprocessing ##
We will perform the following steps:
* **Tokenization**: Split the text into sentences and the sentences into words. Lowercase the words and remove punctuation.
* Words that have fewer than 3 characters are removed.
* All **stopwords** are removed.
* Words are **lemmatized** - words in third person are changed to first person and verbs in past and future tenses are changed into present.
* Words are **stemmed** - words are reduced to their root form.
```
'''
Loading Gensim and nltk libraries
'''
# pip install gensim
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import numpy as np
np.random.seed(400)
import nltk
nltk.download('wordnet')
```
### Lemmatizer Example
Before preprocessing our dataset, let's first look at a lemmatizing example. What would be the output if we lemmatized the word 'went':
```
print(WordNetLemmatizer().lemmatize('went', pos = 'v')) # past tense to present tense
```
### Stemmer Example
Let's also look at a stemming example. Let's throw a number of words at the stemmer and see how it deals with each one:
```
stemmer = SnowballStemmer("english")
original_words = ['caresses', 'flies', 'dies', 'mules', 'denied','died', 'agreed', 'owned',
'humbled', 'sized','meeting', 'stating', 'siezing', 'itemization','sensational',
'traditional', 'reference', 'colonizer','plotted']
singles = [stemmer.stem(plural) for plural in original_words]
pd.DataFrame(data={'original word':original_words, 'stemmed':singles })
'''
Write a function to perform the pre processing steps on the entire dataset
'''
def lemmatize_stemming(text):
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
# Tokenize and lemmatize
def preprocess(text):
result=[]
for token in gensim.utils.simple_preprocess(text) :
if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
# Apply lemmatize_stemming() on the token, then add it to the result list
result.append(lemmatize_stemming(token))
return result
'''
Preview a document after preprocessing
'''
document_num = 4310
doc_sample = documents[documents['index'] == document_num].values[0][0]
print("Original document: ")
words = []
for word in doc_sample.split(' '):
words.append(word)
print(words)
print("\n\nTokenized and lemmatized document: ")
print(preprocess(doc_sample))
documents[:5]
```
Let's now preprocess all the news headlines we have. To do that, let's use the [map](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.map.html) function from pandas to apply `preprocess()` to the `headline_text` column
**Note**: This may take a few minutes (it takes about 6 minutes on my laptop)
```
# TODO: preprocess all the headlines, saving the list of results as 'processed_docs'
processed_docs = documents['headline_text'].map(preprocess)
'''
Preview 'processed_docs'
'''
processed_docs[:10]
```
## Step 3.1: Bag of words on the dataset
Now let's create a dictionary from 'processed_docs' containing the number of times a word appears in the training set. To do that, let's pass `processed_docs` to [`gensim.corpora.Dictionary()`](https://radimrehurek.com/gensim/corpora/dictionary.html) and call it '`dictionary`'.
```
'''
Create a dictionary from 'processed_docs' containing the number of times a word appears
in the training set using gensim.corpora.Dictionary and call it 'dictionary'
'''
dictionary = gensim.corpora.Dictionary(processed_docs)
'''
Checking dictionary created
'''
count = 0
for k, v in dictionary.iteritems():
print(k, v)
count += 1
if count > 10:
break
```
**Gensim filter_extremes**
[`filter_extremes(no_below=5, no_above=0.5, keep_n=100000)`](https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.filter_extremes)
Filter out tokens that appear in
* less than no_below documents (absolute number) or
* more than no_above documents (fraction of total corpus size, not absolute number).
* after (1) and (2), keep only the first keep_n most frequent tokens (or keep all if None).
```
'''
OPTIONAL STEP
Remove very rare and very common words:
- words appearing less than 15 times
- words appearing in more than 10% of all documents
'''
# TODO: apply dictionary.filter_extremes() with the parameters mentioned above
dictionary.filter_extremes(no_below=15, no_above=0.1, keep_n=100000)
```
**Gensim doc2bow**
[`doc2bow(document)`](https://radimrehurek.com/gensim/corpora/dictionary.html#gensim.corpora.dictionary.Dictionary.doc2bow)
* Convert document (a list of words) into the bag-of-words format = list of (token_id, token_count) 2-tuples. Each word is assumed to be a tokenized and normalized string (either unicode or utf8-encoded). No further preprocessing is done on the words in document; apply tokenization, stemming etc. before calling this method.
```
'''
Create the Bag-of-words model for each document i.e for each document we create a dictionary reporting how many
words and how many times those words appear. Save this to 'bow_corpus'
'''
# TODO
bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
'''
Checking Bag of Words corpus for our sample document --> (token_id, token_count)
'''
bow_corpus[document_num]
'''
Preview BOW for our sample preprocessed document
'''
# Here document_num is document number 4310 which we have checked in Step 2
bow_doc_4310 = bow_corpus[document_num]
for i in range(len(bow_doc_4310)):
print("Word {} (\"{}\") appears {} time.".format(bow_doc_4310[i][0],
dictionary[bow_doc_4310[i][0]],
bow_doc_4310[i][1]))
```
## Step 3.2: TF-IDF on our document set ##
While performing TF-IDF on the corpus is not necessary for an LDA implementation using the gensim model, it is recommended. TF-IDF expects a bag-of-words (integer values) training corpus during initialization. During transformation, it will take a vector and return another vector of the same dimensionality.
*Please note: The author of Gensim recommends using the Bag of Words model as the standard procedure for LDA.*
**TF-IDF stands for "Term Frequency, Inverse Document Frequency".**
* It is a way to score the importance of words (or "terms") in a document based on how frequently they appear across multiple documents.
* If a word appears frequently in a document, it's important. Give the word a high score. But if a word appears in many documents, it's not a unique identifier. Give the word a low score.
* Therefore, common words like "the" and "for", which appear in many documents, will be scaled down. Words that appear frequently in a single document will be scaled up.
In other words:
* TF(w) = `(Number of times term w appears in a document) / (Total number of terms in the document)`.
* IDF(w) = `log(Total number of documents / Number of documents with term w in it)`; the base of the logarithm is a convention (the worked example below uses base 10).
**For example**
* Consider a document containing `100` words wherein the word 'tiger' appears 3 times.
* The term frequency (i.e., tf) for 'tiger' is then:
- `TF = (3 / 100) = 0.03`.
* Now, assume we have `10 million` documents and the word 'tiger' appears in `1000` of these. Then, the inverse document frequency (i.e., idf) is calculated as:
- `IDF = log10(10,000,000 / 1,000) = 4`.
* Thus, the Tf-idf weight is the product of these quantities:
- `TF-IDF = 0.03 * 4 = 0.12`.
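A quick check of the arithmetic above in plain Python (using a base-10 logarithm, as in this worked example):
```python
import math

tf = 3 / 100                            # term frequency of 'tiger' in the document
idf = math.log10(10_000_000 / 1_000)    # inverse document frequency
print(tf, idf, tf * idf)                # approximately 0.03, 4.0, 0.12
```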
```
'''
Create tf-idf model object using models.TfidfModel on 'bow_corpus' and save it to 'tfidf'
'''
from gensim import corpora, models
# TODO
tfidf = models.TfidfModel(corpus=bow_corpus)
'''
Apply transformation to the entire corpus and call it 'corpus_tfidf'
'''
# TODO
corpus_tfidf = tfidf[bow_corpus]
'''
Preview TF-IDF scores for our first document --> (token_id, tfidf score)
'''
from pprint import pprint
for doc in corpus_tfidf:
pprint(doc)
break
```
## Step 4.1: Running LDA using Bag of Words ##
We are going for 10 topics in the document corpus.
**We will be running LDA using all CPU cores to parallelize and speed up model training.**
Some of the parameters we will be tweaking are:
* **num_topics** is the number of requested latent topics to be extracted from the training corpus.
* **id2word** is a mapping from word ids (integers) to words (strings). It is used to determine the vocabulary size, as well as for debugging and topic printing.
* **workers** is the number of extra processes to use for parallelization. Uses all available cores by default.
* **alpha** and **eta** are hyperparameters that affect sparsity of the document-topic (theta) and topic-word (lambda) distributions. We will let these be the default values for now (the default value is `1/num_topics`); a short sketch of setting them explicitly follows this list.
- Alpha is the per document topic distribution.
* High alpha: Every document has a mixture of all topics(documents appear similar to each other).
* Low alpha: Every document has a mixture of very few topics
- Eta is the per topic word distribution.
* High eta: Each topic has a mixture of most words(topics appear similar to each other).
* Low eta: Each topic has a mixture of few words.
* **passes** is the number of training passes through the corpus. For example, if the training corpus has 50,000 documents, chunksize is 10,000, passes is 2, then online training is done in 10 updates:
* `#1 documents 0-9,999 `
* `#2 documents 10,000-19,999 `
* `#3 documents 20,000-29,999 `
* `#4 documents 30,000-39,999 `
* `#5 documents 40,000-49,999 `
* `#6 documents 0-9,999 `
* `#7 documents 10,000-19,999 `
* `#8 documents 20,000-29,999 `
* `#9 documents 30,000-39,999 `
* `#10 documents 40,000-49,999`
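For illustration only, here is a hedged sketch of how `alpha` and `eta` could be passed explicitly to `gensim.models.LdaMulticore` instead of keeping the defaults; the values below are arbitrary and not tuned, and the notebook itself uses the defaults:
```python
# Illustrative only: explicit (low) sparsity priors instead of the defaults.
lda_sparse = gensim.models.LdaMulticore(
    bow_corpus,
    num_topics=10,
    id2word=dictionary,
    passes=2,
    alpha=[0.01] * 10,   # low alpha: each document concentrates on a few topics (assumed value)
    eta=0.01,            # low eta: each topic concentrates on a few words (assumed value)
)
```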
```
# LDA mono-core -- fallback code in case LdaMulticore throws an error on your machine
# lda_model = gensim.models.LdaModel(bow_corpus,
# num_topics = 10,
# id2word = dictionary,
# passes = 50)
# LDA multicore
'''
Train your lda model using gensim.models.LdaMulticore and save it to 'lda_model'
'''
# TODO
lda_model = gensim.models.LdaMulticore(bow_corpus,num_topics=10, id2word=dictionary, passes=2)
'''
For each topic, we will explore the words occurring in that topic and their relative weights
'''
for idx, topic in lda_model.print_topics(-1):
print("Topic: {} \nWords: {}".format(topic, idx ))
print("\n")
```
### Classification of the topics ###
Using the words in each topic and their corresponding weights, what categories were you able to infer?
* 0:
* 1:
* 2:
* 3:
* 4:
* 5:
* 6:
* 7:
* 8:
* 9:
## Step 4.2 Running LDA using TF-IDF ##
```
'''
Define lda model using corpus_tfidf, again using gensim.models.LdaMulticore()
'''
# TODO
lda_model_tfidf = gensim.models.LdaMulticore(corpus_tfidf ,num_topics=10, id2word=dictionary, passes=4)
'''
For each topic, we will explore the words occuring in that topic and its relative weight
'''
for idx, topic in lda_model_tfidf.print_topics(-1):
print("Topic: {} Word: {}".format(idx, topic))
print("\n")
```
### Classification of the topics ###
As we can see, when using TF-IDF, heavier weights are given to words that are less frequent, which results in nouns being factored in. That makes it harder to figure out the categories, as nouns can be hard to categorize. This goes to show that the models we apply depend on the type of text corpus we are dealing with.
Using the words in each topic and their corresponding weights, what categories could you find?
* 0:
* 1:
* 2:
* 3:
* 4:
* 5:
* 6:
* 7:
* 8:
* 9:
## Step 5.1: Performance evaluation by classifying sample document using LDA Bag of Words model ##
We will check to see where our test document would be classified.
```
'''
Text of sample document 4310
'''
processed_docs[4310]
'''
Check which topic our test document belongs to using the LDA Bag of Words model.
'''
document_num = 4310
# Our test document is document number 4310
# TODO
print(lda_model[bow_corpus[document_num]])
print("sorted")
for index, score in sorted(lda_model[bow_corpus[document_num]], key=lambda tup: -1*tup[1]):
print("\nScore: {}\t \nTopic: {}".format(score, lda_model.print_topic(index, 10)))
```
### It has the highest probability (`x`) to be part of the topic that we assigned as X, which is the accurate classification. ###
## Step 5.2: Performance evaluation by classifying sample document using LDA TF-IDF model ##
```
'''
Check which topic our test document belongs to using the LDA TF-IDF model.
'''
# Our test document is document number 4310
for index, score in sorted(lda_model_tfidf[bow_corpus[document_num]], key=lambda tup: -1*tup[1]):
print("\nScore: {}\t \nTopic: {}".format(score, lda_model_tfidf.print_topic(index, 10)))
```
### It has the highest probability (`x%`) to be part of the topic that we assigned as X. ###
## Step 6: Testing model on unseen document ##
```
unseen_document = "My favorite sports activities are running and swimming."
# Data preprocessing step for the unseen document
bow_vector = dictionary.doc2bow(preprocess(unseen_document))
for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1*tup[1]):
print("Score: {}\t Topic: {}".format(score, lda_model.print_topic(index, 5)))
```
The model correctly classifies the unseen document with 'x'% probability to the X category.
|
github_jupyter
|
'''
Load the dataset from the CSV and save it to 'data_text'
'''
import pandas as pd
data = pd.read_csv('abcnews-date-text.csv', error_bad_lines=False);
# We only need the Headlines text column from the data
data_text = data[:300000][['headline_text']];
data_text['index'] = data_text.index
documents = data_text
'''
Get the total number of documents
'''
print(len(documents))
documents[:5]
'''
Loading Gensim and nltk libraries
'''
# pip install gensim
import gensim
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS
from nltk.stem import WordNetLemmatizer, SnowballStemmer
from nltk.stem.porter import *
import numpy as np
np.random.seed(400)
import nltk
nltk.download('wordnet')
print(WordNetLemmatizer().lemmatize('went', pos = 'v')) # past tense to present tense
stemmer = SnowballStemmer("english")
original_words = ['caresses', 'flies', 'dies', 'mules', 'denied','died', 'agreed', 'owned',
'humbled', 'sized','meeting', 'stating', 'siezing', 'itemization','sensational',
'traditional', 'reference', 'colonizer','plotted']
singles = [stemmer.stem(plural) for plural in original_words]
pd.DataFrame(data={'original word':original_words, 'stemmed':singles })
'''
Write a function to perform the pre processing steps on the entire dataset
'''
def lemmatize_stemming(text):
return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
# Tokenize and lemmatize
def preprocess(text):
result=[]
for token in gensim.utils.simple_preprocess(text) :
if token not in gensim.parsing.preprocessing.STOPWORDS and len(token) > 3:
# Apply lemmatize_stemming() on the token, then add it to the result list
result.append(lemmatize_stemming(token))
return result
'''
Preview a document after preprocessing
'''
document_num = 4310
doc_sample = documents[documents['index'] == document_num].values[0][0]
print("Original document: ")
words = []
for word in doc_sample.split(' '):
words.append(word)
print(words)
print("\n\nTokenized and lemmatized document: ")
print(preprocess(doc_sample))
documents[:5]
# TODO: preprocess all the headlines, saving the list of results as 'processed_docs'
processed_docs = documents['headline_text'].map(preprocess)
'''
Preview 'processed_docs'
'''
processed_docs[:10]
'''
Create a dictionary from 'processed_docs' containing the number of times a word appears
in the training set using gensim.corpora.Dictionary and call it 'dictionary'
'''
dictionary = gensim.corpora.Dictionary(processed_docs)
'''
Checking dictionary created
'''
count = 0
for k, v in dictionary.iteritems():
print(k, v)
count += 1
if count > 10:
break
'''
OPTIONAL STEP
Remove very rare and very common words:
- words appearing less than 15 times
- words appearing in more than 10% of all documents
'''
# TODO: apply dictionary.filter_extremes() with the parameters mentioned above
dictionary.filter_extremes(no_below=15, no_above=0.1, keep_n=100000)
'''
Create the Bag-of-words model for each document i.e for each document we create a dictionary reporting how many
words and how many times those words appear. Save this to 'bow_corpus'
'''
# TODO
bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
'''
Checking Bag of Words corpus for our sample document --> (token_id, token_count)
'''
bow_corpus[document_num]
'''
Preview BOW for our sample preprocessed document
'''
# Here document_num is document number 4310 which we have checked in Step 2
bow_doc_4310 = bow_corpus[document_num]
for i in range(len(bow_doc_4310)):
print("Word {} (\"{}\") appears {} time.".format(bow_doc_4310[i][0],
dictionary[bow_doc_4310[i][0]],
bow_doc_4310[i][1]))
'''
Create tf-idf model object using models.TfidfModel on 'bow_corpus' and save it to 'tfidf'
'''
from gensim import corpora, models
# TODO
tfidf = models.TfidfModel(corpus=bow_corpus)
'''
Apply transformation to the entire corpus and call it 'corpus_tfidf'
'''
# TODO
corpus_tfidf = tfidf[bow_corpus]
'''
Preview TF-IDF scores for our first document --> (token_id, tfidf score)
'''
from pprint import pprint
for doc in corpus_tfidf:
pprint(doc)
break
# LDA mono-core -- fallback code in case LdaMulticore throws an error on your machine
# lda_model = gensim.models.LdaModel(bow_corpus,
# num_topics = 10,
# id2word = dictionary,
# passes = 50)
# LDA multicore
'''
Train your lda model using gensim.models.LdaMulticore and save it to 'lda_model'
'''
# TODO
lda_model = gensim.models.LdaMulticore(bow_corpus,num_topics=10, id2word=dictionary, passes=2)
'''
For each topic, we will explore the words occurring in that topic and their relative weights
'''
for idx, topic in lda_model.print_topics(-1):
print("Topic: {} \nWords: {}".format(topic, idx ))
print("\n")
'''
Define lda model using corpus_tfidf, again using gensim.models.LdaMulticore()
'''
# TODO
lda_model_tfidf = gensim.models.LdaMulticore(corpus_tfidf ,num_topics=10, id2word=dictionary, passes=4)
'''
For each topic, we will explore the words occurring in that topic and their relative weights
'''
for idx, topic in lda_model_tfidf.print_topics(-1):
print("Topic: {} Word: {}".format(idx, topic))
print("\n")
'''
Text of sample document 4310
'''
processed_docs[4310]
'''
Check which topic our test document belongs to using the LDA Bag of Words model.
'''
document_num = 4310
# Our test document is document number 4310
# TODO
print(lda_model[bow_corpus[document_num]])
print("sorted")
for index, score in sorted(lda_model[bow_corpus[document_num]], key=lambda tup: -1*tup[1]):
print("\nScore: {}\t \nTopic: {}".format(score, lda_model.print_topic(index, 10)))
'''
Check which topic our test document belongs to using the LDA TF-IDF model.
'''
# Our test document is document number 4310
for index, score in sorted(lda_model_tfidf[bow_corpus[document_num]], key=lambda tup: -1*tup[1]):
print("\nScore: {}\t \nTopic: {}".format(score, lda_model_tfidf.print_topic(index, 10)))
unseen_document = "My favorite sports activities are running and swimming."
# Data preprocessing step for the unseen document
bow_vector = dictionary.doc2bow(preprocess(unseen_document))
for index, score in sorted(lda_model[bow_vector], key=lambda tup: -1*tup[1]):
print("Score: {}\t Topic: {}".format(score, lda_model.print_topic(index, 5)))
| 0.144934 | 0.949949 |
```
# reload packages
%load_ext autoreload
%autoreload 2
```
### Choose GPU
```
%env CUDA_DEVICE_ORDER=PCI_BUS_ID
%env CUDA_VISIBLE_DEVICES=1
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if len(gpu_devices)>0:
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
print(gpu_devices)
tf.keras.backend.clear_session()
```
### dataset information
```
from datetime import datetime
dataset = "mnist"
dims = (28, 28, 1)
num_classes = 10
labels_per_class = 256 # full
batch_size = 128
datestring = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
datestring = (
str(dataset)
+ "_"
+ str(labels_per_class)
+ "____"
+ datestring
+ '_baseline'
)
print(datestring)
```
### Load packages
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
from IPython import display
import pandas as pd
import umap
import copy
import os, tempfile
```
### Load dataset
```
from tfumap.load_datasets import load_MNIST, mask_labels
X_train, X_test, X_valid, Y_train, Y_test, Y_valid = load_MNIST(flatten=False)
X_train.shape
if labels_per_class == "full":
X_labeled = X_train
Y_masked = Y_labeled = Y_train
else:
X_labeled, Y_labeled, Y_masked = mask_labels(
X_train, Y_train, labels_per_class=labels_per_class
)
```
### Build network
```
from tensorflow.keras import datasets, layers, models
from tensorflow_addons.layers import WeightNormalization
def conv_block(filts, name, kernel_size = (3, 3), padding = "same", **kwargs):
return WeightNormalization(
layers.Conv2D(
filts, kernel_size, activation=None, padding=padding, **kwargs
),
name="conv"+name,
)
#CNN13
#See:
#https://github.com/vikasverma1077/ICT/blob/master/networks/lenet.py
#https://github.com/brain-research/realistic-ssl-evaluation
lr_alpha = 0.1
dropout_rate = 0.5
num_classes = 10
input_shape = dims
model = models.Sequential()
model.add(tf.keras.Input(shape=input_shape))
### conv1a
name = '1a'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv1b
name = '1b'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv1c
name = '1c'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# max pooling
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid', name="mp1"))
# dropout
model.add(layers.Dropout(dropout_rate, name="drop1"))
### conv2a
name = '2a'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha))
### conv2b
name = '2b'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv2c
name = '2c'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# max pooling
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid', name="mp2"))
# dropout
model.add(layers.Dropout(dropout_rate, name="drop2"))
### conv3a
name = '3a'
model.add(conv_block(name = name, filts = 512, kernel_size = (3,3), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv3b
name = '3b'
model.add(conv_block(name = name, filts = 256, kernel_size = (1,1), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv3c
name = '3c'
model.add(conv_block(name = name, filts = 128, kernel_size = (1,1), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# average pooling
model.add(layers.AveragePooling2D(pool_size=(3, 3), strides=2, padding='valid'))
model.add(layers.Flatten())
model.add(layers.Dense(256, activation=None, name='z'))
model.add(WeightNormalization(layers.Dense(256, activation=None)))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelufc1'))
model.add(WeightNormalization(layers.Dense(256, activation=None)))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelufc2'))
model.add(WeightNormalization(layers.Dense(num_classes, activation=None)))
model.summary()
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_accuracy', min_delta=0, patience=100, verbose=1, mode='auto',
baseline=None, restore_best_weights=True
)
import tensorflow_addons as tfa
opt = tf.keras.optimizers.Adam(1e-4)
opt = tfa.optimizers.MovingAverage(opt)
loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.2, from_logits=True)
model.compile(opt, loss = loss, metrics=['accuracy'])
Y_valid_one_hot = tf.keras.backend.one_hot(
Y_valid, num_classes
)
Y_labeled_one_hot = tf.keras.backend.one_hot(
Y_labeled, num_classes
)
from livelossplot import PlotLossesKerasTF
# plot losses callback
plotlosses = PlotLossesKerasTF()
train_ds = (
tf.data.Dataset.from_tensor_slices((X_labeled, Y_labeled_one_hot))
.repeat()
.shuffle(len(X_labeled))
.batch(batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
)
steps_per_epoch = int(len(X_train)/ batch_size)
history = model.fit(
train_ds,
epochs=500,
validation_data=(X_valid, Y_valid_one_hot),
callbacks = [early_stopping, plotlosses],
steps_per_epoch = steps_per_epoch,
)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
submodel = tf.keras.models.Model(
[model.inputs[0]], [model.get_layer('z').output]
)
z = submodel.predict(X_train)
np.shape(z)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
z_valid = submodel.predict(X_valid)
np.shape(z_valid)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z_valid.reshape(len(z_valid), np.product(np.shape(z_valid)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_valid.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(embedding[:, 0], embedding[:, 1], c=Y_valid.flatten(), s= 1, alpha = 1, cmap = plt.cm.tab10)
predictions = model.predict(X_valid)
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(embedding[:, 0], embedding[:, 1], c=np.argmax(predictions, axis=1), s= 1, alpha = 1, cmap = plt.cm.tab10)
Y_test_one_hot = tf.keras.backend.one_hot(
Y_test, num_classes
)
result = model.evaluate(X_test, Y_test_one_hot)
```
### save results
```
# save score, valid embedding, weights, results
from tfumap.paths import MODEL_DIR, ensure_dir
save_folder = MODEL_DIR / 'semisupervised-keras' / dataset / str(labels_per_class) / datestring
ensure_dir(save_folder)
```
#### save weights
```
encoder = tf.keras.models.Model(
[model.inputs[0]], [model.get_layer('z').output]
)
encoder.save_weights((save_folder / "encoder").as_posix())
classifier = tf.keras.models.Model(
[tf.keras.Input(tensor=model.get_layer('weight_normalization').input)], [model.outputs[0]]
)
print([i.name for i in classifier.layers])
classifier.save_weights((save_folder / "classifier").as_posix())
```
#### save score
```
Y_test_one_hot = tf.keras.backend.one_hot(
Y_test, num_classes
)
result = model.evaluate(X_test, Y_test_one_hot)
np.save(save_folder / 'test_loss.npy', result)
```
#### save embedding
```
z = encoder.predict(X_train)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
np.save(save_folder / 'train_embedding.npy', embedding)
```
#### save results
```
import pickle
with open(save_folder / 'history.pickle', 'wb') as file_pi:
pickle.dump(history.history, file_pi)
```
|
github_jupyter
|
# reload packages
%load_ext autoreload
%autoreload 2
%env CUDA_DEVICE_ORDER=PCI_BUS_ID
%env CUDA_VISIBLE_DEVICES=1
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if len(gpu_devices)>0:
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
print(gpu_devices)
tf.keras.backend.clear_session()
from datetime import datetime
dataset = "mnist"
dims = (28, 28, 1)
num_classes = 10
labels_per_class = 256 # full
batch_size = 128
datestring = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
datestring = (
str(dataset)
+ "_"
+ str(labels_per_class)
+ "____"
+ datestring
+ '_baseline'
)
print(datestring)
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
from IPython import display
import pandas as pd
import umap
import copy
import os, tempfile
from tfumap.load_datasets import load_MNIST, mask_labels
X_train, X_test, X_valid, Y_train, Y_test, Y_valid = load_MNIST(flatten=False)
X_train.shape
if labels_per_class == "full":
X_labeled = X_train
Y_masked = Y_labeled = Y_train
else:
X_labeled, Y_labeled, Y_masked = mask_labels(
X_train, Y_train, labels_per_class=labels_per_class
)
from tensorflow.keras import datasets, layers, models
from tensorflow_addons.layers import WeightNormalization
def conv_block(filts, name, kernel_size = (3, 3), padding = "same", **kwargs):
return WeightNormalization(
layers.Conv2D(
filts, kernel_size, activation=None, padding=padding, **kwargs
),
name="conv"+name,
)
#CNN13
#See:
#https://github.com/vikasverma1077/ICT/blob/master/networks/lenet.py
#https://github.com/brain-research/realistic-ssl-evaluation
lr_alpha = 0.1
dropout_rate = 0.5
num_classes = 10
input_shape = dims
model = models.Sequential()
model.add(tf.keras.Input(shape=input_shape))
### conv1a
name = '1a'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv1b
name = '1b'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv1c
name = '1c'
model.add(conv_block(name = name, filts = 128, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# max pooling
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid', name="mp1"))
# dropout
model.add(layers.Dropout(dropout_rate, name="drop1"))
### conv2a
name = '2a'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha))
### conv2b
name = '2b'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv2c
name = '2c'
model.add(conv_block(name = name, filts = 256, kernel_size = (3,3), padding="same"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# max pooling
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2, padding='valid', name="mp2"))
# dropout
model.add(layers.Dropout(dropout_rate, name="drop2"))
### conv3a
name = '3a'
model.add(conv_block(name = name, filts = 512, kernel_size = (3,3), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv3b
name = '3b'
model.add(conv_block(name = name, filts = 256, kernel_size = (1,1), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
### conv3c
name = '3c'
model.add(conv_block(name = name, filts = 128, kernel_size = (1,1), padding="valid"))
model.add(layers.BatchNormalization(name="bn"+name))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelu'+name))
# average pooling
model.add(layers.AveragePooling2D(pool_size=(3, 3), strides=2, padding='valid'))
model.add(layers.Flatten())
model.add(layers.Dense(256, activation=None, name='z'))
model.add(WeightNormalization(layers.Dense(256, activation=None)))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelufc1'))
model.add(WeightNormalization(layers.Dense(256, activation=None)))
model.add(layers.LeakyReLU(alpha=lr_alpha, name = 'lrelufc2'))
model.add(WeightNormalization(layers.Dense(num_classes, activation=None)))
model.summary()
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_accuracy', min_delta=0, patience=100, verbose=1, mode='auto',
baseline=None, restore_best_weights=True
)
import tensorflow_addons as tfa
opt = tf.keras.optimizers.Adam(1e-4)
opt = tfa.optimizers.MovingAverage(opt)
loss = tf.keras.losses.CategoricalCrossentropy(label_smoothing=0.2, from_logits=True)
model.compile(opt, loss = loss, metrics=['accuracy'])
Y_valid_one_hot = tf.keras.backend.one_hot(
Y_valid, num_classes
)
Y_labeled_one_hot = tf.keras.backend.one_hot(
Y_labeled, num_classes
)
from livelossplot import PlotLossesKerasTF
# plot losses callback
plotlosses = PlotLossesKerasTF()
train_ds = (
tf.data.Dataset.from_tensor_slices((X_labeled, Y_labeled_one_hot))
.repeat()
.shuffle(len(X_labeled))
.batch(batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
)
steps_per_epoch = int(len(X_train)/ batch_size)
history = model.fit(
train_ds,
epochs=500,
validation_data=(X_valid, Y_valid_one_hot),
callbacks = [early_stopping, plotlosses],
steps_per_epoch = steps_per_epoch,
)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
submodel = tf.keras.models.Model(
[model.inputs[0]], [model.get_layer('z').output]
)
z = submodel.predict(X_train)
np.shape(z)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
z_valid = submodel.predict(X_valid)
np.shape(z_valid)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z_valid.reshape(len(z_valid), np.product(np.shape(z_valid)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_valid.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(embedding[:, 0], embedding[:, 1], c=Y_valid.flatten(), s= 1, alpha = 1, cmap = plt.cm.tab10)
predictions = model.predict(X_valid)
fig, ax = plt.subplots(figsize=(10,10))
ax.scatter(embedding[:, 0], embedding[:, 1], c=np.argmax(predictions, axis=1), s= 1, alpha = 1, cmap = plt.cm.tab10)
Y_test_one_hot = tf.keras.backend.one_hot(
Y_test, num_classes
)
result = model.evaluate(X_test, Y_test_one_hot)
# save score, valid embedding, weights, results
from tfumap.paths import MODEL_DIR, ensure_dir
save_folder = MODEL_DIR / 'semisupervised-keras' / dataset / str(labels_per_class) / datestring
ensure_dir(save_folder)
encoder = tf.keras.models.Model(
[model.inputs[0]], [model.get_layer('z').output]
)
encoder.save_weights((save_folder / "encoder").as_posix())
classifier = tf.keras.models.Model(
[tf.keras.Input(tensor=model.get_layer('weight_normalization').input)], [model.outputs[0]]
)
print([i.name for i in classifier.layers])
classifier.save_weights((save_folder / "classifier").as_posix())
Y_test_one_hot = tf.keras.backend.one_hot(
Y_test, num_classes
)
result = model.evaluate(X_test, Y_test_one_hot)
np.save(save_folder / 'test_loss.npy', result)
z = encoder.predict(X_train)
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
np.save(save_folder / 'train_embedding.npy', embedding)
import pickle
with open(save_folder / 'history.pickle', 'wb') as file_pi:
pickle.dump(history.history, file_pi)
| 0.650356 | 0.70147 |

# European Plain Vanilla Option
```
import rivapy
from rivapy import marketdata as mkt_data
from rivapy import enums as enums
import datetime as dt
import math
import matplotlib.pyplot as plt
from scipy.stats import norm
import random
import pandas as pd
%load_ext autoreload
%autoreload 2
#the next line is a jupyter internal command to show the matplotlib graphs within the notebook
%matplotlib inline
```
## Option Pricing Using *rivapy* European Vanilla Specification
### Create the necessary market data
As a first step, we need to create the market data necessary to conduct the valuation. Therefore, we need to construct a *discount-*, *funding-* and *borrowing curve*, a *forward-curve* as well as a *volatility surface*.
#### Create a discount-, funding-, and borrowing curve
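All three curves below are built from constant, continuously compounded rates. With the time to maturity measured in years as $t = d/365$, the discount factor attached to each pillar date is

$$DF(t) = e^{-r\,t},$$

which is exactly what the list comprehensions passed to `DiscountCurve` evaluate.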
```
refdate = dt.datetime(2021,1,1,0,0,0)
days_to_maturity = [1, 180, 365, 720, 3*365, 4*365, 10*365]
dates = [refdate + dt.timedelta(days=d) for d in days_to_maturity]
# discount factors from constant rate
disc_rate = 0.05
dc = rivapy.marketdata.DiscountCurve("DC", refdate, dates,
[math.exp(-d/365.0*disc_rate) for d in days_to_maturity])
borrow_rate = 0.02
bc = rivapy.marketdata.DiscountCurve("BC", refdate, dates,
[math.exp(-d/365.0*borrow_rate) for d in days_to_maturity])
funding_rate = 0.05
fc = rivapy.marketdata.DiscountCurve("FC", refdate, dates,
[math.exp(-d/365.0*funding_rate) for d in days_to_maturity])
```
#### Create a dividend table
```
refdate = dt.datetime(2021,1,1,0,0,0)
ex_dates = [refdate + dt.timedelta(days=i) for i in [365, 2*365, 3*365, 4*365]]
pay_dates = [d + dt.timedelta(days=2) for d in ex_dates]
tax_factors = [1.0, 1.0, 1.0, 1.0]
div_yield = [0, 0.0, 0.02, 0.02]
div_cash = [2.5, 2.5, 1.5, 1.5]
div_table = rivapy.marketdata.DividendTable('DIV', refdate, ex_dates, pay_dates, div_yield, div_cash, tax_factors)
```
#### Create a forward curve
To ensure comparability with the results from the Black-Scholes-Merton model, we assume a non-dividend-paying stock.
```
# Creation of a Forward curve
spot = 50.0
forward_curve = rivapy.marketdata.EquityForwardCurve(spot, fc, bc, div_table)
forward_curve.plot(days_end=5*365)
```
#### Create a volatility surface
In order to compare the results with the Black-Scholes-Merton model above, we use a flat volatility surface.
```
flat_param = rivapy.marketdata.VolatilityParametrizationFlat(0.3)
vol_surf = rivapy.marketdata.VolatilitySurface('TEST_SURFACE', refdate, forward_curve, enums.DayCounterType.Act365Fixed, flat_param)
```
### Setup the specification
The rivapy European Vanilla Specification requires the following mandatory arguments:
- object id (str)
- type ('CALL','PUT')
- expiry (datetime)
- strike (float)
In addition, the specification accepts the following arguments:
- issuer
- securitization level
- currency
- underlying id
- share ratio
- ex settle
- trade settle
Although the arguments *object id*, *issuer*, *securitization level* and *underlying id* are not relevant for the pricing process itself, they nevertheless have to be set, so we have to provide valid strings for them. Optional arguments are the *share ratio*, *holidays*, *ex settle* and *trade settle*.
In terms of the Black-Scholes-Merton model, we have thus already provided the *option type*, the *expiry* $T$ and the *strike price* $K$. The remaining inputs $S_0$, $r$ and $\sigma$ have not been set here; they were defined in the previous steps when creating the necessary market data.
The *Black76PricingData* requires a discount curve, pricing parameters, a pricing specification and a volatility surface. Furthermore, we need to provide a valuation date and set the pricing request. The underlying's spot price $S_0$ enters through the forward curve embedded in the volatility surface, which also provides the volatility $\sigma$, while $r$ is taken from the discount curve.
```
issuer = 'DBK'
seclevel = 'COLLATERALIZED'
currency = 'EUR'
tpe = 'CALL' # Change to 'PUT' if you want to calculate the price of a European put option.
expiry = refdate + dt.timedelta(days=365)
strike = 50
spec = rivapy.instruments.EuropeanVanillaSpecification('Test_call', tpe, expiry, strike,
issuer = issuer, sec_lvl = seclevel, curr='EUR',udl_id='ADS',share_ratio = 1)
```
### Setup the pricing data
```
prdata = rivapy.pricing.Black76PricingData(val_date = refdate,
spec = spec,
discount_curve = dc,
vol_surface = vol_surf,
pricing_request=(rivapy.pricing.ResultType.DELTA,
rivapy.pricing.ResultType.GAMMA))
prdata1 = rivapy.pricing.Black76PricingData(val_date = refdate,
spec = spec,
discount_curve = dc,
vol_surface = vol_surf,
pricing_request=(rivapy.pricing.ResultType.PRICE))
```
### Calculation of an option price using the rivapy European Vanilla Specification
After all necessary information has been set, the price of the option can be calculated using the *rivapy.pricing.price* function.
```
price = rivapy.pricing.price(prdata)
price.getPrice()
# delta = price.getDeltas()
# price.getDelta1D()
# delta.values()
```
### Exercises - rivapy European Vanilla Specification
- Calculate the price of a European put option using the rivapy European Vanilla Specification (a sketch to get started follows below).
- Modify the dividend table to see how dividends impact the option price.
- Calculate the price of a European call option using a discount curve with a non-constant discount rate.
- Use a different volatility model.
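A possible starting point for the first exercise is sketched below. It simply reuses the market data and constructor arguments defined above and switches the option type to `'PUT'`; it is meant as a sketch, not as the reference solution.
```
# Sketch for the first exercise: price the corresponding European put
put_spec = rivapy.instruments.EuropeanVanillaSpecification('Test_put', 'PUT', expiry, strike,
                            issuer = issuer, sec_lvl = seclevel, curr='EUR', udl_id='ADS', share_ratio = 1)
put_prdata = rivapy.pricing.Black76PricingData(val_date = refdate,
                            spec = put_spec,
                            discount_curve = dc,
                            vol_surface = vol_surf,
                            pricing_request=(rivapy.pricing.ResultType.PRICE))
put_price = rivapy.pricing.price(put_prdata)
put_price.getPrice()
```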
## General Remarks
### Implied volatility
Since the volatility in the option pricing formulas cannot be observed directly, one usually works with implied volatilities, i.e. the volatilities implied by option prices observed in the market. As the Black-Scholes-Merton and Black-76 formulas cannot be inverted analytically to express the volatility as a function of the other parameters, one needs to use an iterative root-finding procedure.
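As an illustration of such an iterative procedure, the following minimal sketch backs out the implied volatility of a Black-Scholes-Merton call price with a standard root finder. It is independent of rivapy; the helper names (`bs_call_price`, `implied_vol`) and the sample quote are chosen here purely for illustration.
```
import math
from scipy.stats import norm
from scipy.optimize import brentq

def bs_call_price(S0, K, T, r, sigma):
    # Black-Scholes-Merton price of a European call on a non-dividend-paying stock
    d1 = (math.log(S0 / K) + (r + 0.5 * sigma**2) * T) / (sigma * math.sqrt(T))
    d2 = d1 - sigma * math.sqrt(T)
    return S0 * norm.cdf(d1) - K * math.exp(-r * T) * norm.cdf(d2)

def implied_vol(observed_price, S0, K, T, r):
    # Search for the volatility at which the model price matches the observed price
    return brentq(lambda sigma: bs_call_price(S0, K, T, r, sigma) - observed_price, 1e-6, 5.0)

# hypothetical market quote for a call with S0=50, K=50, T=1 year, r=5%
implied_vol(7.12, S0=50.0, K=50.0, T=1.0, r=0.05)
```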
|
github_jupyter
|
import rivapy
from rivapy import marketdata as mkt_data
from rivapy import enums as enums
import datetime as dt
import math
import matplotlib.pyplot as plt
from scipy.stats import norm
import random
import pandas as pd
%load_ext autoreload
%autoreload 2
#the next line is a jupyter internal command to show the matplotlib graphs within the notebook
%matplotlib inline
refdate = dt.datetime(2021,1,1,0,0,0)
days_to_maturity = [1, 180, 365, 720, 3*365, 4*365, 10*365]
dates = [refdate + dt.timedelta(days=d) for d in days_to_maturity]
# discount factors from constant rate
disc_rate = 0.05
dc = rivapy.marketdata.DiscountCurve("DC", refdate, dates,
[math.exp(-d/365.0*disc_rate) for d in days_to_maturity])
borrow_rate = 0.02
bc = rivapy.marketdata.DiscountCurve("BC", refdate, dates,
[math.exp(-d/365.0*borrow_rate) for d in days_to_maturity])
funding_rate = 0.05
fc = rivapy.marketdata.DiscountCurve("FC", refdate, dates,
[math.exp(-d/365.0*funding_rate) for d in days_to_maturity])
refdate = dt.datetime(2021,1,1,0,0,0)
ex_dates = [refdate + dt.timedelta(days=i) for i in [365, 2*365, 3*365, 4*365]]
pay_dates = [d + dt.timedelta(days=2) for d in ex_dates]
tax_factors = [1.0, 1.0, 1.0, 1.0]
div_yield = [0, 0.0, 0.02, 0.02]
div_cash = [2.5, 2.5, 1.5, 1.5]
div_table = rivapy.marketdata.DividendTable('DIV', refdate, ex_dates, pay_dates, div_yield, div_cash, tax_factors)
# Creation of a Forward curve
spot = 50.0
forward_curve = rivapy.marketdata.EquityForwardCurve(spot, fc, bc, div_table)
forward_curve.plot(days_end=5*365)
flat_param = rivapy.marketdata.VolatilityParametrizationFlat(0.3)
vol_surf = rivapy.marketdata.VolatilitySurface('TEST_SURFACE', refdate, forward_curve, enums.DayCounterType.Act365Fixed, flat_param)
issuer = 'DBK'
seclevel = 'COLLATERALIZED'
currency = 'EUR'
tpe = 'CALL' # Change to 'PUT' if you want to calculate the price of a European put option.
expiry = refdate + dt.timedelta(days=365)
strike = 50
spec = rivapy.instruments.EuropeanVanillaSpecification('Test_call', tpe, expiry, strike,
issuer = issuer, sec_lvl = seclevel, curr='EUR',udl_id='ADS',share_ratio = 1)
prdata = rivapy.pricing.Black76PricingData(val_date = refdate,
spec = spec,
discount_curve = dc,
vol_surface = vol_surf,
pricing_request=(rivapy.pricing.ResultType.DELTA,
rivapy.pricing.ResultType.GAMMA))
prdata1 = rivapy.pricing.Black76PricingData(val_date = refdate,
spec = spec,
discount_curve = dc,
vol_surface = vol_surf,
pricing_request=(rivapy.pricing.ResultType.PRICE))
price = rivapy.pricing.price(prdata)
price.getPrice()
# delta = price.getDeltas()
# price.getDelta1D()
# delta.values()
| 0.481941 | 0.883939 |
<a href="https://colab.research.google.com/github/rucelfernandez/LinearAlgebra1T-2021-2022/blob/main/Assignment10.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#**Linear Algebra for ECE**
##**Laboratory 5: Linear Combination and Vector Spaces**
Now that you have a fundamental understanding of linear combinations, we'll try to visualize them using scientific programming.
###**Objectives**
At the end of this activity you will be able to:
1. Be familiar with representing linear combinations in the 2-dimensional plane.
2. Visualize spans using vector fields in Python.
3. Perform vector field operations using scientific programming.
##**Discussion**
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
####**Linear Combination**
A linear combination is the combination of linear scaling and addition of a vector with its bases/components.
We will try to visualize the vectors and their linear combinations by plotting them for a sample of real-valued scalars. Let's first try the vectors below:
$$X = \begin{bmatrix} 2\\5 \\\end{bmatrix} , Y = \begin{bmatrix} 7\\9 \\\end{bmatrix} $$
```
vectX = np.array([2,5])
vectY = np.array([7,9])
vectY
```
####**Span of single vectors**
As discussed in the lecture, the span of individual vectors can be represented by a line span. Let's take vector $X$ as an example.
$$X = c\cdot\begin{bmatrix} 2\\5 \\\end{bmatrix}$$
```
c = np.arange(-10,10,0.125)
plt.scatter(c*vectX[0],c*vectX[1])
plt.xlim(-10,10)
plt.ylim(-10,10)
plt.axhline(y=0, color='green')
plt.axvline(x=0, color='blue')
plt.grid()
plt.show()
```
$$Y = c\cdot\begin{bmatrix} 7\\9 \\\end{bmatrix}$$
```
vectX = np.array([2,5])
vectY = np.array([7,9])
c = np.arange(-20,20,1)
plt.scatter(c*vectY[0],c*vectY[1])
plt.xlim(-20,20)
plt.ylim(-20,20)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
```
####**Span of a linear combination of vectors**
So what if we are to plot the span of a linear combination of vectors? We can visualize as a plane on the 2-dimensional coordinate system. Let's take the span of the linear combination below:
$$S = \begin{Bmatrix} c_1 \cdot\begin{bmatrix} 1\\0 \\\end{bmatrix},
c_2 \cdot \begin{bmatrix} 1\\-1 \\\end{bmatrix}\end{Bmatrix} $$
```
vectA = np.array([1,0])
vectB = np.array([1,-1])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectA + vectB
spanRx = c1*vectA[0] + c2*vectB[0]
spanRy = c1*vectA[1] + c2*vectB[1]
#plt.scatter(R*vectA[0],R*vectA[1])
#plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
vectX = np.array([2,5])
vectY = np.array([7,9])
c = np.arange(-20,20,1)
plt.scatter(c*vectX[0],c*vectX[1])
plt.xlim(-20,20)
plt.ylim(-20,20)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
vectP = np.array([2,1])
vectQ = np.array([4,3])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectP + vectQ
spanRx = c1*vectP[0] + c2*vectQ[0]
spanRy = c1*vectP[1] + c2*vectQ[1]
#plt.scatter(R*vectA[0],R*vectA[1])
#plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
```
Take note that if the span of the vectors covers the 2-dimensional plane, we can say it has a rank of 2 or $\mathbb{R}^2$. But if the span of the linear combination of the vectors turns out to be a line, the vectors are said to be linearly dependent and the span has a rank of 1 or $\mathbb{R}^1$.
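A quick numerical check of this is sketched below using NumPy's `matrix_rank`; the first matrix uses the vectors $A$ and $B$ from the plots above, while the second matrix with collinear columns is made up purely for illustration.
```
import numpy as np

# Linearly independent columns span the plane -> rank 2
A = np.array([[1, 1],
              [0, -1]])    # columns are vectA and vectB from above
# Collinear columns only span a line -> rank 1 (illustrative example)
B = np.array([[2, 4],
              [5, 10]])    # second column is twice the first
print(np.linalg.matrix_rank(A), np.linalg.matrix_rank(B))
```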
###**Activity**
**Task 1**
Try different linear combinations using different scalar values. In your methodology, discuss the functions you have used, the linear equation and vector form of the linear combination, and the flowchart for declaring and displaying linear combinations. Please make sure that your flowchart uses only a few words and does not reproduce the entire code, as that is bad practice. In your results, display and discuss the linear combination visualization you made. Use the cells below to display the equations in LaTeX markdown together with your code.
$$M = \begin{bmatrix} 5\\2 \\\end{bmatrix} , Y = \begin{bmatrix} 9\\5 \\\end{bmatrix} $$
\
$$4x -3y + 7z = 0$$
```
vectM = np.array([5,9])
vectY = np.array([2,5])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectM + vectY
spanRx = c1*vectM[0] + c2*vectY[0]
spanRy = c1*vectM[1] + c2*vectY[1]
#plt.scatter(R*vectM[0],R*vectY[1])
#plt.scatter(R*vectM[0],R*vectY[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
```

###**Conclusion guide**
The use of NumPy and Matplotlib from the previous experiment was carried over into this lab to evaluate the scope of understanding of the topic. Linear combinations and vector spaces are covered and discussed in order to accomplish this. Linear combinations are produced by scalar-multiplying vectors and adding them together. Vector spaces, on the other hand, are sets that are closed under finite vector addition and scalar multiplication. The intended activity for this laboratory was achieved with the help of Google Colab and its features using the Python programming language.
After the discussion, I learned how to represent, plot and perform linear combinations of vectors using the Python programming language, and not just how to plot them but also how to visualize and interpret the graphs. Moreover, it is important to be careful when coding, since one mistake can force the programmer to redo the whole program.
Linear combinations are widely applied in real-life situations across many different fields. They can be used to solve problems ranging from simple to highly complex. For engineers, this method is greatly appreciated when solving circuit models and calculating speed, velocity, displacement, force, etc. In everyday situations, it is useful for computing variable costs and rates, budgeting, and even making predictions.
|
github_jupyter
|
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
vectX = np.array([2,5])
vectY = np.array([7,9])
vectY
c = np.arange(-10,10,0.125)
plt.scatter(c*vectX[0],c*vectX[1])
plt.xlim(-10,10)
plt.ylim(-10,10)
plt.axhline(y=0, color='green')
plt.axvline(x=0, color='blue')
plt.grid()
plt.show()
vectX = np.array([2,5])
vectY = np.array([7,9])
c = np.arange(-20,20,1)
plt.scatter(c*vectY[0],c*vectY[1])
plt.xlim(-20,20)
plt.ylim(-20,20)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
vectA = np.array([1,0])
vectB = np.array([1,-1])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectA + vectB
spanRx = c1*vectA[0] + c2*vectB[0]
spanRy = c1*vectA[1] + c2*vectB[1]
#plt.scatter(R*vectA[0],R*vectA[1])
#plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
vectX = np.array([2,5])
vectY = np.array([7,9])
c = np.arange(-20,20,1)
plt.scatter(c*vectX[0],c*vectX[1])
plt.xlim(-20,20)
plt.ylim(-20,20)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
vectP = np.array([2,1])
vectQ = np.array([4,3])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectP + vectQ
spanRx = c1*vectP[0] + c2*vectQ[0]
spanRy = c1*vectP[1] + c2*vectQ[1]
#plt.scatter(R*vectA[0],R*vectA[1])
#plt.scatter(R*vectB[0],R*vectB[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
vectM = np.array([5,9])
vectY = np.array([2,5])
R = np.arange(-10,10,1)
c1, c2 = np.meshgrid(R,R)
vectR = vectM + vectY
spanRx = c1*vectM[0] + c2*vectY[0]
spanRy = c1*vectM[1] + c2*vectY[1]
#plt.scatter(R*vectM[0],R*vectY[1])
#plt.scatter(R*vectM[0],R*vectY[1])
plt.scatter(spanRx,spanRy, s=5, alpha=0.75)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.grid()
plt.show()
| 0.448185 | 0.985072 |
# Deep learning the collisional cross sections of the peptide universe from a million experimental values
Florian Meier, Niklas D. Köhler, Andreas-David Brunner, Jean-Marc H. Wanka, Eugenia Voytik, Maximilian T. Strauss, Fabian J. Theis, Matthias Mann
Pre-print: https://doi.org/10.1101/2020.05.19.102285
Publication: pending
revised 09/2020
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
import time
from numba import njit, prange
import scipy.spatial as spatial
from scipy import stats
@njit
def mean_dist_pairwise(matrix, shape):
dist = np.zeros((shape,shape))
for i in prange(shape):
for j in prange(shape):
dist[j,i] = np.nanmean(matrix[:,i] - matrix[:,j])
return dist
def overlap_pairwise(matrix, shape):
dist = np.zeros((shape,shape))
for i in prange(shape):
for j in prange(shape):
x = matrix[:,i]
y = matrix[:,j]
mask = ~np.logical_or(np.isnan(x), np.isnan(y))
dist[j,i] = mask.sum()
return dist
def pearson_pairwise(matrix, shape, minpoints):
dist = np.zeros((shape,shape))
for i in prange(shape):
for j in prange(shape):
x = matrix[:,i]
y = matrix[:,j]
mask = ~np.logical_or(np.isnan(x), np.isnan(y))
if(mask.sum() > minpoints):
dist[j,i] = stats.pearsonr(np.compress(mask, x), np.compress(mask, y))[0]
else:
dist[j,i] = np.nan
return dist
```
#### Import raw data from MaxQuant output
```
# Load evidence.txt files from folder
filenames = glob.glob("data/evidence*.txt")
evidences = [pd.read_csv(filename, sep='\t', engine='python', header=0) for filename in filenames]
# Combine all evidences in one dataframe
evidence_all = pd.concat(evidences, sort=False, ignore_index = True)
# Clean up
del evidences
evidence_all.head()
# Drop reverse hits
# Drop features with no intensity value
# Drop charge 1 features
evidence_all = evidence_all.loc[(evidence_all['Reverse'] != '+') & \
(evidence_all['Intensity'] > 0) & \
(evidence_all['Charge'] != 1)]
```
### Evaluating the precision and utility of TIMS CCS measurements
```
# Select tryptic subset
selection = ['HeLa_Trypsin_1', 'HeLa_Trp_2', 'Drosophila_Trp', 'Yeast_Trypsin', 'Ecoli_trypsin', 'CElegans_Tryp']
evidence_tryptic = evidence_all.loc[evidence_all['Experiment'].isin(selection)]
len(set(evidence_tryptic['Raw file']))
# CCS values
# Keep only one evidence per raw file
# Maximum intensity
selection = ['Modified sequence', 'Sequence', 'Charge', 'Mass', 'm/z', 'CCS', 'Experiment', 'id', 'Intensity',
'Score', 'Length', 'Raw file']
evidence_agg = evidence_tryptic.loc[evidence_tryptic.groupby(
['Modified sequence', 'Charge', 'Raw file'])['Intensity'].idxmax()][selection]
evidence_pivot_tryptic_long_CCS = evidence_agg.pivot_table(index = ['Modified sequence', 'Charge'],
columns = 'Raw file',
values = 'CCS')
del evidence_agg
evidence_pivot_tryptic_long_CCS = evidence_pivot_tryptic_long_CCS.astype(np.float32)
len(evidence_tryptic), len(evidence_pivot_tryptic_long_CCS)
# RT values
# Keep only one evidence per raw file
# Maximum intensity
selection = ['Modified sequence', 'Sequence', 'Charge', 'Mass', 'm/z', 'CCS', 'Retention time', 'Experiment',
'id', 'Intensity', 'Score', 'Length', 'Raw file']
evidence_agg = evidence_tryptic.loc[evidence_tryptic.groupby(
['Modified sequence', 'Charge', 'Raw file'])['Intensity'].idxmax()][selection]
evidence_pivot_tryptic_long_RT = evidence_agg.pivot_table(index = ['Modified sequence', 'Charge'],
columns = 'Raw file',
values = 'Retention time')
del evidence_agg
evidence_pivot_tryptic_long_RT = evidence_pivot_tryptic_long_RT.astype(np.float32)
len(evidence_tryptic), len(evidence_pivot_tryptic_long_RT)
evidence_pivot_tryptic_long_RT.iloc[:, 0:48].columns
evidence_pivot_tryptic_long_RT.iloc[:, 144:168].columns
# Filter out peptides with only one occurrence to speed up and save memory
evidence_pivot_tryptic_CCS = evidence_pivot_tryptic_long_CCS.loc[
evidence_pivot_tryptic_long_CCS.isnull().sum(axis=1) < (len(set(evidence_tryptic['Raw file'])) - 1)]
evidence_pivot_tryptic_RT = evidence_pivot_tryptic_long_RT.loc[
evidence_pivot_tryptic_long_RT.isnull().sum(axis=1) < (len(set(evidence_tryptic['Raw file'])) - 1)]
len(evidence_pivot_tryptic_CCS), len(evidence_pivot_tryptic_long_CCS)
# Calculate pairwise Pearson correlation for retention time values
# requires > 4 shared data points for calculation
start = time.time()
evidence_tryptic_pearson_RT = pd.DataFrame(pearson_pairwise(np.array(evidence_pivot_tryptic_RT),
evidence_pivot_tryptic_RT.shape[1], 4))
end = time.time()
print((end - start)/60)
# Calculate pairwise Pearson correlation for CCS values
# requires > 4 shared data points for calculation
start = time.time()
evidence_tryptic_pearson_CCS = pd.DataFrame(pearson_pairwise(np.array(evidence_pivot_tryptic_CCS),
evidence_pivot_tryptic_CCS.shape[1], 4))
end = time.time()
print((end - start)/60)
# Calculate pairwise overlap
start = time.time()
evidence_tryptic_overlap = pd.DataFrame(overlap_pairwise(np.array(evidence_pivot_tryptic_RT),
evidence_pivot_tryptic_RT.shape[1]))
end = time.time()
print((end - start)/60)
evidence_tryptic_pearson_CCS.iloc[0:48, 144:168].unstack().hist();
# Pearson correlation (CCS) of the two HeLa data sets
np.round((evidence_tryptic_pearson_CCS.iloc[0:48, 144:168]).unstack().median(), 3)
evidence_tryptic_pearson_RT.iloc[0:48, 144:168].unstack().hist();
# Pearson correlation (retention time) of the two HeLa data sets
np.round((evidence_tryptic_pearson_RT.iloc[0:48, 144:168]).unstack().median(), 3)
evidence_tryptic_overlap.iloc[0:48, 144:168].unstack().hist();
# Mean number of overlapping features between the two HeLa data sets
np.round(evidence_tryptic_overlap.iloc[0:48, 144:168].unstack().mean())
plt.figure(figsize=(9,6))
plt.title('Pearson correlation, retention time')
plt.pcolor(evidence_tryptic_pearson_RT.iloc[0:48, 144:168], cmap='magma', vmin=0.9, vmax=1)
plt.colorbar()
plt.show()
plt.figure(figsize=(9,6))
plt.title('Pearson correlation, CCS')
plt.pcolor(evidence_tryptic_pearson_CCS.iloc[0:48, 144:168], cmap='magma', vmin=0.9, vmax=1)
plt.colorbar()
plt.show()
plt.figure(figsize=(9,6))
plt.title('Overlapping features')
plt.pcolor(evidence_tryptic_overlap.iloc[0:48, 144:168], cmap='magma')
plt.colorbar()
plt.show()
# keep only triangular matrices for figure
evidence_tryptic_pearson_RT = evidence_tryptic_pearson_RT.mask(
np.arange(len(evidence_tryptic_pearson_RT))[:,None] <= np.arange(len(evidence_tryptic_pearson_RT)))
evidence_tryptic_pearson_CCS = evidence_tryptic_pearson_CCS.mask(
np.arange(len(evidence_tryptic_pearson_CCS))[:,None] >= np.arange(len(evidence_tryptic_pearson_CCS)))
fig = plt.figure(figsize=(9,6))
ax1 = fig.add_subplot(111)
plt.pcolor(evidence_tryptic_pearson_RT, cmap='YlOrRd', vmin=0.95, vmax=1)
ax2 = ax1.twiny()
X2tick_location= ax1.xaxis.get_ticklocs()
ax2.set_xticks(X2tick_location)
ax2.set_xticklabels(X2tick_location)
plt.colorbar()
plt.savefig("figures/Figure_2_a_rt.pdf");
plt.show()
fig = plt.figure(figsize=(9,6))
ax1 = fig.add_subplot(111)
plt.pcolor(evidence_tryptic_pearson_CCS, cmap='YlOrRd', vmin=0.95, vmax=1)
ax3 = ax1.twiny()
X2tick_location= ax1.yaxis.get_ticklocs()
ax3.set_yticks(X2tick_location)
ax3.set_yticklabels(X2tick_location)
plt.colorbar()
plt.savefig("figures/Figure_2_a_ccs.pdf");
```
<b>Figure 2. Precision, accuracy and utility of experimental peptide CCS values.</b> a, Color-coded pairwise Pearson correlation values of peptide retention time (upper triangular matrix) and CCS values (lower triangular matrix) between 168 LC-MS/MS runs of fractionated tryptic digests. Experimental meta-data are indicated below the x-axis. White (n/a) indicates less than 5 data points for pairwise comparison.
```
start = time.time()
evidence_tryptic_distance_CCS = pd.DataFrame(mean_dist_pairwise(np.array(evidence_pivot_tryptic_CCS),
evidence_pivot_tryptic_CCS.shape[1]))
end = time.time()
print((end - start)/60)
# make triangular matrix
evidence_tryptic_distance_CCS = evidence_tryptic_distance_CCS.mask(
np.arange(len(evidence_tryptic_distance_CCS))[:,None] <= np.arange(len(evidence_tryptic_distance_CCS)))
plt.figure(figsize=(9,6))
plt.title('Pairwise mean distance ($\AA^2$)')
plt.pcolor(evidence_tryptic_distance_CCS, cmap='magma')
plt.colorbar()
plt.show()
evidence_tryptic_distance_CCS.unstack().abs().hist()
plt.ylabel('Count')
plt.xlabel('Abs. pairwise mean distance ($\AA^2$)');
print("Max. abs. distance: {:.1f} Å^2".format(evidence_tryptic_distance_CCS.unstack().abs().max()))
print("Median abs. distance: {:.1f} Å^2".format(evidence_tryptic_distance_CCS.unstack().abs().median()))
```
#### Import aligned dataset
```
evidence_pivot_aligned = pd.read_csv('output/evidence_pivot_aligned.csv')
evidence_pivot_aligned.head()
# Exclude Proteome Tools data
proteometools = set(evidence_all.loc[evidence_all['Experiment'].isin(
['Proteotypic', 'SRMATLAS', 'MissingGeneSet'])]['Raw file'])
evidence_pivot_aligned_endo = evidence_pivot_aligned.drop(proteometools, axis = 1)
len(evidence_pivot_aligned.columns), len(evidence_pivot_aligned_endo.columns)
runs = ['20190504_TIMS1_FlMe_SA_HeLa_frac03_C10_1_95',
'20190119_TIMS2_AnBr_SA_200ng_Drosophila_Trypsin_IRT_Fraction03_C7_01_4020']
x = evidence_pivot_aligned[runs[0]]
y = evidence_pivot_aligned[runs[1]]
mask = ~np.logical_or(np.isnan(x), np.isnan(y))
pcorr = stats.pearsonr(x[mask], y[mask])[0]
fig = plt.figure(figsize=(4,3))
ax = plt.axes()
plt.scatter(x, y, s = 30, alpha = 1)
plt.xlabel(runs[0])
plt.ylabel(runs[1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.plot(np.arange(300,700,1), np.arange(300,700,1), color = 'black')
plt.savefig("figures/Figure_2_b.pdf");
plt.show()
print("n = {0}".format(mask.sum()))
print("Pearson correlation: {:1.3f}".format(pcorr))
print("Median delta = {:1.2f}%".format(np.nanmedian((x-y)/y*100)))
print("Median absolute delta = {:1.2f}%".format(np.nanmedian(np.abs((x-y)/y*100))))
# Calculate coefficients of variation
evidence_pivot_aligned_endo_CV = evidence_pivot_aligned_endo.std(axis = 1, skipna = True) / \
evidence_pivot_aligned_endo.mean(axis = 1, skipna = True) * 100
print("Median relative standard deviation: {:10.2f}%".format(evidence_pivot_aligned_endo_CV.dropna().median()))
print("Unique sequences*charge with >1 measurements: {}".format(len(evidence_pivot_aligned_endo_CV.dropna())))
fig = plt.figure(figsize=(4,3))
ax = plt.axes()
plt.hist(x = evidence_pivot_aligned_endo_CV.dropna(), bins = 100, range = (0,5))
plt.xlabel('Coefficient of variation (%)')
plt.ylabel('Count')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig("figures/Figure_2_c.pdf");
plt.show()
def kdtree_worker(mass_tolerance, ccs_tolerance, masses, crossections):
found_points = []
results = []
tolerance = 0.1
points = np.array([np.log(masses)/mass_tolerance*1e6*tolerance, np.log(crossections)/ccs_tolerance*1e2*tolerance]).T
point_tree = spatial.cKDTree(points)
for i, point in enumerate(points):
n_found = point_tree.query_ball_point(point, tolerance, p=np.float('inf'))
results.append(len(n_found)-1)
ratio_kd = np.sum(np.array(results)>0)/len(results)
return ratio_kd
def kdtree_worker_count(mass_tolerance, ccs_tolerance, masses, crossections):
found_points = []
results = []
tolerance = 0.1
points = np.array([np.log(masses)/mass_tolerance*1e6*tolerance, np.log(crossections)/ccs_tolerance*1e2*tolerance]).T
point_tree = spatial.cKDTree(points)
for i, point in enumerate(points):
n_found = point_tree.query_ball_point(point, tolerance, p=np.float('inf'))
results.append(len(n_found))
return results
cmap = plt.get_cmap("RdYlBu")
colors = cmap(np.linspace(0, 1, num=20))
evidence_aligned = pd.read_csv('output/evidence_aligned.csv')
evidence_aligned.head()
# Select peptides with N-terminal R or K
evidence_aligned['lastAA'] = evidence_aligned['Sequence'].str[-1:]
evidence_tryptic = evidence_aligned.loc[(evidence_aligned['lastAA'] == 'R') | (evidence_aligned['lastAA'] == 'K')]
len(evidence_aligned), len(evidence_tryptic)
# Split by charge state
evidence_tryptic_cs2 = evidence_tryptic.loc[evidence_tryptic['Charge'] == 2]
evidence_tryptic_cs3 = evidence_tryptic.loc[evidence_tryptic['Charge'] == 3]
len(evidence_tryptic_cs2), len(evidence_tryptic_cs3)
set(evidence_aligned['Experiment'])
# Calculate interference ratios for charge state 2
CCS_tolerances = [0.1, 0.2, 0.3, 0.5, 0.7, 1.0, 1.5, 2.0, 3, 5, 10, 20, 50]
mass_tolerance = 1.5
evaluation_cs2 = []
for ccs_tolerance in CCS_tolerances:
ratio = kdtree_worker(mass_tolerance, ccs_tolerance, evidence_tryptic_cs2['m/z'], evidence_tryptic_cs2['CCS'])
evaluation_cs2.append((mass_tolerance, ccs_tolerance, ratio))
# Results, charge state 2
out2 = pd.DataFrame(evaluation_cs2)
out2.columns = ['Mass Tolerance', 'CCS Tolerance', 'Interference Ratio']
out2
CCS_tolerances = [0.1, 0.2, 0.3, 0.5, 0.7, 1.0, 1.5, 2.0, 3, 5, 10, 20, 50]
mass_tolerance = 1.5
evaluation_cs3 = []
for ccs_tolerance in CCS_tolerances:
ratio = kdtree_worker(mass_tolerance, ccs_tolerance, evidence_tryptic_cs3['m/z'], evidence_tryptic_cs3['CCS'])
evaluation_cs3.append((mass_tolerance, ccs_tolerance, ratio))
# Results, charge state 3
out3 = pd.DataFrame(evaluation_cs3)
out3.columns = ['Mass Tolerance', 'CCS Tolerance', 'Interference Ratio']
out3
fig = plt.figure(figsize=(4,3))
ax = plt.axes()
plt.scatter(out2['CCS Tolerance'], out2['Interference Ratio']*100, marker = 'x', label = 'charge 2', color = 'black')
plt.scatter(out3['CCS Tolerance'], out3['Interference Ratio']*100, marker = '+', label = 'charge 3', color = 'black')
plt.xlabel('CCS Tolerance (+/- %)')
plt.ylabel('Non-unique fraction (%)')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.ylim((0,100))
plt.xlim((0.05,60))
plt.xscale('log')
plt.legend()
plt.savefig("figures/Figure_2_d.pdf");
plt.show()
```
### Candidate peptide sequences in a given window
```
CCS_tolerances = [0.5, 1.5, 100]
mass_tolerance = 1.5
evaluation = []
for ccs_tolerance in CCS_tolerances:
candidates = kdtree_worker_count(mass_tolerance, ccs_tolerance, evidence_tryptic_cs2['m/z'], evidence_tryptic_cs2['CCS'])
evaluation.append((mass_tolerance, ccs_tolerance, candidates))
# no ion mobility
plt.hist(evaluation[2][2], align='left', bins=range(1,50))
plt.xticks(range(1, 50, 2), rotation=45);
# 1.5% mobility deviation
plt.hist(evaluation[1][2], align='left', bins=range(1,30))
plt.xticks(range(1, 30, 2), rotation=45);
# 0.5% mobility deviation
plt.hist(evaluation[0][2], align='left', bins=range(1,14))
plt.xticks(range(1, 14), rotation=45);
# Median number of doubly charged peptides in a +- 1.5 ppm x [0.5%, 1.5%, 100%] window
np.median(evaluation[0][2]), np.median(evaluation[1][2]), np.median(evaluation[2][2])
evaluation = []
for ccs_tolerance in CCS_tolerances:
candidates = kdtree_worker_count(mass_tolerance, ccs_tolerance, evidence_tryptic_cs3['m/z'], evidence_tryptic_cs3['CCS'])
evaluation.append((mass_tolerance, ccs_tolerance, candidates))
# no ion mobility
plt.hist(evaluation[2][2], align='left', bins=range(1, 16))
plt.xticks(range(1, 16));
# 1.5% mobility deviation
plt.hist(evaluation[1][2], align='left', bins=range(1,6))
plt.xticks(range(1, 6));
# 0.5% mobility deviation
plt.hist(evaluation[0][2], align='left', bins=range(1,6))
plt.xticks(range(1, 6));
# Median number of triply charged peptides in a +- 1.5 ppm x [0.5%, 1.5%, 100%] window
np.median(evaluation[0][2]), np.median(evaluation[1][2]), np.median(evaluation[2][2])
```
|
github_jupyter
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
import time
from numba import njit, prange
import scipy.spatial as spatial
from scipy import stats
@njit
def mean_dist_pairwise(matrix, shape):
dist = np.zeros((shape,shape))
for i in prange(shape):
for j in prange(shape):
dist[j,i] = np.nanmean(matrix[:,i] - matrix[:,j])
return dist
def overlap_pairwise(matrix, shape):
dist = np.zeros((shape,shape))
for i in prange(shape):
for j in prange(shape):
x = matrix[:,i]
y = matrix[:,j]
mask = ~np.logical_or(np.isnan(x), np.isnan(y))
dist[j,i] = mask.sum()
return dist
def pearson_pairwise(matrix, shape, minpoints):
dist = np.zeros((shape,shape))
for i in prange(shape):
for j in prange(shape):
x = matrix[:,i]
y = matrix[:,j]
mask = ~np.logical_or(np.isnan(x), np.isnan(y))
if(mask.sum() > minpoints):
dist[j,i] = stats.pearsonr(np.compress(mask, x), np.compress(mask, y))[0]
else:
dist[j,i] = np.nan
return dist
# Load evidence.txt files from folder
filenames = glob.glob("data/evidence*.txt")
evidences = [pd.read_csv(filename, sep='\t', engine='python', header=0) for filename in filenames]
# Combine all evidences in one dataframe
evidence_all = pd.concat(evidences, sort=False, ignore_index = True)
# Clean up
del evidences
evidence_all.head()
# Drop reverse hits
# Drop features with no intensity value
# Drop charge 1 features
evidence_all = evidence_all.loc[(evidence_all['Reverse'] != '+') & \
(evidence_all['Intensity'] > 0) & \
(evidence_all['Charge'] != 1)]
# Select tryptic subset
selection = ['HeLa_Trypsin_1', 'HeLa_Trp_2', 'Drosophila_Trp', 'Yeast_Trypsin', 'Ecoli_trypsin', 'CElegans_Tryp']
evidence_tryptic = evidence_all.loc[evidence_all['Experiment'].isin(selection)]
len(set(evidence_tryptic['Raw file']))
# CCS values
# Keep only one evidence per raw file
# Maximum intensity
selection = ['Modified sequence', 'Sequence', 'Charge', 'Mass', 'm/z', 'CCS', 'Experiment', 'id', 'Intensity',
'Score', 'Length', 'Raw file']
evidence_agg = evidence_tryptic.loc[evidence_tryptic.groupby(
['Modified sequence', 'Charge', 'Raw file'])['Intensity'].idxmax()][selection]
evidence_pivot_tryptic_long_CCS = evidence_agg.pivot_table(index = ['Modified sequence', 'Charge'],
columns = 'Raw file',
values = 'CCS')
del evidence_agg
evidence_pivot_tryptic_long_CCS = evidence_pivot_tryptic_long_CCS.astype(np.float32)
len(evidence_tryptic), len(evidence_pivot_tryptic_long_CCS)
# RT values
# Keep only one evidence per raw file
# Maximum intensity
selection = ['Modified sequence', 'Sequence', 'Charge', 'Mass', 'm/z', 'CCS', 'Retention time', 'Experiment',
'id', 'Intensity', 'Score', 'Length', 'Raw file']
evidence_agg = evidence_tryptic.loc[evidence_tryptic.groupby(
['Modified sequence', 'Charge', 'Raw file'])['Intensity'].idxmax()][selection]
evidence_pivot_tryptic_long_RT = evidence_agg.pivot_table(index = ['Modified sequence', 'Charge'],
columns = 'Raw file',
values = 'Retention time')
del evidence_agg
evidence_pivot_tryptic_long_RT = evidence_pivot_tryptic_long_RT.astype(np.float32)
len(evidence_tryptic), len(evidence_pivot_tryptic_long_RT)
evidence_pivot_tryptic_long_RT.iloc[:, 0:48].columns
evidence_pivot_tryptic_long_RT.iloc[:, 144:168].columns
# Filter out peptides with only one occurrence to speed up and save memory
evidence_pivot_tryptic_CCS = evidence_pivot_tryptic_long_CCS.loc[
evidence_pivot_tryptic_long_CCS.isnull().sum(axis=1) < (len(set(evidence_tryptic['Raw file'])) - 1)]
evidence_pivot_tryptic_RT = evidence_pivot_tryptic_long_RT.loc[
evidence_pivot_tryptic_long_RT.isnull().sum(axis=1) < (len(set(evidence_tryptic['Raw file'])) - 1)]
len(evidence_pivot_tryptic_CCS), len(evidence_pivot_tryptic_long_CCS)
# Calculate pairwise Pearson correlation for retention time values
# requires > 4 shared data points for calculation
start = time.time()
evidence_tryptic_pearson_RT = pd.DataFrame(pearson_pairwise(np.array(evidence_pivot_tryptic_RT),
evidence_pivot_tryptic_RT.shape[1], 4))
end = time.time()
print((end - start)/60)
# Calculate pairwise Pearson correlation for CCS values
# requires > 4 shared data points for calculation
start = time.time()
evidence_tryptic_pearson_CCS = pd.DataFrame(pearson_pairwise(np.array(evidence_pivot_tryptic_CCS),
evidence_pivot_tryptic_CCS.shape[1], 4))
end = time.time()
print((end - start)/60)
# Calculate pairwise overlap
start = time.time()
evidence_tryptic_overlap = pd.DataFrame(overlap_pairwise(np.array(evidence_pivot_tryptic_RT),
evidence_pivot_tryptic_RT.shape[1]))
end = time.time()
print((end - start)/60)
evidence_tryptic_pearson_CCS.iloc[0:48, 144:168].unstack().hist();
# Pearson correlation (CCS) of the two HeLa data sets
np.round((evidence_tryptic_pearson_CCS.iloc[0:48, 144:168]).unstack().median(), 3)
evidence_tryptic_pearson_RT.iloc[0:48, 144:168].unstack().hist();
# Pearson correlation (retention time) of the two HeLa data sets
np.round((evidence_tryptic_pearson_RT.iloc[0:48, 144:168]).unstack().median(), 3)
evidence_tryptic_overlap.iloc[0:48, 144:168].unstack().hist();
# Mean number of overlapping features between the two HeLa data sets
np.round(evidence_tryptic_overlap.iloc[0:48, 144:168].unstack().mean())
plt.figure(figsize=(9,6))
plt.title('Pearson correlation, retention time')
plt.pcolor(evidence_tryptic_pearson_RT.iloc[0:48, 144:168], cmap='magma', vmin=0.9, vmax=1)
plt.colorbar()
plt.show()
plt.figure(figsize=(9,6))
plt.title('Pearson correlation, CCS')
plt.pcolor(evidence_tryptic_pearson_CCS.iloc[0:48, 144:168], cmap='magma', vmin=0.9, vmax=1)
plt.colorbar()
plt.show()
plt.figure(figsize=(9,6))
plt.title('Overlapping features')
plt.pcolor(evidence_tryptic_overlap.iloc[0:48, 144:168], cmap='magma')
plt.colorbar()
plt.show()
# keep only triangular matrices for figure
evidence_tryptic_pearson_RT = evidence_tryptic_pearson_RT.mask(
np.arange(len(evidence_tryptic_pearson_RT))[:,None] <= np.arange(len(evidence_tryptic_pearson_RT)))
evidence_tryptic_pearson_CCS = evidence_tryptic_pearson_CCS.mask(
np.arange(len(evidence_tryptic_pearson_CCS))[:,None] >= np.arange(len(evidence_tryptic_pearson_CCS)))
fig = plt.figure(figsize=(9,6))
ax1 = fig.add_subplot(111)
plt.pcolor(evidence_tryptic_pearson_RT, cmap='YlOrRd', vmin=0.95, vmax=1)
ax2 = ax1.twiny()
X2tick_location= ax1.xaxis.get_ticklocs()
ax2.set_xticks(X2tick_location)
ax2.set_xticklabels(X2tick_location)
plt.colorbar()
plt.savefig("figures/Figure_2_a_rt.pdf");
plt.show()
fig = plt.figure(figsize=(9,6))
ax1 = fig.add_subplot(111)
plt.pcolor(evidence_tryptic_pearson_CCS, cmap='YlOrRd', vmin=0.95, vmax=1)
ax3 = ax1.twiny()
X2tick_location= ax1.yaxis.get_ticklocs()
ax3.set_yticks(X2tick_location)
ax3.set_yticklabels(X2tick_location)
plt.colorbar()
plt.savefig("figures/Figure_2_a_ccs.pdf");
start = time.time()
evidence_tryptic_distance_CCS = pd.DataFrame(mean_dist_pairwise(np.array(evidence_pivot_tryptic_CCS),
evidence_pivot_tryptic_CCS.shape[1]))
end = time.time()
print((end - start)/60)
# make triangular matrix
evidence_tryptic_distance_CCS = evidence_tryptic_distance_CCS.mask(
np.arange(len(evidence_tryptic_distance_CCS))[:,None] <= np.arange(len(evidence_tryptic_distance_CCS)))
plt.figure(figsize=(9,6))
plt.title('Pairwise mean distance ($\AA^2$)')
plt.pcolor(evidence_tryptic_distance_CCS, cmap='magma')
plt.colorbar()
plt.show()
evidence_tryptic_distance_CCS.unstack().abs().hist()
plt.ylabel('Count')
plt.xlabel('Abs. pairwise mean distance ($\AA^2$)');
print("Max. abs. distance: {:.1f} Å^2".format(evidence_tryptic_distance_CCS.unstack().abs().max()))
print("Median abs. distance: {:.1f} Å^2".format(evidence_tryptic_distance_CCS.unstack().abs().median()))
evidence_pivot_aligned = pd.read_csv('output/evidence_pivot_aligned.csv')
evidence_pivot_aligned.head()
# Exclude Proteome Tools data
proteometools = set(evidence_all.loc[evidence_all['Experiment'].isin(
['Proteotypic', 'SRMATLAS', 'MissingGeneSet'])]['Raw file'])
evidence_pivot_aligned_endo = evidence_pivot_aligned.drop(proteometools, axis = 1)
len(evidence_pivot_aligned.columns), len(evidence_pivot_aligned_endo.columns)
runs = ['20190504_TIMS1_FlMe_SA_HeLa_frac03_C10_1_95',
'20190119_TIMS2_AnBr_SA_200ng_Drosophila_Trypsin_IRT_Fraction03_C7_01_4020']
x = evidence_pivot_aligned[runs[0]]
y = evidence_pivot_aligned[runs[1]]
mask = ~np.logical_or(np.isnan(x), np.isnan(y))
pcorr = stats.pearsonr(x[mask], y[mask])[0]
fig = plt.figure(figsize=(4,3))
ax = plt.axes()
plt.scatter(x, y, s = 30, alpha = 1)
plt.xlabel(runs[0])
plt.ylabel(runs[1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.plot(np.arange(300,700,1), np.arange(300,700,1), color = 'black')
plt.savefig("figures/Figure_2_b.pdf");
plt.show()
print("n = {0}".format(mask.sum()))
print("Pearson correlation: {:1.3f}".format(pcorr))
print("Median delta = {:1.2f}%".format(np.nanmedian((x-y)/y*100)))
print("Median absolute delta = {:1.2f}%".format(np.nanmedian(np.abs((x-y)/y*100))))
# Calculate coefficients of variation
evidence_pivot_aligned_endo_CV = evidence_pivot_aligned_endo.std(axis = 1, skipna = True) / \
evidence_pivot_aligned_endo.mean(axis = 1, skipna = True) * 100
print("Median relative standard deviation: {:10.2f}%".format(evidence_pivot_aligned_endo_CV.dropna().median()))
print("Unique sequences*charge with >1 measurements: {}".format(len(evidence_pivot_aligned_endo_CV.dropna())))
fig = plt.figure(figsize=(4,3))
ax = plt.axes()
plt.hist(x = evidence_pivot_aligned_endo_CV.dropna(), bins = 100, range = (0,5))
plt.xlabel('Coefficient of variation (%)')
plt.ylabel('Count')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.savefig("figures/Figure_2_c.pdf");
plt.show()
def kdtree_worker(mass_tolerance, ccs_tolerance, masses, crossections):
found_points = []
results = []
tolerance = 0.1
points = np.array([np.log(masses)/mass_tolerance*1e6*tolerance, np.log(crossections)/ccs_tolerance*1e2*tolerance]).T
point_tree = spatial.cKDTree(points)
for i, point in enumerate(points):
n_found = point_tree.query_ball_point(point, tolerance, p=np.float('inf'))
results.append(len(n_found)-1)
ratio_kd = np.sum(np.array(results)>0)/len(results)
return ratio_kd
def kdtree_worker_count(mass_tolerance, ccs_tolerance, masses, crossections):
found_points = []
results = []
tolerance = 0.1
points = np.array([np.log(masses)/mass_tolerance*1e6*tolerance, np.log(crossections)/ccs_tolerance*1e2*tolerance]).T
point_tree = spatial.cKDTree(points)
for i, point in enumerate(points):
n_found = point_tree.query_ball_point(point, tolerance, p=np.float('inf'))
results.append(len(n_found))
return results
cmap = plt.get_cmap("RdYlBu")
colors = cmap(np.linspace(0, 1, num=20))
evidence_aligned = pd.read_csv('output/evidence_aligned.csv')
evidence_aligned.head()
# Select tryptic peptides with C-terminal R or K
evidence_aligned['lastAA'] = evidence_aligned['Sequence'].str[-1:]
evidence_tryptic = evidence_aligned.loc[(evidence_aligned['lastAA'] == 'R') | (evidence_aligned['lastAA'] == 'K')]
len(evidence_aligned), len(evidence_tryptic)
# Split by charge state
evidence_tryptic_cs2 = evidence_tryptic.loc[evidence_tryptic['Charge'] == 2]
evidence_tryptic_cs3 = evidence_tryptic.loc[evidence_tryptic['Charge'] == 3]
len(evidence_tryptic_cs2), len(evidence_tryptic_cs3)
set(evidence_aligned['Experiment'])
# Calculate interference ratios for charge state 2
CCS_tolerances = [0.1, 0.2, 0.3, 0.5, 0.7, 1.0, 1.5, 2.0, 3, 5, 10, 20, 50]
mass_tolerance = 1.5
evaluation_cs2 = []
for ccs_tolerance in CCS_tolerances:
ratio = kdtree_worker(mass_tolerance, ccs_tolerance, evidence_tryptic_cs2['m/z'], evidence_tryptic_cs2['CCS'])
evaluation_cs2.append((mass_tolerance, ccs_tolerance, ratio))
# Results, charge state 2
out2 = pd.DataFrame(evaluation_cs2)
out2.columns = ['Mass Tolerance', 'CCS Tolerance', 'Interference Ratio']
out2
CCS_tolerances = [0.1, 0.2, 0.3, 0.5, 0.7, 1.0, 1.5, 2.0, 3, 5, 10, 20, 50]
mass_tolerance = 1.5
evaluation_cs3 = []
for ccs_tolerance in CCS_tolerances:
ratio = kdtree_worker(mass_tolerance, ccs_tolerance, evidence_tryptic_cs3['m/z'], evidence_tryptic_cs3['CCS'])
evaluation_cs3.append((mass_tolerance, ccs_tolerance, ratio))
# Results, charge state 3
out3 = pd.DataFrame(evaluation_cs3)
out3.columns = ['Mass Tolerance', 'CCS Tolerance', 'Interference Ratio']
out3
fig = plt.figure(figsize=(4,3))
ax = plt.axes()
plt.scatter(out2['CCS Tolerance'], out2['Interference Ratio']*100, marker = 'x', label = 'charge 2', color = 'black')
plt.scatter(out3['CCS Tolerance'], out3['Interference Ratio']*100, marker = '+', label = 'charge 3', color = 'black')
plt.xlabel('CCS Tolerance (+/- %)')
plt.ylabel('Non-unique fraction (%)')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
plt.ylim((0,100))
plt.xlim((0.05,60))
plt.xscale('log')
plt.legend()
plt.savefig("figures/Figure_2_d.pdf");
plt.show()
CCS_tolerances = [0.5, 1.5, 100]
mass_tolerance = 1.5
evaluation = []
for ccs_tolerance in CCS_tolerances:
candidates = kdtree_worker_count(mass_tolerance, ccs_tolerance, evidence_tryptic_cs2['m/z'], evidence_tryptic_cs2['CCS'])
evaluation.append((mass_tolerance, ccs_tolerance, candidates))
# no ion mobility
plt.hist(evaluation[2][2], align='left', bins=range(1,50))
plt.xticks(range(1, 50, 2), rotation=45);
# 1.5% mobility deviation
plt.hist(evaluation[1][2], align='left', bins=range(1,30))
plt.xticks(range(1, 30, 2), rotation=45);
# 0.5% mobility deviation
plt.hist(evaluation[0][2], align='left', bins=range(1,14))
plt.xticks(range(1, 14), rotation=45);
# Median number of doubly charged peptides in a +- 1.5 ppm x [0.5%, 1.5%, 100%] window
np.median(evaluation[0][2]), np.median(evaluation[1][2]), np.median(evaluation[2][2])
evaluation = []
for ccs_tolerance in CCS_tolerances:
candidates = kdtree_worker_count(mass_tolerance, ccs_tolerance, evidence_tryptic_cs3['m/z'], evidence_tryptic_cs3['CCS'])
evaluation.append((mass_tolerance, ccs_tolerance, candidates))
# no ion mobility
plt.hist(evaluation[2][2], align='left', bins=range(1, 16))
plt.xticks(range(1, 16));
# 1.5% mobility deviation
plt.hist(evaluation[1][2], align='left', bins=range(1,6))
plt.xticks(range(1, 6));
# 0.5% mobility deviation
plt.hist(evaluation[0][2], align='left', bins=range(1,6))
plt.xticks(range(1, 6));
# Median number of triply charged peptides in a +- 1.5 ppm x [0.5%, 1.5%, 100%] window
np.median(evaluation[0][2]), np.median(evaluation[1][2]), np.median(evaluation[2][2])
| 0.575946 | 0.882276 |