Deep Learning Models -- A collection of various deep learning architectures, models, and tips for TensorFlow and PyTorch in Jupyter Notebooks.
- Author: Sebastian Raschka
- GitHub Repository: https://github.com/rasbt/deeplearning-models
```
!pip install -q IPython
!pip install -q ipykernel
!pip install -q watermark
!pip install -q matplotlib
!pip install -q scikit-learn
!pip install -q pandas
!pip install -q pydot
!pip install -q hiddenlayer
!pip install -q graphviz
%load_ext watermark
%watermark -a 'Sebastian Raschka' -v -p torch
```
- Runs on CPU (not recommended here) or GPU (if available)
# Model Zoo -- Convolutional Neural Network (VGG19 Architecture)
Implementation of the VGG-19 architecture on CIFAR-10.
Reference for VGG-19:
- Simonyan, K., & Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
The following table (taken from Simonyan & Zisserman referenced above) summarizes the VGG19 architecture:
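As a compact reference, the VGG-19 convolutional configuration (column E of that table) can be written as the channel sequence often used to build such networks programmatically; the name `vgg19_cfg` below is just illustrative, and 'M' marks a max-pooling step:
```
# VGG-19 ("configuration E"): 16 conv layers (3x3) followed by 3 fully connected layers.
# Numbers are conv output channels; 'M' marks a 2x2 max-pooling step.
vgg19_cfg = [64, 64, 'M',
             128, 128, 'M',
             256, 256, 256, 256, 'M',
             512, 512, 512, 512, 'M',
             512, 512, 512, 512, 'M']
```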

## Imports
```
import numpy as np
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
```
## Settings and Dataset
```
##########################
### SETTINGS
##########################
# Device
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Device:', DEVICE)
# Hyperparameters
random_seed = 1
learning_rate = 0.001
num_epochs = 20
batch_size = 128
# Architecture
num_features = 3*32*32  # CIFAR-10 images are 3x32x32 (this value is not actually used by the model below)
num_classes = 10
##########################
### CIFAR-10 DATASET
##########################
# Note transforms.ToTensor() scales input images
# to 0-1 range
train_dataset = datasets.CIFAR10(root='data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = datasets.CIFAR10(root='data',
train=False,
transform=transforms.ToTensor())
train_loader = DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# Checking the dataset
for images, labels in train_loader:
print('Image batch dimensions:', images.shape)
print('Image label dimensions:', labels.shape)
break
```
## Model
```
##########################
### MODEL
##########################
class VGG19(torch.nn.Module):
def __init__(self, num_features, num_classes):
super(VGG19, self).__init__()
# calculate same padding:
# (w - k + 2*p)/s + 1 = o
# => p = (s(o-1) - w + k)/2
self.block_1 = nn.Sequential(
nn.Conv2d(in_channels=3,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
# (1(32-1)- 32 + 3)/2 = 1
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=64,
out_channels=64,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_2 = nn.Sequential(
nn.Conv2d(in_channels=64,
out_channels=128,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=128,
out_channels=128,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_3 = nn.Sequential(
nn.Conv2d(in_channels=128,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=256,
out_channels=256,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_4 = nn.Sequential(
nn.Conv2d(in_channels=256,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.block_5 = nn.Sequential(
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.Conv2d(in_channels=512,
out_channels=512,
kernel_size=(3, 3),
stride=(1, 1),
padding=1),
nn.ReLU(),
nn.MaxPool2d(kernel_size=(2, 2),
stride=(2, 2))
)
self.classifier = nn.Sequential(
nn.Linear(512, 4096),
nn.ReLU(True),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, num_classes)
)
for m in self.modules():
if isinstance(m, torch.nn.Conv2d):
#n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
#m.weight.data.normal_(0, np.sqrt(2. / n))
m.weight.detach().normal_(0, 0.05)
if m.bias is not None:
m.bias.detach().zero_()
elif isinstance(m, torch.nn.Linear):
m.weight.detach().normal_(0, 0.05)
m.bias.detach().zero_()
def forward(self, x):
x = self.block_1(x)
x = self.block_2(x)
x = self.block_3(x)
x = self.block_4(x)
x = self.block_5(x)
logits = self.classifier(x.view(-1, 512))
probas = F.softmax(logits, dim=1)
return logits, probas
torch.manual_seed(random_seed)
model = VGG19(num_features=num_features,
num_classes=num_classes)
model = model.to(DEVICE)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
import hiddenlayer as hl
hl.build_graph(model, torch.zeros([128, 3, 32, 32]).to(DEVICE))
```
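As a quick sanity check, a minimal sketch like the following (reusing `model` and `DEVICE` from above) shows why the classifier's first linear layer takes 512 inputs: five rounds of 2x2 max pooling reduce the 32x32 input to a 1x1 feature map with 512 channels.
```
# dummy forward pass through the convolutional blocks to inspect the feature-map shape
with torch.no_grad():
    dummy = torch.zeros(2, 3, 32, 32).to(DEVICE)
    feats = model.block_5(model.block_4(model.block_3(model.block_2(model.block_1(dummy)))))
print(feats.shape)  # expected: torch.Size([2, 512, 1, 1]), flattened to (2, 512) in forward()
```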
## Training
```
def compute_accuracy(model, data_loader):
model.eval()
correct_pred, num_examples = 0, 0
for i, (features, targets) in enumerate(data_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
logits, probas = model(features)
_, predicted_labels = torch.max(probas, 1)
num_examples += targets.size(0)
correct_pred += (predicted_labels == targets).sum()
return correct_pred.float()/num_examples * 100
def compute_epoch_loss(model, data_loader):
model.eval()
curr_loss, num_examples = 0., 0
with torch.no_grad():
for features, targets in data_loader:
features = features.to(DEVICE)
targets = targets.to(DEVICE)
logits, probas = model(features)
loss = F.cross_entropy(logits, targets, reduction='sum')
num_examples += targets.size(0)
curr_loss += loss
curr_loss = curr_loss / num_examples
return curr_loss
start_time = time.time()
for epoch in range(num_epochs):
model.train()
for batch_idx, (features, targets) in enumerate(train_loader):
features = features.to(DEVICE)
targets = targets.to(DEVICE)
### FORWARD AND BACK PROP
logits, probas = model(features)
cost = F.cross_entropy(logits, targets)
optimizer.zero_grad()
cost.backward()
### UPDATE MODEL PARAMETERS
optimizer.step()
### LOGGING
if not batch_idx % 50:
print ('Epoch: %03d/%03d | Batch %04d/%04d | Cost: %.4f'
%(epoch+1, num_epochs, batch_idx,
len(train_loader), cost))
model.eval()
with torch.set_grad_enabled(False): # save memory during inference
print('Epoch: %03d/%03d | Train: %.3f%% | Loss: %.3f' % (
epoch+1, num_epochs,
compute_accuracy(model, train_loader),
compute_epoch_loss(model, train_loader)))
print('Time elapsed: %.2f min' % ((time.time() - start_time)/60))
print('Total Training Time: %.2f min' % ((time.time() - start_time)/60))
```
## Evaluation
```
with torch.set_grad_enabled(False): # save memory during inference
print('Test accuracy: %.2f%%' % (compute_accuracy(model, test_loader)))
%watermark -iv
```
---
# Changing the input current when solving PyBaMM models
This notebook shows you how to change the input current when solving PyBaMM models. It also explains how to load in current data from a file, and how to add a user-defined current function. For more examples of different drive cycles see [here](https://github.com/pybamm-team/PyBaMM/tree/master/results/drive_cycles).
### Table of Contents
1. [Constant current](#constant)
1. [Loading in current data](#data)
1. [Adding your own current function](#function)
## Constant current <a name="constant"></a>
In this notebook we will change the input current from the default option, using the DFN as the example model (and the SPM later on when adding a user-defined current function). If you are not familiar with running a model in PyBaMM, please see [this](./models/SPM.ipynb) notebook for more details.
In PyBaMM, the current function is set using the parameter "Current function [A]". Below we load the DFN with the default parameters, and then change the current function to be an input parameter, so that we can change it easily later.
```
%pip install pybamm -q # install PyBaMM if it is not installed
import pybamm
import numpy as np
import os
os.chdir(pybamm.__path__[0]+'/..')
# create the model
model = pybamm.lithium_ion.DFN()
# set the default model parameters
param = model.default_parameter_values
# change the current function to be an input parameter
param["Current function [A]"] = "[input]"
```
We can set up a simulation in the usual way, making sure we pass in our updated parameters. We choose to solve with a 1.6 A current. In order to do this we must pass a dictionary of inputs whose keys are the parameter names and whose values are the values we want to use for that call to `solve`.
```
# set up simulation
simulation = pybamm.Simulation(model, parameter_values=param)
# solve the model at the given time points, passing the current as an input
t_eval = np.linspace(0, 600, 300)
simulation.solve(t_eval, inputs={"Current function [A]": 1.6})
# plot
simulation.plot()
```
PyBaMM can also simulate rest behaviour by setting the current function to zero:
```
# solve the model at the given time points
simulation.solve(t_eval, inputs={"Current function [A]": 0})
# plot
simulation.plot()
```
## Loading in current data <a name="data"></a>
To run drive cycles from data we can create an interpolant and pass it as the current function.
```
import pandas as pd # needed to read the csv data file
model = pybamm.lithium_ion.DFN()
# import drive cycle from file
drive_cycle = pd.read_csv("pybamm/input/drive_cycles/US06.csv", comment="#", header=None).to_numpy()
# load parameter values
param = model.default_parameter_values
# create interpolant - must be a function of *dimensional* time
timescale = param.evaluate(model.timescale)
current_interpolant = pybamm.Interpolant(drive_cycle, timescale * pybamm.t)
# set drive cycle
param["Current function [A]"] = current_interpolant
# set up simulation - for drive cycles we recommend using the CasadiSolver in "fast" mode
solver = pybamm.CasadiSolver(mode="fast")
simulation = pybamm.Simulation(model, parameter_values=param, solver=solver)
```
Note that when simulating drive cycles there is no need to pass a list of times at which to return the solution; the results are automatically returned at the time points in the data. If you would like the solution returned at times different to those in the data then you can pass an array of times `t_eval` to `solve` in the usual way, as in the sketch after the next cell.
```
# simulate US06 drive cycle (duration 600 seconds)
simulation.solve()
# plot
simulation.plot()
```
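For example, a minimal sketch reusing the `simulation` object above could request the solution on a uniform one-second grid over the roughly 600-second US06 cycle:
```
# request the solution at user-chosen times instead of the data time points
t_eval = np.linspace(0, 600, 601)  # 0 to 600 s in 1 s steps
simulation.solve(t_eval)
simulation.plot()
```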
Note that some solvers try to evaluate the model equations at a very large value of `t` during the first step. This may raise a warning if the time requested by the solver is outside of the range of the data provided. However, this does not affect the solve since this large timestep is rejected by the solver, and a suitable shorter initial step is taken.
## Adding your own current function <a name="function"></a>
A user defined current function can be passed to any model by specifying either a function or a set of data points for interpolation.
For example, you may want to simulate a sinusoidal current with amplitude A and frequency omega. In order to do so, you must first define the function
```
# create user-defined function
def my_fun(A, omega):
def current(t):
return A * pybamm.sin(2 * np.pi * omega * t)
return current
```
Note that the function returns a function which takes the input time.
Then the model may be loaded and the "Current function [A]" parameter updated to `my_fun` called with specific values of `A` and `omega`
```
model = pybamm.lithium_ion.SPM()
# load default parameter values
param = model.default_parameter_values
# set user defined current function
A = model.param.I_typ
omega = 0.1
param["Current function [A]"] = my_fun(A,omega)
```
Note that when `my_fun` is evaluated with `A` and `omega`, this creates a new function `current(t)` which can then be used in the expression tree. The model may then be solved in the usual way
```
# set up simulation
simulation = pybamm.Simulation(model, parameter_values=param)
# Example: simulate for 30 seconds
simulation_time = 30 # end time in seconds
npts = int(50 * simulation_time * omega) # need enough timesteps to resolve output
t_eval = np.linspace(0, simulation_time, npts)
solution = simulation.solve(t_eval)
label = ["Frequency: {} Hz".format(omega)]
# plot current and voltage
output_variables = ["Current [A]", "Terminal voltage [V]"]
simulation.plot(output_variables, labels=label)
```
---
## Discretisation
Discretisation is the process of transforming continuous variables into discrete variables by creating a set of contiguous intervals that span the range of the variable's values. Discretisation is also called **binning**, where bin is an alternative name for interval.
### Discretisation helps handle outliers and may improve value spread in skewed variables
Discretisation helps handle outliers by placing these values into the lower or higher intervals, together with the remaining inlier values of the distribution. Thus, these outlier observations no longer differ from the rest of the values at the tails of the distribution, as they are now all together in the same interval / bucket. In addition, by creating appropriate bins or intervals, discretisation can help spread the values of a skewed variable across a set of bins with equal number of observations.
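As a small illustration with made-up numbers, equal-frequency binning places an extreme value in the top bin together with the other large observations, whereas an equal-width scheme dedicates nearly empty bins to it:
```
import numpy as np
import pandas as pd

# skewed toy variable with one extreme outlier
values = pd.Series(list(np.random.exponential(scale=10, size=100)) + [1000])

print(pd.cut(values, 4).value_counts())   # equal width: the outlier sits in an almost empty bin
print(pd.qcut(values, 4).value_counts())  # equal frequency: roughly 25 observations per bin
```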
### Discretisation approaches
There are several approaches to transform continuous variables into discrete ones. Discretisation methods fall into 2 categories: **supervised and unsupervised**. Unsupervised methods do not use any information, other than the variable distribution, to create the contiguous bins in which the values will be placed. Supervised methods typically use target information in order to create the bins or intervals.
#### Unsupervised discretisation methods
- Equal width discretisation
- Equal frequency discretisation
- K-means discretisation
#### Supervised discretisation methods
- Discretisation using decision trees
In this lecture, I will describe **equal frequency discretisation**.
## Equal frequency discretisation
Equal frequency discretisation divides the range of possible values of the variable into N bins, where each bin carries the same number of observations. This is particularly useful for skewed variables, as it spreads the observations over the different bins equally. We find the interval boundaries by determining the quantiles.
Equal frequency discretisation using quantiles consists of dividing the continuous variable into N quantiles, N to be defined by the user.
Equal frequency binning is straightforward to implement and, by spreading the values of the observations more evenly, it may help boost the algorithm's performance. However, this arbitrary binning may also disrupt the relationship with the target.
## In this demo
We will learn how to perform equal frequency discretisation using the Titanic dataset with
- pandas and NumPy
- Feature-engine
- Scikit-learn
## Titanic dataset
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import KBinsDiscretizer
from feature_engine.discretisers import EqualFrequencyDiscretiser
# load the numerical variables of the Titanic Dataset
data = pd.read_csv('../titanic.csv',
usecols=['age', 'fare', 'survived'])
data.head()
# Let's separate into train and test set
X_train, X_test, y_train, y_test = train_test_split(
data[['age', 'fare']],
data['survived'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
```
The variables Age and Fare contain missing data, which I will fill by extracting a random sample of the variable.
```
def impute_na(data, variable):
# function to fill NA with a random sample
df = data.copy()
# random sampling
df[variable+'_random'] = df[variable]
# extract the random sample to fill the na
random_sample = X_train[variable].dropna().sample(
df[variable].isnull().sum(), random_state=0)
# pandas needs to have the same index in order to merge datasets
random_sample.index = df[df[variable].isnull()].index
df.loc[df[variable].isnull(), variable+'_random'] = random_sample
return df[variable+'_random']
# replace NA in both train and test sets
X_train['age'] = impute_na(data, 'age')
X_test['age'] = impute_na(data, 'age')
X_train['fare'] = impute_na(data, 'fare')
X_test['fare'] = impute_na(data, 'fare')
# let's explore the distribution of age
X_train[['age', 'fare']].hist(bins=30, figsize=(8,4))
plt.show()
```
## Equal frequency discretisation with pandas and NumPy
The interval limits are the quantile limits. We can find those out with pandas qcut.
```
# let's use pandas qcut (quantile cut) and I indicate that
# we want 10 bins.
# retbins = True indicates that I want to capture the limits
# of each interval (so I can then use them to cut the test set)
Age_discretised, intervals = pd.qcut(
    X_train['age'], 10, labels=None, retbins=True, precision=3, duplicates='raise')
pd.concat([Age_discretised, X_train['age']], axis=1).head(10)
```
We can see in the above output how by discretising using quantiles, we placed each Age observation within one interval. For example, age 29 was placed in the 26-30 interval, whereas age 63 was placed into the 49-80 interval.
Note how the interval widths are different.
We can visualise the interval cut points below:
```
intervals
```
And because we generated the bins using the quantile cut method, we should have roughly the same number of observations per bin. See below.
```
# roughly the same number of passengers per interval
Age_discretised.value_counts()
# we can also add labels instead of having the interval boundaries, to the bins, as follows:
labels = ['Q'+str(i) for i in range(1,11)]
labels
Age_discretised, intervals = pd.qcut(X_train['age'], 10, labels=labels,
                                     retbins=True,
                                     precision=3, duplicates='raise')
Age_discretised.head()
# to transform the test set:
# we use pandas cut method (instead of qcut) and
# pass the quantile edges calculated in the training set
X_test['Age_disc_label'] = pd.cut(x = X_test['age'], bins=intervals, labels=labels)
X_test['Age_disc'] = pd.cut(x = X_test['age'], bins=intervals)
X_test.head(10)
# let's check that we have equal frequency (equal number of observations per bin)
X_test.groupby('Age_disc')['age'].count().plot.bar()
```
We can see that the top intervals have fewer observations. This may happen with skewed distributions if we try to divide them into a large number of intervals. To make the value spread more homogeneous, we should discretise into fewer intervals, as in the sketch below.
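For instance, a minimal sketch reusing the variables above could repeat the cut with 5 quantiles instead of 10, giving a more even count per interval on the test set (the `Age_disc_5` column name is just illustrative):
```
# fewer quantiles -> more homogeneous number of observations per interval
Age_disc_5, intervals_5 = pd.qcut(X_train['age'], 5, retbins=True, duplicates='drop')
X_test['Age_disc_5'] = pd.cut(X_test['age'], bins=intervals_5)
X_test.groupby('Age_disc_5')['age'].count().plot.bar()
```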
## Equal frequency discretisation with Feature-Engine
```
# Let's separate into train and test set
X_train, X_test, y_train, y_test = train_test_split(
data[['age', 'fare']],
data['survived'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# replace NA in both train and test sets
X_train['age'] = impute_na(data, 'age')
X_test['age'] = impute_na(data, 'age')
X_train['fare'] = impute_na(data, 'fare')
X_test['fare'] = impute_na(data, 'fare')
# with feature engine we can automate the process for many variables
# in one line of code
disc = EqualFrequencyDiscretiser(q=10, variables = ['age', 'fare'])
disc.fit(X_train)
# in the binner dict, we can see the limits of the intervals. Note
# that the intervals have different widths
disc.binner_dict_
# transform train and test
train_t = disc.transform(X_train)
test_t = disc.transform(X_test)
train_t.head()
# and now let's explore the number of observations per bucket
t1 = train_t.groupby(['age'])['age'].count() / len(train_t)
t2 = test_t.groupby(['age'])['age'].count() / len(test_t)
tmp = pd.concat([t1, t2], axis=1)
tmp.columns = ['train', 'test']
tmp.plot.bar()
plt.xticks(rotation=0)
plt.ylabel('Fraction of observations per bin')
t1 = train_t.groupby(['fare'])['fare'].count() / len(train_t)
t2 = test_t.groupby(['fare'])['fare'].count() / len(test_t)
tmp = pd.concat([t1, t2], axis=1)
tmp.columns = ['train', 'test']
tmp.plot.bar()
plt.xticks(rotation=0)
plt.ylabel('Fraction of observations per bin')
```
Note how equal frequency discretisation obtains a better value spread across the different intervals.
## Equal frequency discretisation with Scikit-learn
```
# Let's separate into train and test set
X_train, X_test, y_train, y_test = train_test_split(
data[['age', 'fare']],
data['survived'],
test_size=0.3,
random_state=0)
X_train.shape, X_test.shape
# replace NA in both train and test sets
X_train['age'] = impute_na(data, 'age')
X_test['age'] = impute_na(data, 'age')
X_train['fare'] = impute_na(data, 'fare')
X_test['fare'] = impute_na(data, 'fare')
disc = KBinsDiscretizer(n_bins=10, encode='ordinal', strategy='quantile')
disc.fit(X_train[['age', 'fare']])
disc.bin_edges_
train_t = disc.transform(X_train[['age', 'fare']])
train_t = pd.DataFrame(train_t, columns = ['age', 'fare'])
train_t.head()
test_t = disc.transform(X_test[['age', 'fare']])
test_t = pd.DataFrame(test_t, columns = ['age', 'fare'])
t1 = train_t.groupby(['age'])['age'].count() / len(train_t)
t2 = test_t.groupby(['age'])['age'].count() / len(test_t)
tmp = pd.concat([t1, t2], axis=1)
tmp.columns = ['train', 'test']
tmp.plot.bar()
plt.xticks(rotation=0)
plt.ylabel('Fraction of observations per bin')
t1 = train_t.groupby(['fare'])['fare'].count() / len(train_t)
t2 = test_t.groupby(['fare'])['fare'].count() / len(test_t)
tmp = pd.concat([t1, t2], axis=1)
tmp.columns = ['train', 'test']
tmp.plot.bar()
plt.xticks(rotation=0)
plt.ylabel('Fraction of observations per bin')
```
---
This baseline reached the top 11% on the private leaderboard, with a rank of 457 out of 4,540 teams (it missed a bronze medal by only 2 places).
```
import numpy as np
import pandas as pd
import sys
import gc
from scipy.signal import hilbert
from scipy.signal import hann
from scipy.signal import convolve
pd.options.display.precision = 15
train_set = pd.read_csv('../input/train.csv', dtype={'acoustic_data': np.int16, 'time_to_failure': np.float32})
segments = int(np.floor(train_set.shape[0] / 150000))
X_train = pd.DataFrame(index=range(segments), dtype=np.float64)
y_train = pd.DataFrame(index=range(segments), dtype=np.float64, columns=['time_to_failure'])
def feature_generate(df,x,seg):
df.loc[seg, 'ave'] = x.mean()
df.loc[seg, 'std'] = x.std()
df.loc[seg, 'max'] = x.max()
df.loc[seg, 'min'] = x.min()
df.loc[seg, 'sum'] = x.sum()
df.loc[seg, 'mad'] = x.mad()
df.loc[seg, 'kurtosis'] = x.kurtosis()
df.loc[seg, 'skew'] = x.skew()
df.loc[seg, 'quant0_01'] = np.quantile(x,0.01)
df.loc[seg, 'quant0_05'] = np.quantile(x,0.05)
df.loc[seg, 'quant0_95'] = np.quantile(x,0.95)
df.loc[seg, 'quant0_99'] = np.quantile(x,0.99)
df.loc[seg, 'abs_min'] = np.abs(x).min()
df.loc[seg, 'abs_max'] = np.abs(x).max()
df.loc[seg, 'abs_mean'] = np.abs(x).mean()
df.loc[seg, 'abs_std'] = np.abs(x).std()
df.loc[seg, 'mean_change_abs'] = np.mean(np.diff(x))
df.loc[seg, 'max_to_min'] = x.max() / np.abs(x.min())
df.loc[seg, 'max_to_min_diff'] = x.max() - np.abs(x.min())
df.loc[seg, 'count_big'] = len(x[np.abs(x) > 500])
df.loc[seg, 'average_first_10000'] = x[:10000].mean()
df.loc[seg, 'average_last_10000'] = x[-10000:].mean()
df.loc[seg, 'average_first_50000'] = x[:50000].mean()
df.loc[seg, 'average_last_50000'] = x[-50000:].mean()
df.loc[seg, 'std_first_10000'] = x[:10000].std()
df.loc[seg, 'std_last_10000'] = x[-10000:].std()
df.loc[seg, 'std_first_50000'] = x[:50000].std()
df.loc[seg, 'std_last_50000'] = x[-50000:].std()
# np.percentile expects percentages in the 0-100 range
df.loc[seg, '10q'] = np.percentile(x, 10)
df.loc[seg, '25q'] = np.percentile(x, 25)
df.loc[seg, '50q'] = np.percentile(x, 50)
df.loc[seg, '75q'] = np.percentile(x, 75)
df.loc[seg, '90q'] = np.percentile(x, 90)
# percentiles of the absolute signal
df.loc[seg, 'abs_1q'] = np.percentile(np.abs(x), 1)
df.loc[seg, 'abs_5q'] = np.percentile(np.abs(x), 5)
df.loc[seg, 'abs_30q'] = np.percentile(np.abs(x), 30)
df.loc[seg, 'abs_60q'] = np.percentile(np.abs(x), 60)
df.loc[seg, 'abs_95q'] = np.percentile(np.abs(x), 95)
df.loc[seg, 'abs_99q'] = np.percentile(np.abs(x), 99)
df.loc[seg, 'hilbert_mean'] = np.abs(hilbert(x)).mean()
df.loc[seg, 'hann_window_mean'] = (convolve(x, hann(150), mode = 'same') / sum(hann(150))).mean()
for windows in [10, 100, 1000]:
x_roll_std = x.rolling(windows).std().dropna().values
x_roll_mean = x.rolling(windows).mean().dropna().values
df.loc[seg, 'avg_roll_std' + str(windows)] = x_roll_std.mean()
df.loc[seg, 'std_roll_std' + str(windows)] = x_roll_std.std()
df.loc[seg, 'max_roll_std' + str(windows)] = x_roll_std.max()
df.loc[seg, 'min_roll_std' + str(windows)] = x_roll_std.min()
df.loc[seg, '1q_roll_std' + str(windows)] = np.quantile(x_roll_std, 0.01)
df.loc[seg, '5q_roll_std' + str(windows)] = np.quantile(x_roll_std, 0.05)
df.loc[seg, '95q_roll_std' + str(windows)] = np.quantile(x_roll_std, 0.95)
df.loc[seg, '99q_roll_std' + str(windows)] = np.quantile(x_roll_std, 0.99)
df.loc[seg, 'av_change_abs_roll_std' + str(windows)] = np.mean(np.diff(x_roll_std))
df.loc[seg, 'abs_max_roll_std' + str(windows)] = np.abs(x_roll_std).max()
df.loc[seg, 'avg_roll_mean' + str(windows)] = x_roll_mean.mean()
df.loc[seg, 'std_roll_mean' + str(windows)] = x_roll_mean.std()
df.loc[seg, 'max_roll_mean' + str(windows)] = x_roll_mean.max()
df.loc[seg, 'min_roll_mean' + str(windows)] = x_roll_mean.min()
df.loc[seg, '1q_roll_mean' + str(windows)] = np.quantile(x_roll_mean, 0.01)
df.loc[seg, '5q_roll_mean' + str(windows)] = np.quantile(x_roll_mean, 0.05)
df.loc[seg, '95q_roll_mean' + str(windows)] = np.quantile(x_roll_mean, 0.95)
df.loc[seg, '99q_roll_mean' + str(windows)] = np.quantile(x_roll_mean, 0.99)
df.loc[seg, 'av_change_abs_roll_mean' + str(windows)] = np.mean(np.diff(x_roll_mean))
df.loc[seg, 'abs_max_roll_mean' + str(windows)] = np.abs(x_roll_mean).max()
return df
for s in range(segments):
seg = train_set.iloc[s*150000:s*150000+150000]
x = pd.Series(seg['acoustic_data'].values)
y = seg['time_to_failure'].values[-1]
y_train.loc[s, 'time_to_failure'] = y
X_train = feature_generate(X_train,x,s)
columns=X_train.columns
del train_set
gc.collect()
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
y_train = y_train.values.flatten()
gc.collect()
import xgboost as xgb
model = xgb.XGBRegressor(objective = 'reg:linear',
metric = 'mae',
tree_method = 'gpu_hist',
verbosity = 0)
%%time
model.fit(X_train,y_train)
from matplotlib import pyplot
print(model.feature_importances_)
pyplot.bar(range(len(model.feature_importances_)), model.feature_importances_)
pyplot.show()
from xgboost import plot_importance
plot_importance(model)
pyplot.show()
submission = pd.read_csv('../input/sample_submission.csv', index_col='seg_id')
X_test = pd.DataFrame(columns=columns, dtype=np.float64, index=submission.index)
for s in X_test.index:
seg = pd.read_csv('../input/test/' + s + '.csv')
x = pd.Series(seg['acoustic_data'].values)
X_test = feature_generate(X_test,x,s)
X_test = scaler.transform(X_test)
submission['time_to_failure'] = model.predict(X_test).clip(0, 16)
submission.to_csv('submission.csv')
```
---
# Exercise 4 - Optimizing Model Training
In [the previous exercise](./03%20-%20Compute%20Contexts.ipynb), you created cloud-based compute and used it when running a model training experiment. The benefit of cloud compute is that it offers a cost-effective way to scale out your experiment workflow and try different algorithms and parameters in order to optimize your model's performance; and that's what we'll explore in this exercise.
> **Important**: This exercise assumes you have completed the previous exercises in this series - specifically, you must have:
>
> - Created an Azure ML Workspace.
> - Uploaded the diabetes.csv data file to the workspace's default datastore.
> - Registered a **Diabetes Dataset** dataset in the workspace.
> - Provisioned an Azure ML Compute resource named **cpu-cluster**.
>
> If you haven't done that, now would be a good time - nobody's going to do it for you!
## Task 1: Connect to Your Workspace
The first thing you need to do is to connect to your workspace using the Azure ML SDK. Let's start by ensuring you still have the latest version installed (if you ended and restarted your Azure Notebooks session, the environment may have been reset)
```
!pip install --upgrade azureml-sdk[notebooks,automl,explain]
import azureml.core
print("Ready to use Azure ML", azureml.core.VERSION)
```
Now you're ready to connect to your workspace. When you created it in the previous exercise, you saved its configuration; so now you can simply load the workspace from its configuration file.
> **Note**: If the authenticated session with your Azure subscription has expired since you completed the previous exercise, you'll be prompted to reauthenticate.
```
from azureml.core import Workspace
# Load the workspace from the saved config file
ws = Workspace.from_config()
print('Ready to work with', ws.name)
```
Now let's get the Azure ML compute resource you created previously (or recreate it if you deleted it!)
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
cpu_cluster_name = "cpu-cluster"
# Verify that cluster does not exist already
try:
cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
print('Found existing cluster, use it.')
except ComputeTargetException:
# Create an AzureMl Compute resource (a container cluster)
compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D2_V2',
vm_priority='lowpriority',
max_nodes=4)
cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
cpu_cluster.wait_for_completion(show_output=True)
```
## Task 2: Use *Hyperdrive* to Determine Optimal Parameter Values
The remote compute you created is a four-node cluster, and you can take advantage of this to execute multiple experiment runs in parallel. One key reason to do this is to try training a model with a range of different hyperparameter values.
Azure ML includes a feature called *hyperdrive* that enables you to randomly try different values for one or more hyperparameters, and find the best performing trained model based on a metric that you specify - such as *Accuracy* or *Area Under the Curve (AUC)*.
> **More Information**: For more information about Hyperdrive, see the [Azure ML documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters).
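For example, a random sampling space could look like the following sketch (illustrative only; this exercise uses grid sampling over a single parameter, and the `--solver` argument here is a hypothetical second hyperparameter):
```
from azureml.train.hyperdrive import RandomParameterSampling, uniform, choice

# randomly sample a continuous and a categorical hyperparameter
random_params = RandomParameterSampling({
    '--regularization': uniform(0.001, 1.0),
    '--solver': choice('liblinear', 'lbfgs')
})
```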
Let's run a Hyperdrive experiment on the remote compute you have provisioned. First, we'll create the experiment and its associated folder.
```
import os
from azureml.core import Experiment
# Create an experiment
experiment_name = 'diabetes_training'
experiment = Experiment(workspace = ws, name = experiment_name)
# Create a folder for the experiment files
experiment_folder = './' + experiment_name
os.makedirs(experiment_folder, exist_ok=True)
print("Experiment:", experiment.name)
```
Now we'll create the Python script our experiment will run in order to train a model.
```
%%writefile $experiment_folder/diabetes_training.py
# Import libraries
import argparse
import joblib
from azureml.core import Workspace, Dataset, Experiment, Run
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# Set regularization parameter
parser = argparse.ArgumentParser()
parser.add_argument('--regularization', type=float, dest='reg_rate', default=0.01, help='regularization rate')
args = parser.parse_args()
reg = args.reg_rate
# Get the experiment run context
run = Run.get_context()
# load the diabetes dataset
dataset_name = 'Diabetes Dataset'
print("Loading data from " + dataset_name)
diabetes = Dataset.get_by_name(workspace=run.experiment.workspace, name=dataset_name).to_pandas_dataframe()
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
# Train a logistic regression model
print('Training a logistic regression model with regularization rate of', reg)
run.log('Regularization Rate', np.float(reg))
model = LogisticRegression(C=1/reg, solver="liblinear").fit(X_train, y_train)
# calculate accuracy
y_hat = model.predict(X_test)
acc = np.average(y_hat == y_test)
print('Accuracy:', acc)
run.log('Accuracy', np.float(acc))
# calculate AUC
y_scores = model.predict_proba(X_test)
auc = roc_auc_score(y_test,y_scores[:,1])
print('AUC: ' + str(auc))
run.log('AUC', np.float(auc))
# plot ROC curve
fpr, tpr, thresholds = roc_curve(y_test, y_scores[:,1])
fig = plt.figure(figsize=(6, 4))
# Plot the diagonal 50% line
plt.plot([0, 1], [0, 1], 'k--')
# Plot the FPR and TPR achieved by our model
plt.plot(fpr, tpr)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
run.log_image(name = "ROC", plot = fig)
plt.show()
os.makedirs('outputs', exist_ok=True)
# note file saved in the outputs folder is automatically uploaded into experiment record
joblib.dump(value=model, filename='outputs/diabetes_model.pkl')
run.complete()
```
Now, we'll use the *Hyperdrive* feature of Azure ML to run multiple experiments in parallel, using different values for the **regularization** parameter to find the optimal value for our data.
```
from azureml.train.hyperdrive import GridParameterSampling, BanditPolicy, HyperDriveConfig, PrimaryMetricGoal
from azureml.train.hyperdrive import choice
from azureml.widgets import RunDetails
from azureml.train.sklearn import SKLearn
# Sample a range of parameter values
params = GridParameterSampling(
{
# There's only one parameter, so grid sampling will try each value - with multiple parameters it would try every combination
'--regularization': choice(0.001, 0.005, 0.01, 0.05, 0.1, 1.0)
}
)
# Set evaluation policy to stop poorly performing training runs early
policy = BanditPolicy(evaluation_interval=2, slack_factor=0.1)
# Create an estimator that uses the remote compute
hyper_estimator = SKLearn(source_directory=experiment_folder,
compute_target = cpu_cluster,
conda_packages=['pandas','ipykernel','matplotlib'],
pip_packages=['azureml-sdk','argparse','pyarrow'],
entry_script='diabetes_training.py')
# Configure hyperdrive settings
hyperdrive = HyperDriveConfig(estimator=hyper_estimator,
hyperparameter_sampling=params,
policy=policy,
primary_metric_name='AUC',
primary_metric_goal=PrimaryMetricGoal.MAXIMIZE,
max_total_runs=6,
max_concurrent_runs=4)
# Run the experiment
run = experiment.submit(config=hyperdrive)
# Show the status in the notebook as the experiment runs
RunDetails(run).show()
```
When all of the runs have finished, you can find the best one based on the performance metric you specified (in this case, the one with the best AUC).
```
best_run = run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
parameter_values = best_run.get_details()['runDefinition']['arguments']
print('Best Run Id: ', best_run.id)
print(' -AUC:', best_run_metrics['AUC'])
print(' -Accuracy:', best_run_metrics['Accuracy'])
print(' -Arguments:', parameter_values)
```
Since we've found the best run, we can register the model it trained.
```
from azureml.core import Model
# Register model
best_run.register_model(model_path='outputs/diabetes_model.pkl', model_name='diabetes_model', tags={'Training context':'Hyperdrive'}, properties={'AUC': best_run_metrics['AUC'], 'Accuracy': best_run_metrics['Accuracy']})
# List registered models
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
```
## Task 3: Use *Auto ML* to Find the Best Model
Hyperparameter tuning has helped us find the optimal regularization rate for our logistic regression model, but we might get better results by trying a different algorithm, and by performing some basic feature-engineering, such as scaling numeric feature values. You could just create lots of different training scripts that apply various scikit-learn algorithms, and try them all until you find the best result; but Azure ML provides a feature called *Automated Machine Learning* (or *Auto ML*) that can do this for you.
First, let's create a folder for a new experiment.
```
# Create a project folder if it doesn't exist
automl_folder = "automl_experiment"
if not os.path.exists(automl_folder):
os.makedirs(automl_folder)
print(automl_folder, 'folder created')
```
You don't need to create a training script (Auto ML will do that for you), but you do need to load the training data; and when using remote compute, this is best achieved by creating a script containing a **get_data** function.
```
%%writefile $automl_folder/get_data.py
#Write the get_data file.
from azureml.core import Run, Workspace, Dataset
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
def get_data():
# load the diabetes dataset
run = Run.get_context()
dataset_name = 'Diabetes Dataset'
diabetes = Dataset.get_by_name(workspace=run.experiment.workspace, name=dataset_name).to_pandas_dataframe()
# Separate features and labels
X, y = diabetes[['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']].values, diabetes['Diabetic'].values
# Split data into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=0)
return { "X" : X_train, "y" : y_train, "X_valid" : X_test, "y_valid" : y_test }
```
Now you're ready to configure the Auto ML experiment. To do this, you'll need a run configuration that includes the required packages for the experiment environment, and a set of configuration settings that tells Auto ML how many options to try, which metric to use when evaluating models, and so on.
> **More Information**: For more information about options when using Auto ML, see the [Azure ML documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train).
```
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.train.automl import AutoMLConfig
import time
import logging
automl_run_config = RunConfiguration(framework="python")
automl_run_config.environment.docker.enabled = True
auto_ml_dependencies = CondaDependencies.create(
pip_packages=["azureml-sdk", "pyarrow", "pandas", "scikit-learn", "numpy"])
automl_run_config.environment.python.conda_dependencies = auto_ml_dependencies
automl_settings = {
"name": "Diabetes_AutoML_{0}".format(time.time()),
"iteration_timeout_minutes": 10,
"iterations": 10,
"primary_metric": 'AUC_weighted',
"preprocess": False,
"max_concurrent_iterations": 4,
"verbosity": logging.INFO
}
automl_config = AutoMLConfig(task='classification',
debug_log='automl_errors.log',
path=automl_folder,
compute_target=cpu_cluster,
run_configuration=automl_run_config,
data_script=automl_folder + "/get_data.py",
model_explainability=True,
**automl_settings,
)
```
OK, we're ready to go. Let's start the Auto ML run, which will generate child runs for different algorithms.
> **Note**: This will take some time. Progress will be displayed as each child run completes, and then a widget showing the results will be displayed.
```
from azureml.core.experiment import Experiment
from azureml.widgets import RunDetails
automl_experiment = Experiment(ws, 'diabetes_automl')
automl_run = automl_experiment.submit(automl_config, show_output=True)
RunDetails(automl_run).show()
```
View the output of the experiment in the widget, and click the run that produced the best result to see its details.
Then click the link to view the experiment details in the Azure portal and view the overall experiment details before viewing the details for the individual run that produced the best result. There's lots of information here about the performance of the model generated and how its features were used.
Let's get the best run and the model that was generated (you can ignore any warnings about Azure ML package versions that might appear).
```
best_run, fitted_model = automl_run.get_output()
print(best_run)
print(fitted_model)
best_run_metrics = best_run.get_metrics()
for metric_name in best_run_metrics:
metric = best_run_metrics[metric_name]
print(metric_name, metric)
```
One of the options you used was to include model *explainability*. This uses a test dataset to evaluate the importance of each feature. You can view this data in the notebook widget or the portal, and you can also retrieve it from the run.
```
from azureml.train.automl.automlexplainer import retrieve_model_explanation
shap_values, expected_values, overall_summary, overall_imp, per_class_summary, per_class_imp = retrieve_model_explanation(best_run)
# Overall feature importance (the Feature value is the column index in the training data)
print("Feature\tImportance")
for i in range(len(overall_imp)):
print(overall_imp[i], '\t', overall_summary[i])
```
Finally, having found the best performing model, you can register it.
```
# Register model
best_run.register_model(model_path='outputs/model.pkl', model_name='diabetes_model', tags={'Training context':'Auto ML'}, properties={'AUC': best_run_metrics['AUC_weighted'], 'Accuracy': best_run_metrics['accuracy']})
# List registered models
for model in Model.list(ws):
print(model.name, 'version:', model.version)
for tag_name in model.tags:
tag = model.tags[tag_name]
print ('\t',tag_name, ':', tag)
for prop_name in model.properties:
prop = model.properties[prop_name]
print ('\t',prop_name, ':', prop)
print('\n')
```
Now you've seen several ways to leverage the high-scale compute capabilities of the cloud to experiment with model training and find the best performing model for your data. In the next exercise, you'll deploy a registered model into production.
# A Shallow Neural Network in NumPy
In this hands-on part we will build a neural network with a single hidden layer, and the experiment will show how it differs from logistic regression.
The experiment uses a two-layer neural network to classify a "flower"-shaped pattern. As shown in the figure, the data consists of red points (y=0) and blue points (y=1) together with their coordinates. We will separate the two kinds of points through the following steps, implemented in NumPy:
- load the input samples;
- build the neural network;
- initialize the parameters;
- train, including forward propagation and backward propagation (the BP algorithm);
- obtain the trained parameters;
- use the trained parameters to plot the decision boundary between the two classes.
<img src="image/data.png" style="width:400px;height:300px;">
The experiment builds the two-layer neural network with basic Python libraries to complete the classification.
## 1 - Importing libraries
First, we load a few libraries that will be needed:
- numpy: a basic Python library for scientific computing
- planar_utils: defines a few utility functions
- matplotlib.pyplot: used for plotting, e.g. when checking the model's accuracy and showing how the cost evolves
- sklearn: used for data mining and data analysis
```
import numpy as np
import sklearn
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(1)
```
## 2 - Loading the data and checking its dimensions
After loading the data, we print its dimensions.
```
#载入数据
train_x, train_y, test_x, test_y = load_planar_dataset()
#输出维度
shape_X = train_x.shape
shape_Y = train_y.shape
print ('The shape of X is: ' + str(shape_X))
print ('The shape of Y is: ' + str(shape_Y))
```
From the output we can see that each input consists of two coordinate values and each label of a single value, and that there are 320 training examples (the test set adds another 80 examples on top of the training set, for 400 in total).
## 3 - A simple logistic regression experiment
First we apply logistic regression to the data and look at the classification result.
```
#训练逻辑回归分类器
clf = sklearn.linear_model.LogisticRegressionCV();
clf.fit(train_x.T, train_y.T);
#绘制逻辑回归分类边界
plot_decision_boundary(lambda x: clf.predict(x), train_x, train_y)
plt.title("Logistic Regression")
#输出准确率
LR_predictions = clf.predict(train_x.T)
print ('Accuracy of logistic regression:%d ' % float((np.dot(train_y,LR_predictions) + np.dot(1-train_y,1-LR_predictions))/float(train_y.size)*100) +
'% ' + "(percentage of correctly labelled datapoints)")
```
We can see that logistic regression does not perform well here. This is because its network structure contains only an input layer and an output layer, so it cannot fit more complex patterns. Next we try a neural network model.
## 4 - The neural network model
Now we build the neural network. We use a two-layer network: the hidden layer contains 4 nodes and uses the tanh activation function; the output layer contains one node and uses the sigmoid activation function, where an output below 0.5 is classified as 0 and anything else as 1.
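In equation form (the notation follows the W1, b1, W2 and b2 variables used in the code below), the forward pass for an input matrix $X$ is:

$$Z^{[1]} = W^{[1]} X + b^{[1]}, \qquad A^{[1]} = \tanh\left(Z^{[1]}\right)$$

$$Z^{[2]} = W^{[2]} A^{[1]} + b^{[2]}, \qquad A^{[2]} = \sigma\left(Z^{[2]}\right) = \frac{1}{1 + e^{-Z^{[2]}}}$$

and the prediction is 1 when $A^{[2]} > 0.5$, otherwise 0.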
**Network structure**
We now implement this structure in code. First we determine the sizes of the layers, i.e. we read the relevant data dimensions and set the number of hidden nodes (4 in this experiment), which will be used to initialize the parameters.
```
#定义各层规模函数
def layer_sizes(X, Y):
"""
参数含义:
X -- 输入的数据
Y -- 输出值
返回值:
n_x -- 输入层节点数
n_h -- 隐藏层节点数
n_y -- 输出层节点数
"""
n_x = X.shape[0] #输入层大小(节点数)
n_h = 4
n_y = Y.shape[0] #输出层大小(节点数)
return (n_x, n_h, n_y)
```
**Initializing the model parameters**
With the layer sizes known, we can initialize the parameters by defining the following function.
```
# 定义函数:初始化参数
def initialize_parameters(n_x, n_h, n_y):
"""
参数:
n_x -- 输入层大小
n_h -- 隐藏层大小
n_y -- 输出层大小
返回值:
params -- 一个包含所有参数的python字典:
W1 -- (隐藏层)权重,维度是 (n_h, n_x)
b1 -- (隐藏层)偏移量,维度是 (n_h, 1)
W2 -- (输出层)权重,维度是 (n_y, n_h)
b2 -- (输出层)偏移量,维度是 (n_y, 1)
"""
np.random.seed(2) # 设置随机种子
#随机初始化参数
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
```
**Forward and backward propagation**
With the input data and the initialized parameters, we can compute the forward propagation.
```
# 定义函数:前向传播
def forward_propagation(X, parameters):
"""
参数:
X -- 输入值
parameters -- 一个python字典,包含计算所需全部参数(是initialize_parameters函数的输出)
返回值:
A2 -- 模型输出值
cache -- 一个字典,包含 "Z1", "A1", "Z2" and "A2"
"""
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
#计算中间量和节点值
Z1 = np.dot(W1, X) + b1
A1 = np.tanh(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = 1/(1+np.exp(-Z2))
assert(A2.shape == (1, X.shape[1]))
cache = {"Z1": Z1,
"A1": A1,
"Z2": Z2,
"A2": A2}
return A2, cache
```
Forward propagation produces the model output (A2 in the code), from which we can compute the cost function.
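In equation form, the cross-entropy cost over $m$ examples computed by `compute_cost` is:

$$J = -\frac{1}{m}\sum_{i=1}^{m}\left[y^{(i)}\log a^{[2](i)} + \left(1 - y^{(i)}\right)\log\left(1 - a^{[2](i)}\right)\right]$$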
```
# 定义函数:成本函数
def compute_cost(A2, Y, parameters):
"""
根据第三章给出的公式计算成本
参数:
A2 -- 模型输出值
Y -- 真实值
parameters -- 一个python字典包含参数 W1, b1, W2和b2
返回值:
cost -- 成本函数
"""
m = Y.shape[1] #样本个数
#计算成本
logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), 1 - Y)
cost = -1. / m * np.sum(logprobs)
cost = np.squeeze(cost) # 确保维度的正确性
assert(isinstance(cost, float))
return cost
```
Having computed the cost, we can move on to backward propagation.
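The gradients computed below are the standard ones for this two-layer network (with $\circ$ denoting the element-wise product):

$$dZ^{[2]} = A^{[2]} - Y,\qquad dW^{[2]} = \frac{1}{m}\,dZ^{[2]}A^{[1]T},\qquad db^{[2]} = \frac{1}{m}\sum_{i=1}^{m} dZ^{[2](i)}$$

$$dZ^{[1]} = \left(W^{[2]T}dZ^{[2]}\right)\circ\left(1 - \left(A^{[1]}\right)^{2}\right),\qquad dW^{[1]} = \frac{1}{m}\,dZ^{[1]}X^{T},\qquad db^{[1]} = \frac{1}{m}\sum_{i=1}^{m} dZ^{[1](i)}$$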
```
# 定义函数:后向传播
def backward_propagation(parameters, cache, X, Y):
"""
参数:
parameters -- 一个python字典,包含所有参数
cache -- 一个python字典包含"Z1", "A1", "Z2"和"A2".
X -- 输入值
Y -- 真实值
返回值:
grads -- 一个python字典包含所有参数的梯度
"""
m = X.shape[1]
#首先从"parameters"获取W1,W2
W1 = parameters["W1"]
W2 = parameters["W2"]
# 从"cache"中获取A1,A2
A1 = cache["A1"]
A2 = cache["A2"]
#后向传播: 计算dW1, db1, dW2, db2.
dZ2 = A2 - Y
dW2 = 1. / m * np.dot(dZ2, A1.T)
db2 = 1. / m * np.sum(dZ2, axis = 1, keepdims = True)
dZ1 = np.dot(W2.T, dZ2) * (1 - np.power(A1, 2))
dW1 = 1. / m * np.dot(dZ1, X.T)
db1 = 1. / m * np.sum(dZ1, axis = 1, keepdims = True)
grads = {"dW1": dW1,
"db1": db1,
"dW2": dW2,
"db2": db2}
return grads
```
Once backward propagation has produced the gradients, we can update the parameters according to the gradient descent rule.
```
def update_parameters(parameters, grads, learning_rate = 1.2):
"""
使用梯度更新参数
参数:
parameters -- 包含所有参数的python字典
grads -- 包含所有参数梯度的python字典
返回值:
parameters -- 包含更新后参数的python
"""
#从"parameters"中读取全部参数
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# 从"grads"中读取全部梯度
dW1 = grads["dW1"]
db1 = grads["db1"]
dW2 = grads["dW2"]
db2 = grads["db2"]
#更新参数
W1 = W1 - learning_rate * dW1
b1 = b1 - learning_rate * db1
W2 = W2 - learning_rate * dW2
b2 = b2 - learning_rate * db2
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters
```
**The neural network model**
Forward propagation, cost computation and backward propagation make up a complete neural network. Combining the functions above, we now build the full model.
```
#定义函数:神经网络模型
def nn_model(X, Y, n_h, num_iterations = 10000, print_cost=False):
"""
参数:
X -- 输入值
Y -- 真实值
n_h -- 隐藏层大小/节点数
num_iterations -- 训练次数
print_cost -- 设置为True,则每1000次训练打印一次成本函数值
返回值:
parameters -- 训练结束,更新后的参数值
"""
np.random.seed(3)
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
#根据n_x, n_h, n_y初始化参数,并取出W1,b1,W2,b2
parameters = initialize_parameters(n_x, n_h, n_y)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
for i in range(0, num_iterations):
#前向传播, 输入: "X, parameters". 输出: "A2, cache".
A2, cache = forward_propagation(X, parameters)
#成本计算. 输入: "A2, Y, parameters". 输出: "cost".
cost = compute_cost(A2, Y, parameters)
#后向传播, 输入: "parameters, cache, X, Y". 输出: "grads".
grads = backward_propagation(parameters, cache, X, Y)
#参数更新. 输入: "parameters, grads". 输出: "parameters".
parameters = update_parameters(parameters, grads)
#每1000次训练打印一次成本函数值
if print_cost and i % 1000 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
return parameters
```
**Prediction**
Training the model above yields the final parameters. We now need to check its accuracy, using the trained parameters to predict the outputs: values greater than 0.5 are treated as 1, otherwise as 0.
```
#定义函数:预测
def predict(parameters, X):
"""
使用训练所得参数,对每个训练样本进行预测
参数:
parameters -- 保安所有参数的python字典
X -- 输入值
返回值:
predictions -- 模型预测值向量(红色: 0 / 蓝色: 1)
"""
#使用训练所得参数进行前向传播计算,并将模型输出值转化为预测值(大于0.5视作1,即True)
A2, cache = forward_propagation(X, parameters)
predictions = A2 > 0.5
return predictions
```
Now we train on the loaded data and print the accuracy.
```
#建立神经网络模型
parameters = nn_model(train_x, train_y, n_h = 4, num_iterations = 10000, print_cost=True)
#绘制分类边界
plot_decision_boundary(lambda x: predict(parameters, x.T), train_x, train_y)
plt.title("Decision Boundary for hidden layer size " + str(4))
predictions = predict(parameters, train_x)
# 预测训练集
print('Train Accuracy: %d' % float((np.dot(train_y, predictions.T) +
np.dot(1 - train_y, 1 - predictions.T)) /
float(train_y.size) * 100) + '%')
# 预测测试集
predictions = predict(parameters, test_x)
print('Test Accuracy: %d' % float((np.dot(test_y, predictions.T) +
np.dot(1 - test_y, 1 - predictions.T)) /
float(test_y.size) * 100) + '%')
```
Compared with the 47% accuracy of logistic regression and its decision boundary plot, the neural network's classification improves considerably. This is because the added hidden layer gives the model more capacity, allowing the network to fit more complex functions and to classify the more intricate pattern more accurately.
# Trump Tweets at the Internet Archive
So Trump's Twitter account is gone. At least at twitter.com. But (fortunately for history) there has probably never been a more heavily archived social media account at the Internet Archive and elsewhere on the web. There are also a plethora of online "archives" like [The Trump Archive](https://www.thetrumparchive.com/) which have collected these tweets as data. But seeing the tweets as they appeared in the browser is important. Of course you can go view the account in the Wayback Machine and [browse around](https://web.archive.org/web/20210107055108/https://twitter.com/realDonaldTrump) but what if we wanted a list of all the Trump tweets? How many times were these tweets actually archived?
## CDX API
The Wayback Machine (and many other web archives) has a service called the [CDX API](https://github.com/internetarchive/wayback/tree/master/wayback-cdx-server). Think of it as the index to the archive. You can give it a URL and it'll tell you what snapshots it has for it. You can also ask the CDX API to search for a *url prefix* and it will tell you what snapshots it has that start with that string. Let's use the handy [wayback](https://wayback.readthedocs.io/en/stable/usage.html) Python module to search for tweet URLs in the Wayback Machine. So URLs that look like:
https://twitter.com/realDonaldTrump/status/{id}
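To see what the CDX API returns directly, here is a minimal sketch that queries the public CDX endpoint with plain `requests` (the wayback module used below wraps the same service); the JSON response begins with a header row naming the fields:
```
import requests

# ask the Wayback CDX API for a handful of snapshots under the prefix
resp = requests.get(
    "http://web.archive.org/cdx/search/cdx",
    params={
        "url": "twitter.com/realDonaldTrump/status/",
        "matchType": "prefix",
        "output": "json",
        "limit": 5,
    },
)
rows = resp.json()
print(rows[0])             # header row: urlkey, timestamp, original, mimetype, statuscode, digest, length
for row in rows[1:]:
    print(row[1], row[2])  # snapshot timestamp and original URL
```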
```
! pip install wayback
```
The search() method handles paging through the API results using a resumption token behind the scenes. Let's look at the first 100 results just to see what they look like.
```
from wayback import WaybackClient
wb = WaybackClient()
count = 0
for result in wb.search('twitter.com/realDonaldTrump/status/', matchType='prefix'):
print(result.url)
count += 1
if count > 100:
break
```
So there are some weird URLs in there that look like the result of buggy automated archiving processes that aren't constructing URLs properly:
* https://twitter.com/realDonaldTrump/status/%22/SyakibPutera/status/636601131339087872%22
* https://twitter.com/realDonaldTrump/status/'+twitter_id+
And then we can see lots of results for the same URL such as https://twitter.com/realDonaldTrump/status/1000061992042975232 repeated over and over. This is because that URL was archived at multiple points in time. So let's improve on this to filter out the URLs that don't look like tweet URLs, and to only emit the unique ones. But still we'll just look at the first 100 results to make sure things are working properly.
```
import re
seen = set()
for result in wb.search('twitter.com/realDonaldTrump/status/', matchType='prefix'):
if re.search(r'/realDonaldTrump/status/\d+', result.url):
if result.url not in seen:
print(result.url)
seen.add(result.url)
if len(seen) > 100:
break
```
This list shows that some tweet URLs can have query strings, which modify the presentation of the tweet in various ways. For example to change the language of the user interface:
* https://twitter.com/realDonaldTrump/status/1000114139136606209?lang=en-gb
Or to highlight certain information:
* https://twitter.com/realDonaldTrump/status/1000114139136606209?conversation_id=1000114139136606209
The query parameters are essential for finding the right view in the Wayback Machine. But the different variants don't really matter if we simply want to count the number of tweets that are archived. Also it looks like some URLs aren't for the tweets themselves, but for components of the tweet, like video:
* https://twitter.com/realDonaldTrump/status/1000114139136606209/video/1
The process can be adjusted to parse the URL to ensure the path is for an actual tweet, not a tweet component. The tweet id can also be extracted from the path in order to track whether it has been seen before.
```
from urllib.parse import urlparse
seen = set()
for result in wb.search('twitter.com/realDonaldTrump/status/', matchType='prefix'):
uri = urlparse(result.url)
m = re.match(r'^/realDonaldTrump/status/(\d+)/?$', uri.path, re.IGNORECASE)
if not m:
continue
tweet_id = m.group(1)
if tweet_id not in seen:
print(result.url)
seen.add(tweet_id)
if len(seen) > 100:
break
```
It looks like this is actually working pretty well. For completeness we can store a mapping from each tweet id to all the results for that tweet id. This will allow us to track how many tweets have been archived, while letting us examine how many times each tweet was archived, and what the precise URLs are for playback.
This time we can let it keep running to get all the results.
```
from collections import defaultdict
tweets = defaultdict(list)
for result in wb.search('twitter.com/realDonaldTrump/status/', matchType='prefix'):
uri = urlparse(result.url)
m = re.match(r'^/realDonaldTrump/status/(\d{8,})/?$', uri.path, re.IGNORECASE)
if not m:
continue
tweet_id = m.group(1)
tweets[tweet_id].append(result)
```
Now we can see the tweet ids. Instead of printing them all out we can just look at the first 100:
```
list(tweets.keys())[0:100]
```
And we can look at when a given tweet was archived too, irrespective of the various query strings that can be part of it. Here we get all the snapshots for tweet id 1002298565299965953 and print out the times that it was archived, in descending order.
```
for result in sorted(tweets['1002298565299965953'], key=lambda r: r.timestamp, reverse=True):
print(result.timestamp)
len(tweets['1002298565299965953'])
```
So this particular URL was archived 252 times! The snapshots start on May 31, 2018 and most of the snapshots are from within a few days of that. But there are also a handful of snapshots in 2019 and 2020. Examining [one of the snapshots] shows that it was sent on May 31st at 2:19 PM. It's hard to tell what time zone the display was generated for. But since the first snapshot was at May 31, 2018 at 21:19:36 UTC it is safe to assume that the display is for -07:00 UTC, or (given the time of year) Pacific Daylight Time.
The [overview](https://web.archive.org/web/20180101000000*/twitter.com/realDonaldTrump/status/1002298565299965953) gives a picture of some of these snapshots. But the nice thing about our index is that it factors in the way that the tweet ID is expressed in the URL. So we know more than what the URL specific overview shows. For example here are all the various URLs that were collected.
```
for result in sorted(tweets['1002298565299965953'], key=lambda r: r.timestamp, reverse=True):
print(result.url)
```
What was the most archived tweet?
```
sorted(tweets, key=lambda r: len(tweets[r]), reverse=True)[0]
len(tweets['1006837823469735936'])
```
So https://twitter.com/realDonaldTrump/status/1006837823469735936 was archived 23,419 times?! It's interesting that the [overview page](https://web.archive.org/web/*/twitter.com/realDonaldTrump/status/1006837823469735936) only says 595 times, because it is looking at that exact URL. Looking at [the content](https://web.archive.org/web/20180613095659/twitter.com/realDonaldTrump/status/1006837823469735936) of the tweet it is understandable why this one was archived so much.
## Missing Data?
So what does the coverage look like? Before Trump's account was suspended [his profile](https://web.archive.org/web/20210107045727/https://twitter.com/realDonaldTrump/) indicated he had sent 59.6K tweets. The [TrumpTweetArchive](https://www.thetrumparchive.com/) also shows 56,571 tweets. How many tweet IDs did we find?
```
len(tweets)
```
That is *a lot* less than what we should have found. So either there is a problem with my code, or the wayback module isn't paging results properly, or the CDX API isn't functioning properly, or not all of Trump's tweets have been archived?
In conversation with [Rob Brackett](https://robbrackett.com/) who is the principal author of the Python [wayback](https://pypi.org/project/wayback) library it seems that using the `limit` parameter can help return more results. So instead of doing:
wb.search('twitter.com/realDonaldTrump/status/', matchType='prefix')
the `limit` parameter should be used:
wb.search('twitter.com/realDonaldTrump/status/', matchType='prefix', limit=500000)
Here's Rob's explanation, which raises further questions of its own:
> Basically what’s happening here is that, without the `limit` parameter, the first page of results hits the maximum size and then, in that situation, does not include a resume key for moving on to the next page. Including a low enough limit (I think anything less than 1.5 million, but not sure) prevents you from hitting that ceiling and lets you successfully page through everything. When I do that, I get 64,329 tweet IDs across 16,253,658 CDX records (but the default Colab instance doesn’t have enough memory to store every record like you’re doing, so I had to just store the first record for each ID).
So let's give this a try. Rob noted that we're likely to consume all working memory storing all these CDX records in RAM, so let's persist them to a SQLite database instead.
```
import pathlib
data = pathlib.Path("data")
db_path = data / "trump-tweets.sqlite3"
import sqlite3
# only generate the sqlite db if it's not already there
if not db_path.is_file():
db = sqlite3.connect(db_path)
db.execute(
'''
CREATE TABLE tweets (
tweet_id TEXT,
url TEXT,
timestamp DATETIME,
mime_type TEXT,
status_code INTEGER,
digest TEXT,
length INTEGER
)
'''
)
count = 0
for result in wb.search('twitter.com/realDonaldTrump/status/', matchType='prefix', limit=500000):
uri = urlparse(result.url)
m = re.match(r'^/realDonaldTrump/status/(\d{8,})/?$', uri.path, re.IGNORECASE)
if not m:
continue
tweet_id = m.group(1)
db.execute('INSERT INTO tweets VALUES (?, ?, ?, ?, ?, ?, ?)', [
tweet_id,
result.url,
result.timestamp,
result.mime_type,
result.status_code,
result.digest,
result.length
])
count += 1
if count % 1000 == 0:
db.commit()
db.close()
```
Unfortunately GitHub won't let you upload the 3GB sqlite file--even with git-lfs enabled.
```
db = sqlite3.connect("data/trump-tweets.sqlite3")
db.execute('SELECT COUNT(DISTINCT(tweet_id)) FROM tweets').fetchall()
```
So 65,314 tweets were found. That's quite a bit more than the 59k suggested by the Twitter display and the 56,571 by the Trump Archive. Let's limit to snapshots that had a 200 OK HTTP response. As we saw above it's possible people tried to archive bogus tweet URLs.
```
db.execute(
'''
SELECT COUNT(DISTINCT(tweet_id))
FROM tweets
WHERE status_code = 200
''').fetchall()
```
That seems a lot more like it. So what were the most archived tweets? Let's get the top 10.
```
cursor = db.execute(
'''
SELECT tweet_id,
COUNT(*) AS total
FROM tweets
WHERE status_code = 200
GROUP by tweet_id
ORDER BY total DESC
LIMIT 10
'''
)
for row in cursor.fetchall():
print(row)
```
So the most archived tweet was archived 56,571 times:
https://web.archive.org/web/20200521045242/https://twitter.com/realDonaldTrump/status/704834185471598592
The interface indicates it was archived 1,616 times, but remember we factored in alternate forms of the tweet URL. Let's see what those were.
```
cursor = db.execute(
'''
SELECT url,
COUNT(*) as total
FROM tweets
WHERE tweet_id = "704834185471598592"
AND status_code = 200
GROUP BY url
ORDER BY total DESC
'''
)
for row in cursor.fetchall():
print(row)
```
Now that is kind of fascinating. Why would there be 1,349 captures of each of those language specific URLs for this tweet? This seems like some kind of automation?
## Missing from Trump Archive?
So what tweets were found in the Internet Archive that are not in the Trump Archive? To figure this out we can first load in the Trump Archive tweets. This is relatively easy to do using the Google Drive download from their FAQ page.
```
import pandas
df = pandas.read_csv('https://drive.google.com/uc?export=download&id=1xRKHaP-QwACMydlDnyFPEaFdtskJuBa6'
)
trump_archive = set([str(tweet_id) for tweet_id in df['id']])
len(trump_archive)
```
Now we need the tweet ids from our sqlite db. Tweets will return a 200 OK, and retweets will return a 301 Moved Permanently redirect to the original tweet, so we will include both here.
```
cursor = db.execute(
'''
SELECT DISTINCT(tweet_id)
FROM tweets
WHERE status_code in (200, 301)
'''
)
wayback = set([r[0] for r in cursor.fetchall()])
len(wayback)
```
Now we can see what tweet ids are in the Wayback Machine but not in the Trump Archive.
```
len(wayback - trump_archive)
```
Wow, so 3,592 tweets are in the Wayback Machine but not in the Trump Archive?! Let's spot check some of them to see if this is the case. Let's generate the Wayback URLs for the first 25 of these ids, using the pattern:
https://web.archive.org/web/{datetime}/{url}
```
ids = list(wayback - trump_archive)[0:25]
cursor = db.execute(
'''
SELECT
"https://web.archive.org/web/"
|| STRFTIME('%Y%m%d%H%M%S', timestamp)
|| "/"
|| url
FROM tweets
WHERE tweet_id IN ({})
'''.format(",".join(["?"] * 25)),
ids
)
for row in cursor.fetchall():
print(row[0])
```
The first 5 of these seem to generate a *Something Went Wrong* page. Perhaps there were tweets there and the Wayback Machine failed to fetch them properly? Or maybe the data is there but failing to play back? It's hard to say with confidence.
https://web.archive.org/web/20201108045542/https://twitter.com/realDonaldTrump/status/1080839175392321541
<img src="images/twitter-something-went-wrong.png">
But then at least some of these appear to work such as:
* https://web.archive.org/web/20201106091445/https://twitter.com/realDonaldTrump/status/667434942826156032
* https://web.archive.org/web/20200307194518/https://twitter.com/realdonaldtrump/status/646009823356690432
The Trump Archive API can tell if they have these two:
https://www.thetrumparchive.com/tweets/{tweet-id}
* https://www.thetrumparchive.com/tweets/667434942826156032
* https://www.thetrumparchive.com/tweets/646009823356690432
So it looks like there are definitely some realDonaldTrump tweets in the Internet Archive's Wayback Machine that are not in the Trump Archive. Some number less than 3,592. It would be necessary to somehow verify these to be sure. Here's a CSV of all the tweet IDs to see if they can be curated.
```
import csv
out = csv.writer(open('data/trump-tweets-missing-from-archive.csv', 'w'))
out.writerow(['tweet_url', 'archive_url'])
for tweet_id in wayback - trump_archive:
sql = """
SELECT
url,
STRFTIME('%Y%m%d%H%M%S', timestamp) AS timestamp
FROM tweets
WHERE tweet_id = ?
ORDER BY timestamp DESC
LIMIT 1
"""
[tweet_url, timestamp] = db.execute(sql, [tweet_id]).fetchone()
out.writerow([
tweet_url,
"https://web.archive.org/web/{}/{}".format(timestamp, tweet_url)
])
print(tweet_url)
```
## Missing from Internet Archive?
How about the other angle: are there any tweet ids in the Trump Archive that didn't come back from the CDX API?
```
len(trump_archive - wayback)
```
It appears yes?
```
trump_archive - wayback
```
Let's examine the first one: 1175115230457802752. Is it in the Trump Archive?
https://www.thetrumparchive.com/tweets/1175115230457802752
Yes. It looks like a retweet of @FLOTUS:
RT @FLOTUS: Welcome to the @WhiteHouse PM Morrison and Mrs. Morrison! 🇺🇸🇦🇺 https://t.co/kYznIkJf9H
But the redirect of the retweet is not in the Internet Archive:
https://web.archive.org/web/*/https://twitter.com/realDonaldTrump/1175115230457802752
This in itself isn't too surprising because people wouldn't typically archive the retweet redirect. Are there any non-retweets in the Trump Archive but not in the Wayback Machine? To test that we need to examine the text of these tweets. Luckily we can look those up pretty easily using The Trump Archive API.
```
import requests
for tweet_id in trump_archive - wayback:
url = "https://www.thetrumparchive.com/tweets/{}".format(tweet_id)
resp = requests.get(url)
if resp.status_code == 200:
tweet = resp.json()
if tweet['isRetweet'] == False:
print("id: {}\ndate: {}\n{}\n".format(
tweet['id'],
tweet['date'],
tweet['text']
))
for tweet_id in trump_archive - wayback:
url = "https://www.thetrumparchive.com/tweets/{}".format(tweet_id)
resp = requests.get(url)
if resp.status_code == 200:
tweet = resp.json()
if not re.match(r'^"?RT', tweet['text']):
print("id: {}\ndate: {}\ndeleted: {}\n{}\n".format(
tweet['id'],
tweet['date'],
tweet['isDeleted'],
tweet['text']
))
```
We can verify by looking in our database for a tweet id like 1281926278845812736:
```
db.execute('SELECT * FROM tweets WHERE tweet_id = ?', ["1281926278845812736"]).fetchall()
```
Sure enough, it looks like the Internet Archive wasn't quite quick enough to pick this one up. It's hard to say when the tweet was deleted, but it was archived on 2020-11-15, well after it was sent on 2020-07-11.
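As a rough cross-check of my own (not something the CDX records tell us), tweet IDs of this era are Twitter snowflake IDs that encode a millisecond timestamp, so the send time can be recovered from the ID itself:
```
from datetime import datetime, timezone

# Snowflake IDs store milliseconds since Twitter's 2010-11-04 custom epoch
# in their upper bits; this should land on 2020-07-11 for the tweet above,
# assuming the snowflake scheme applies to this ID.
TWITTER_EPOCH_MS = 1288834974657
tweet_id = 1281926278845812736
sent_at = datetime.fromtimestamp(((tweet_id >> 22) + TWITTER_EPOCH_MS) / 1000,
                                 tz=timezone.utc)
print(sent_at)
```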
Still, it is truly remarkable that the Wayback Machine only seems to be missing three original tweets (non-retweets), at least with respect to the Trump Archive. But since the Trump Archive appears to be missing at least some content that is present in the Wayback Machine, it's not exactly clear how accurate this is. In the end this highlights why it is important for Twitter to make an archival snapshot available.
## Archiving Activity
We can use our little SQLite database to plot the archiving activity related to Trump's tweets over time.
```
sql = \
'''
SELECT
STRFTIME('%Y%m%d', timestamp) AS day,
COUNT(*) AS "snapshots"
FROM tweets
GROUP BY day
ORDER BY day ASC
'''
df = pandas.read_sql_query(sql, db, parse_dates=['day'])
df.head()
```
Let's fill in the blanks for days where there was no archiving of Trump's tweets.
```
dates = pandas.date_range(min(df.day), max(df.day))
df = df.set_index('day').reindex(dates).fillna(0)
df.head()
```
Now we can try a plot!
```
df.plot(
kind='line',
title="Archiving Trump's Tweets at the Internet Archive",
figsize=(10, 4),
legend=False,
xlabel='Time',
ylabel='Snapshots per Day'
)
```
Kinda noisy. Maybe it will look better as snapshots per week?
```
df = df.resample('W').sum().rename_axis('time')
df.plot(
kind='line',
title="Archiving Trump's Tweets at the Internet Archive",
figsize=(10, 4),
legend=False,
xlabel='Time',
ylabel='Snapshots per Week'
)
```
## Trump Archive URLs
To help media organizations update their links to point at snapshots at the Internet Archive I thought it could be useful to create a CSV dataset of the tweet ids and links. It's important to limit to known good tweets (within a particular range) and ones that returned a 200 OK. The latest snapshot will provide a picture of what interaction with that tweet looked like when the tweets were removed.
The situation is a bit tricky: just because there is a 200 OK response for a tweet URL in the Internet Archive doesn't mean it's a good one to link to. For example this one seems to be OK but doesn't render because of playback issues:
https://web.archive.org/web/20201112035441/https://twitter.com/realdonaldtrump/status/1698308935?s=21
What we can do is create a little function to make sure that it renders:
```
import requests_html
http = requests_html.AsyncHTMLSession()
async def response_ok(url, tries=10):
global http
try:
resp = await http.get(url)
await resp.html.arender(timeout=60)
match = resp.html.search("Something went wrong")
if match:
return False
return True
except Exception as e:
if tries == 0:
raise e
else:
http = requests_html.AsyncHTMLSession()
return await response_ok(url, tries - 1)
await response_ok('https://web.archive.org/web/20201112035441/https://twitter.com/realdonaldtrump/status/1698308935?s=21')
await response_ok('https://web.archive.org/web/20201106114341/https://twitter.com/realDonaldTrump/status/1776419923')
sql = \
"""
SELECT DISTINCT(CAST(tweet_id AS NUMERIC)) AS tweet_num
FROM tweets
WHERE tweet_num > 1698308934
AND tweet_num < 1351984482019115009
ORDER BY tweet_num ASC
"""
out = csv.writer(open('data/trump-tweet-archive.csv', 'w'))
out.writerow(['tweet_url', 'archive_url'])
count = 0
for row in db.execute(sql):
tweet_id = row[0]
sql = \
"""
SELECT url, STRFTIME('%Y%m%d%H%M%S', timestamp)
FROM tweets
WHERE tweet_id = ?
AND status_code = 200
ORDER BY timestamp DESC
"""
for [url, timestamp] in db.execute(sql, [tweet_id]):
archive_url = 'https://web.archive.org/web/{}/{}'.format(timestamp, url)
print('checking {}'.format(archive_url))
if await response_ok(archive_url):
tweet_url = 'https://twitter.com/realDonaldTrump/status/{}'.format(tweet_id)
print('ok {} {}'.format(tweet_url, archive_url))
out.writerow([tweet_url, archive_url])
break
```
## Top 10
What were the top 10 most archived tweets?
```
import sqlite3
import pandas
db = sqlite3.connect('data/trump-tweets.sqlite3')
df = pandas.read_csv('data/trump-archive.csv')
def get_text(tweet_id):
v = df[df['id'] == tweet_id].text.values
if len(v) != 0:
return v[0]
else:
return "???"
get_text(1698308935)
sql = '''
SELECT tweet_id,
COUNT(*) AS total
FROM tweets
GROUP BY tweet_id
ORDER By total DESC
LIMIT 10
'''
for [tweet_id, total] in db.execute(sql):
print('* [{}]({}) {}'.format(
get_text(int(tweet_id)),
'https://web.archive.org/web/*/https://twitter.com/realDonaldTrump/status/{}'.format(tweet_id),
total
))
```
## Politwoops
Just as a last exercise it's interesting to see which tweets in Politwoops for Trump are in (or not in) the Internet Archive. We saw one of them above when we were analyzing The Trump Twitter Archive.
First we need all the Politwoops ids. We can use the Politwoops API:
```
import requests
politwoops = set()
page = 1
while True:
url = "https://projects.propublica.org/politwoops/user/realDonaldTrump"
data = requests.get(url, params={"format": "json", "page": page}).json()
if not data or len(data["tweets"]) == 0:
break
for tweet in data["tweets"]:
politwoops.add(tweet["id"])
page += 1
len(politwoops)
wayback_missing = politwoops - wayback
len(wayback_missing)
len(wayback_missing) / len(politwoops)
```
So it looks like there are 179 tweets in Politwoops that are missing from the Wayback Machine? Let's take a look at the URLs to spot check a few.
```
for tweet_id in wayback_missing:
politwoops_url = "https://projects.propublica.org/politwoops/tweet/{}".format(tweet_id)
wayback_url = "https://web.archive.org/web/*/http://twitter.com/realDonaldTrump/status/{}".format(tweet_id)
print(politwoops_url)
print(wayback_url)
print()
```
Looking at some of these it becomes clear that Politwoops is lumping together realDonaldTrump and POTUS. But we didn't collect Wayback data for POTUS. We can collect the Politwoops data again but filter out the POTUS tweets.
```
politwoops = set()
page = 1
while True:
url = "https://projects.propublica.org/politwoops/user/realDonaldTrump"
data = requests.get(url, params={"format": "json", "page": page}).json()
if not data or len(data["tweets"]) == 0:
break
for tweet in data["tweets"]:
# make sure the user is realdonaldtrump and not potus
if tweet["user_name"].lower() == "realdonaldtrump":
politwoops.add(tweet["id"])
page += 1
len(politwoops)
wayback_missing = politwoops - wayback
len(wayback_missing)
for tweet_id in wayback_missing:
politwoops_url = "https://projects.propublica.org/politwoops/tweet/{}".format(tweet_id)
wayback_url = "https://web.archive.org/web/*/http://twitter.com/realDonaldTrump/status/{}".format(tweet_id)
print(politwoops_url)
print(wayback_url)
print()
out = csv.writer(open('data/trump-politwoops-wayback.csv', 'w'))
out.writerow(['tweet_id', 'politwoops_url', 'wayback_url'])
for tweet_id in wayback_missing:
politwoops_url = "https://projects.propublica.org/politwoops/tweet/{}".format(tweet_id)
wayback_url = "https://web.archive.org/web/*/http://twitter.com/realDonaldTrump/status/{}".format(tweet_id)
out.writerow([tweet_id, politwoops_url, wayback_url])
```
## Bayesian Optimization with Scikit-Optimize
In this notebook, we will perform **Bayesian Optimization** with Gaussian Processes in parallel, utilizing multiple CPUs to speed up the search.
This is useful to reduce search times.
https://scikit-optimize.github.io/stable/auto_examples/parallel-optimization.html#example
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import cross_val_score, train_test_split
from skopt import Optimizer # for the optimization
from joblib import Parallel, delayed # for the parallelization
from skopt.space import Real, Integer, Categorical
from skopt.utils import use_named_args
# load dataset
breast_cancer_X, breast_cancer_y = load_breast_cancer(return_X_y=True)
X = pd.DataFrame(breast_cancer_X)
y = pd.Series(breast_cancer_y).map({0:1, 1:0})
X.head()
# the target:
# percentage of benign (0) and malign tumors (1)
y.value_counts() / len(y)
# split dataset into a train and test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=0)
X_train.shape, X_test.shape
```
## Define the Hyperparameter Space
Scikit-optimize provides a utility function to create the range of values to examine for each hyperparameter. More details in [skopt.Space](https://scikit-optimize.github.io/stable/modules/generated/skopt.Space.html).
```
# determine the hyperparameter space
param_grid = [
Integer(10, 120, name="n_estimators"),
Integer(1, 5, name="max_depth"),
Real(0.0001, 0.1, prior='log-uniform', name='learning_rate'),
Real(0.001, 0.999, prior='log-uniform', name="min_samples_split"),
Categorical(['deviance', 'exponential'], name="loss"),
]
# Scikit-optimize parameter grid is a list
type(param_grid)
```
## Define the model
```
# set up the gradient boosting classifier
gbm = GradientBoostingClassifier(random_state=0)
```
## Define the objective function
This is the hyperparameter response space, the function we want to minimize.
```
# We design a function to maximize the accuracy, of a GBM,
# with cross-validation
# the decorator allows our objective function to receive the parameters as
# keyword arguments. This is a requirement for scikit-optimize.
@use_named_args(param_grid)
def objective(**params):
# model with new parameters
gbm.set_params(**params)
# optimization function (hyperparam response function)
value = np.mean(
cross_val_score(
gbm,
X_train,
y_train,
cv=3,
n_jobs=-4,
scoring='accuracy')
)
# negate because we need to minimize
return -value
```
## Optimization with Gaussian Process
```
# We use the Optimizer
optimizer = Optimizer(
dimensions = param_grid, # the hyperparameter space
base_estimator = "GP", # the surrogate
    n_initial_points=10, # the number of initial points at which to evaluate f(x) to start off
acq_func='EI', # the acquisition function
random_state=0,
n_jobs=4,
)
# we will evaluate 4 points in parallel, one per CPU (n_points=4)
# if we loop 10 times asking for 4 points each time, we perform 40 evaluations in total
for i in range(10):
x = optimizer.ask(n_points=4) # x is a list of n_points points
y = Parallel(n_jobs=4)(delayed(objective)(v) for v in x) # evaluate points in parallel
optimizer.tell(x, y)
# the evaluated hyperparameters
optimizer.Xi
# the objective values (the negated accuracy)
optimizer.yi
# all together in one dataframe, so we can investigate further
# note: the names must follow the order of the dimensions in param_grid
dim_names = ['n_estimators', 'max_depth', 'learning_rate', 'min_samples_split', 'loss']
tmp = pd.concat([
pd.DataFrame(optimizer.Xi),
pd.Series(optimizer.yi),
], axis=1)
tmp.columns = dim_names + ['accuracy']
tmp.head()
```
## Evaluate convergence of the search
```
tmp['accuracy'].sort_values(ascending=False).reset_index(drop=True).plot()
```
The trade-off with parallelization is that the surrogate is not updated after each evaluation of f(x), but only after, in this case, 4 evaluations of f(x). Thus, we may need to perform more evaluations to find the optimum. But because we do them in parallel, overall we reduce wall time.
```
tmp.sort_values(by='accuracy', ascending=True)
```
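As a small follow-up (not part of the original example), the best configuration seen so far can be pulled straight out of the `optimizer.Xi` / `optimizer.yi` lists shown above:
```
# the objective returns -accuracy, so the minimum objective value
# corresponds to the highest cross-validated accuracy
best_idx = int(np.argmin(optimizer.yi))
best_params = dict(zip(dim_names, optimizer.Xi[best_idx]))
print(best_params)
print('best cv accuracy:', -optimizer.yi[best_idx])
```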
## Control Flow
Generally, a program is executed sequentially and, once executed, it is not repeated again. There may be situations where you need to execute a piece of code a number of times, or execute a certain piece of code only when a particular condition is met. This is where control flow statements come in.
In this section, we will be covering:
- Conditional statements -- if, else, and elif
- Loop statements -- for, while
- Loop control statements -- break, continue, pass
### Conditional Statements
Conditional statements are used to change the flow of execution. You can use the relational, logical, and membership operators to perform condition checks.
```
result = 1
if result == 1:
print("Best Match")
elif result <= 3:
print("Close Enough")
else:
print("This is Blasphemy!")
```
The logic is very simple: *`if`* <`condition_is_met`>, *`then`* do something; *`else`* do something else.
Python adopts the `if`-`else` clause as it is used in many languages. The `elif` keyword is simply a contraction of `else if`.
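For instance (a small illustration, not from the original notebook), the `elif` branch in the example above is just a flattened way of writing a nested `if` inside the `else` block:
```
result = 2

# equivalent to the if / elif / else example above
if result == 1:
    print("Best Match")
else:
    if result <= 3:          # this nested if is what elif abbreviates
        print("Close Enough")
    else:
        print("This is Blasphemy!")
```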
### Loop Statements
These statements are used when we want to execute a piece of code multiple times. Python has two types of loops -- `for` loop and `while` loop.
```
for i in [0,1,2]:
print("{}".format(i))
```
In a `for` loop, we specify the variable we want to use and the iterable we want to loop over, and use the `in` (membership) operator to link them together.
```
i = 2
while i >= 0:
print("{}".format(i))
i -= 1
```
As you can see, they serve different purposes. A `for` loop is used when you want to run something a fixed number of times, whereas a `while` loop can theoretically run forever (if you use something like `while True:` .. *don't!*).
One of the most commonly used iterables with a `for` loop is the `range` object, which generates a sequence of numbers.
```
list(range(10))
```
`range` requires the *stop* argument. It can also accept *start* (in the first position) and *step* (in the third position) as arguments, but if these are not passed it creates a sequence of numbers from `0` up to `stop - 1`. Remember, *stop* is not included in the output.
```
# With start and stop
list(range(2, 20))
# With start, stop and step
list(range(2, 20, 2))
```
When you have an iterable of iterables, for example a list of lists, you can use what are known as nested loops to visit every element.
```
# This is not the best way, but it is included for the sake of completeness.
arr = [range(3), range(3, 6)]
for lists in arr:
for elem in lists:
print(elem)
```
### Loop Control Statements
Loop control statements change the execution of a loop from its normal sequence.
#### Break
It terminates the current loop and resumes the execution at the next statement. The most common use for break is when some external condition is triggered requiring a hasty exit from a loop. The break statement can be used in both while and for loops.
```
for i in range(1, 10):
if i == 5:
print('Condition satisfied')
break
print(i) # What would happen if this is placed before if condition?
```
#### Continue
The continue statement returns control to the beginning of the loop: it skips all the remaining statements in the current iteration and moves control back to the top of the loop.
```
for i in range(1, 10):
if i == 5:
print('Condition satisfied')
continue
print("whatever.. I won't get printed anyways.")
print(i)
```
#### Pass
Pass is used when a statement is required syntactically but performs a null operation, i.e. nothing happens when the statement is executed.
```
for i in range(1, 10):
if i == 5:
print('Condition satisfied')
pass
print(i)
```
As you can see, executing the pass statement had no effect on the flow of the code. It wouldn't have mattered if it was not there.
It is generally used as a temporary placeholder for unimplemented logic. For example, let's say you have written a function (we'll learn about functions a little later) and want to test the remaining part of the code without actually running your function. You can use a pass statement in such cases: the Python interpreter executes it as a no-op and gets on with further execution.
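For instance, here is a small sketch (the function name is just illustrative) of `pass` standing in for a body that hasn't been written yet:
```
def fancy_feature():
    # TODO: implement later; pass keeps this definition syntactically valid
    pass

fancy_feature()  # runs without error and does nothing
print("The rest of the program keeps running")
```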
### Loops with else
Python's loop statements can be accompanied by an `else` block in cases where a certain block of code needs to be executed after the loop has successfully completed its execution, i.e. only if the loop didn't `break` out in the middle of execution.
```
best = 11
for i in range(10):
if i >= best:
print("Excellent")
break
else:
continue
else:
print("Couldn't find the best match")
```
Now if we change the `best` to something less than `10`
```
best = 9
for i in range(10):
if i >= best:
print("Excellent")
break
else:
continue
else:
print("Couldn't find the best match")
```
You can implement similar functionality using the `while` loop.
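For example, here is a quick sketch of the same search written with a `while` loop and an `else` block:
```
best = 11
i = 0
while i < 10:
    if i >= best:
        print("Excellent")
        break
    i += 1
else:
    # runs only because the loop finished without hitting break
    print("Couldn't find the best match")
```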
## MatrixTable Tutorial
If you've gotten this far, you're probably thinking:
- "Can't I do all of this in `pandas` or `R`?"
- "What does this have to do with biology?"
The two crucial features that Hail adds are _scalability_ and the _domain-specific primitives_ needed to work easily with biological data. Fear not! You've learned most of the basic concepts of Hail and now are ready for the bit that makes it possible to represent and compute on genetic matrices: the [MatrixTable](https://hail.is/docs/0.2/hail.MatrixTable.html).
In the last example of the [Table Joins Tutorial](https://hail.is/docs/0.2/tutorials/08-joins.html), the ratings table had a compound key: `movie_id` and `user_id`. The ratings were secretly a movie-by-user matrix!
However, since this matrix is very sparse, it is reasonably represented in a so-called "coordinate form" `Table`, where each row of the table is an entry of the sparse matrix. For large and dense matrices (like sequencing data), the per-row overhead of coordinate representations is untenable. That's why we built `MatrixTable`, a 2-dimensional generalization of `Table`.
### MatrixTable Anatomy
Recall that `Table` has two kinds of fields:
- global fields
- row fields
`MatrixTable` has four kinds of fields:
- global fields
- row fields
- column fields
- entry fields
Row fields are fields that are stored once per row. These can contain information about the rows, or summary data calculated per row.
Column fields are stored once per column. These can contain information about the columns, or summary data calculated per column.
Entry fields are the piece that makes this structure a matrix -- there is an entry for each (row, column) pair.
### Importing and Reading
Like tables, matrix tables can be [imported](https://hail.is/docs/0.2/methods/impex.html) from a variety of formats: VCF, (B)GEN, PLINK, TSV, etc. Matrix tables can also be *read* from a "native" matrix table format. Let's read a sample of prepared [1KG](https://en.wikipedia.org/wiki/1000_Genomes_Project) data.
```
import hail as hl
from bokeh.io import output_notebook, show
output_notebook()
hl.utils.get_1kg('data/')
mt = hl.read_matrix_table('data/1kg.mt')
mt.describe()
```
There are a few things to note:
- There is a single column field `s`. This is the sample ID from the VCF. It is also the column key.
- There is a compound row key: `locus` and `alleles`.
- `locus` has type `locus<GRCh37>`
- `alleles` has type `array<str>`
- GT has type `call`. That's a genotype call!
Whereas table expressions could be indexed by nothing or indexed by rows, matrix table expressions have four options: nothing, indexed by row, indexed by column, or indexed by row and column (the entries). Let's see some examples.
```
mt.s.describe()
mt.GT.describe()
```
### MatrixTable operations
We belabored the operations on tables because they all have natural analogs (sometimes several) on matrix tables. For example:
- `count` => `count_{rows, cols}` (and `count` which returns both)
- `filter` => `filter_{rows, cols, entries}`
- `annotate` => `annotate_{rows, cols, entries}` (and globals for both)
- `select` => `select_{rows, cols, entries}` (and globals for both)
- `transmute` => `transmute_{rows, cols, entries}` (and globals for both)
- `group_by` => `group_{rows, cols}_by`
- `explode` => `explode_{rows, cols}`
- `aggregate` => `aggregate_{rows, cols, entries}`
Some operations are unique to `MatrixTable`:
- The row fields can be accessed as a `Table` with [rows](https://hail.is/docs/0.2/hail.MatrixTable.html#hail.MatrixTable.rows)
- The column fields can be accessed as a `Table` with [cols](https://hail.is/docs/0.2/hail.MatrixTable.html#hail.MatrixTable.cols).
- The entire field space of a `MatrixTable` can be accessed as a coordinate-form `Table` with [entries](https://hail.is/docs/0.2/hail.MatrixTable.html#hail.MatrixTable.entries). Be careful with this! While it's fast to aggregate or query, trying to write this `Table` to disk could produce files _thousands of times larger_ than the corresponding `MatrixTable`.
Let's explore `mt` using these tools. Let's get the size of the dataset.
```
mt.count() # (rows, cols)
```
Let's look at the first few row keys (variants) and column keys (sample IDs).
```
mt.rows().select().show()
mt.s.show()
```
Let's investigate the genotypes and the call rate. Let's look at the first few genotypes:
```
mt.GT.show()
```
All homozygous reference, which is not surprising. Let's look at the distribution of genotype calls:
```
mt.aggregate_entries(hl.agg.counter(mt.GT.n_alt_alleles()))
```
Let's compute the overall call rate directly, and then plot the distribution of call rate per variant.
```
mt.aggregate_entries(hl.agg.fraction(hl.is_defined(mt.GT)))
```
Here's a nice trick: you can use an aggregator inside `annotate_rows` and it will aggregate over columns, that is, summarize the values in the row using the aggregator. Let's compute and plot call rate per variant.
```
mt2 = mt.annotate_rows(call_rate = hl.agg.fraction(hl.is_defined(mt.GT)))
mt2.describe()
p = hl.plot.histogram(mt2.call_rate, range=(0,1.0), bins=100,
title='Variant Call Rate Histogram', legend='Call Rate')
show(p)
```
### Exercise: GQ vs DP
In this exercise, you'll use Hail to investigate a strange property of sequencing datasets.
The `DP` field is the sequencing depth (the number of reads).
Let's first plot a histogram of `DP`:
```
p = hl.plot.histogram(mt.DP, range=(0,40), bins=40, title='DP Histogram', legend='DP')
show(p)
```
Now, let's do the same thing for GQ.
The `GQ` field is the phred-scaled "genotype quality". The formula to convert to a linear-scale error probability (0 to 1) is `10 ** -(mt.GQ / 10)`. GQ is truncated to lie between 0 and 99.
```
p = hl.plot.histogram(mt.GQ, range=(0,100), bins=100, title='GQ Histogram', legend='GQ')
show(p)
```
Whoa! That's a strange distribution! There's a big spike at 100. The rest of the values have roughly the same shape as the DP distribution, but form a [Dimetrodon](https://en.wikipedia.org/wiki/Dimetrodon). Use Hail to figure out what's going on!
```
# 1. Loading libraries
# Importing NumPy and pandas
import pandas as pd
import numpy as np
# Importing libraries & modules for data visualization
from pandas.plotting import scatter_matrix
from matplotlib import pyplot
# Importing the scikit-learn module to split the dataset into train/test sub-datasets
from sklearn.model_selection import train_test_split
# Importing the scikit-learn module for the algorithm/model: Logistic Regression
from sklearn.linear_model import LogisticRegression
# Importing scikit-learn modules for K-fold cross-validation - algorithm/model evaluation & validation
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
# Importing the scikit-learn module for the classification report
from sklearn.metrics import classification_report
# 2. Specifying the data file location
filename = 'C:/Data Sets/Iris.csv'
# Loading the data into a pandas DataFrame
df = pd.read_csv(filename)
# 4. Preprocess dataset
# 4.1 Cleaning data: find & mark missing values
# Zero values cannot be used in these columns,
# so mark and update zero values as missing (NaN)
df[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']] \
= df[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']].replace(0, np.NaN)
# count the number of NaN values in each column
print(df.isnull().sum())
# 5. Performing exploratory data analysis on the dataset
# Get the dimensions or shape of the dataset
# i.e. number of records/rows x number of variables/columns
print(df.shape)
# Get the data types of all the variables/attributes of the dataset
print(df.dtypes)
# Get several records/rows at the top of the dataset
# i.e. the first five records
print(df.head(5))
# Get the summary statistics of the numeric variables/attributes of the dataset
print(df.describe())
# Class distribution
# i.e. how many records for each class
# This dataset is a good candidate for classification problems
print(df.groupby('Species').size())
# Plot a histogram for each numeric variable/attribute of the dataset
# NOTE: the first variable, Id, is also plotted; that plot should be ignored
df.hist(figsize=(12, 8))
pyplot.show()
# Density plots
# NOTE: 5 numeric variables -->> at least 5 plots -->> layout (3, 3): 3 rows, each row with 3 plots
df.plot(kind='density', subplots=True, layout=(3,3), sharex=False, legend=True, fontsize=1, figsize=(12, 16))
pyplot.show()
df.plot(kind='box', subplots=True, layout=(3, 3), sharex=False, sharey=False, figsize=(12, 8))
pyplot.show()
# Scatter plot matrix
scatter_matrix(df, alpha=0.8, figsize=(15,15))
pyplot.show()
# Store the DataFrame values in a NumPy array
array = df.values
# Separate the array into input and output components by slicing
# For X (input) [:, 1:5] --> all the rows, columns 1-4
X = array[:,1:5]
# For Y (output) [:, 5] --> all the rows, column 5
Y = array[:,5]
# Splitting the dataset --> training sub-dataset: 67%; test sub-dataset: 33%
test_size = 0.33
# Selection of records to include in each sub-dataset must be done randomly
# Use this seed for randomization
seed = 7
# Split the dataset (both input & output) into training/testing datasets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
# Build the model
model = LogisticRegression()
# Train the model using the training sub-dataset
model.fit(X_train, Y_train)
# Print the classification report
# Ref: Section 10.2.5, Machine Learning Mastery with Python
predicted = model.predict(X_test)
report = classification_report(Y_test, predicted)
print(report)
# Find and score the accuracy level
result = model.score(X_test, Y_test)
# Print out the results
print(("Accuracy: %.3f%%") % (result*100))
# 10. Classify/Predict
model.predict([[5.3, 3.0, 4.5, 1.5]])
```
# COCO Reader
Reader operator that reads a COCO dataset (or subset of COCO), which consists of an annotation file and the images directory.
`DALI_EXTRA_PATH` environment variable should point to the place where data from [DALI extra repository](https://github.com/NVIDIA/DALI_extra) is downloaded. Please make sure that the proper release tag is checked out.
```
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import numpy as np
from time import time
import os.path
test_data_root = os.environ['DALI_EXTRA_PATH']
file_root = os.path.join(test_data_root, 'db', 'coco', 'images')
annotations_file = os.path.join(test_data_root, 'db', 'coco', 'instances.json')
num_gpus = 1
batch_size = 16
class COCOPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(COCOPipeline, self).__init__(batch_size, num_threads, device_id, seed = 15)
self.input = ops.COCOReader(file_root = file_root, annotations_file = annotations_file,
shard_id = device_id, num_shards = num_gpus, ratio=True)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
def define_graph(self):
inputs, bboxes, labels = self.input()
images = self.decode(inputs)
return (images, bboxes, labels)
start = time()
pipes = [COCOPipeline(batch_size=batch_size, num_threads=2, device_id = device_id) for device_id in range(num_gpus)]
for pipe in pipes:
pipe.build()
total_time = time() - start
print("Computation graph built and dataset loaded in %f seconds." % total_time)
pipe_out = [pipe.run() for pipe in pipes]
images_cpu = pipe_out[0][0].as_cpu()
bboxes_cpu = pipe_out[0][1]
labels_cpu = pipe_out[0][2]
```
Bounding boxes returned by the operator are lists of floats composed of **\[x, y, width, height]** (`ltrb` is set to `False` by default).
```
bboxes = bboxes_cpu.at(4)
bboxes
```
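As a quick aside (an addition of mine, assuming `bboxes` is the NumPy array returned above), converting these relative `[x, y, width, height]` boxes to `[left, top, right, bottom]` form is just a matter of adding the width and height:
```
# convert [x, y, w, h] to [l, t, r, b]
ltrb_boxes = bboxes.copy()
ltrb_boxes[:, 2] = bboxes[:, 0] + bboxes[:, 2]  # right = x + width
ltrb_boxes[:, 3] = bboxes[:, 1] + bboxes[:, 3]  # bottom = y + height
ltrb_boxes
```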
Let's see the ground truth bounding boxes drawn on the image.
```
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import random
img_index = 4
img = images_cpu.at(img_index)
H = img.shape[0]
W = img.shape[1]
fig,ax = plt.subplots(1)
ax.imshow(img)
bboxes = bboxes_cpu.at(img_index)
labels = labels_cpu.at(img_index)
categories_set = set()
for label in labels:
categories_set.add(label[0])
category_id_to_color = dict([ (cat_id , [random.uniform(0, 1) ,random.uniform(0, 1), random.uniform(0, 1)]) for cat_id in categories_set])
for bbox, label in zip(bboxes, labels):
rect = patches.Rectangle((bbox[0]*W,bbox[1]*H),bbox[2]*W,bbox[3]*H,linewidth=1,edgecolor=category_id_to_color[label[0]],facecolor='none')
ax.add_patch(rect)
plt.show()
```

<a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/PatternsAndRelations/patterns-and-relations.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
```
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
The raw code for this IPython notebook is by default hidden for easier reading.
To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.''')
# Modules
import string
import numpy as np
import pandas as pd
import qgrid as q
import matplotlib.pyplot as plt
# Widgets & Display modules, etc..
from ipywidgets import widgets as w
from ipywidgets import Button, Layout, widgets
from IPython.display import display, Javascript, Markdown
# grid features for interactive grids
grid_features = { 'fullWidthRows': True,
'syncColumnCellResize': True,
'forceFitColumns': True,
'rowHeight': 40,
'enableColumnReorder': True,
'enableTextSelectionOnCells': True,
'editable': True,
'filterable': False,
'sortable': False,
'highlightSelectedRow': True}
from ipywidgets import Button , Layout , interact,widgets
from IPython.display import Javascript, display
# Function: executes previous cell on button widget click event and hides achievement indicators message
def run_current(ev):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+0,IPython.notebook.get_selected_index()+1)'))
# Counter for toggling achievement indicator on/off
button_ctr = 0
# Achievement Indicators
line_1 = "#### Achievement Indicators"
line_2 = "**General Outcome: **"
line_3 = "* Create a table of values from a linear relation, graph the table of values, and analyze the graph to draw conclusions and solve problems"
# Use to print lines, then save in lines_list
def print_lines(n):
lines_str = ""
for i in range(1,n+1):
lines_str = lines_str + "line_"+str(i)+","
lines_str = lines_str[:-1]
print(lines_str)
lines_list = [line_1,line_2,line_3]
# Show/Hide buttons
ai_button_show = widgets.Button(button_style='info',description="Show Achievement Indicators", layout=Layout(width='25%', height='30px') )
ai_button_hide = widgets.Button(button_style='info',description="Hide Achievement Indicators", layout=Layout(width='25%', height='30px') )
display(Markdown("For instructors:"))
button_ctr += 1
if(button_ctr % 2 == 0):
for line in lines_list:
display(Markdown(line))
display(ai_button_hide)
ai_button_hide.on_click( run_current )
else:
display(ai_button_show)
ai_button_show.on_click( run_current )
# Import libraries
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import operator
import qgrid as q
from ipywidgets import widgets
from ipywidgets import Button, Layout,interact_manual,interact
from IPython.display import display, Javascript, Markdown
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from ipywidgets import widgets as w
from ipywidgets import Button, Layout
from IPython.display import display, Javascript, Markdown
```
<h1 align='center'>Patterns & Relations</h1>
<h4 align = 'center'> $\mid$ Grade 7 $\mid$ Math $\mid$</h4>
<h2 align='center'>Introduction</h2>
In this notebook we will learn what an ordered pair is and how we can use a table of values to represent ordered pairs. We will work with simple linear equations (relations) and tabulate values for them.
We will also learn what a plane and a coordinate plane are, and explore the relationship between an equation and a coordinate plane.
We will then have an opportunity to practice the concepts we learned via a set of exercises that will help us build and plot a few points for a given linear relation.
This notebook is one in a series of notebooks that explore the use of patterns to describe the world and to solve problems. Please refer to notebook CC-63 for specific outcome 1.
We begin with a few definitions.
<div class="alert alert-warning">
<font color="black"><b>Definition.</b> An **ordered pair** $(n_1,n_2)$ is a pair of numbers where *order* matters.
</font>
</div>
For example, the pair $(1,2)$ is different from the pair $(2,1)$.
<div class="alert alert-warning">
<font color="black"><b>Definition.</b> An **equation** (also referred to as a **relation**) is an expression asserting that two quantities are equal.
</font>
</div>
For example,
$y = x + 2$
$y = 3x$
$y = 2$
are all equations.
<div class="alert alert-warning">
<font color="black"><b>Definition.</b> A **linear equation** (or **linear relation**) is an equation of the form $$y = ax + b$$ where $a,b$ are fixed values.
</font>
</div>
For example,
| a | b|Linear Relation |
|---|--|-----------|
|1|2|$$y = x + 2$$|
| 3 |1|$$y = 3x + 1$$|
|5|0|$$y = 5x$$ |
|0|0|$$y = 0$$|
<div class="alert alert-warning">
<font color="black"><b>Definition.</b> A **table of values** is a set of ordered pairs usually resulting from substituting numbers into an equation. </font>
</div>
For example, if we consider the equation
$$y = x + 1$$
and the values $x = 1,2,3$, the table of values corresponds to
| Value for x | Value for y|Ordered Pair (x,y)|
|---|--|-----|
|1|2|(1,2)|
|2|3|(2,3)|
|3|4|(3,4)|
Let us illustrate this with an example you can interact with.
<h2 align='center'>Interactive Example: Generating a table of values from a linear relation</h2>
Let us take the relation
$$y = x + 3$$
and suppose that $x$ is an integer. We can then obtain different values for $y$, depending on the value of $x$.
Then, if we consider the following values for x:
|     |   |   |   |   |   |   |
|-----|---|---|---|---|---|---|
| x = | 0 | 1 | 2 | 3 | 4 | 5 |
We can substitute each in the equation to obtain a new value of y.
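As a quick illustration (a tiny plain-Python sketch of mine, separate from the interactive widget below), we can substitute each of those values by hand:
```
# substitute each x into y = x + 3 and report the ordered pair
for x in [0, 1, 2, 3, 4, 5]:
    y = x + 3
    print("x =", x, " y =", y, " ordered pair:", (x, y))
```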
**Activity**
Let us try all entries to illustrate. Using the widget below change the value of $x$. What is the value for $y$ as $x$ changes?
```
%matplotlib inline
style = {'description_width': 'initial'}
@interact(x_value=widgets.IntSlider(value=0,
min=0,
max=5,
step=1,
description='Value for x',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
))
def plug_and_play(x_value):
fig = plt.figure(figsize=(16,5))
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1, 3, 3)
ax1.text(0.4,0.5,"x = " + str(x_value),fontsize=30)
ax2.text(0.34,0.7,"y = x + 3",fontsize=30)
ax2.text(0.34,0.5,"y =" + str(x_value) + " + 3",fontsize=30)
ax2.text(0.34,0.3,"y =" + str(x_value + 3),fontsize=30)
ax3.text(0.4,0.5,"(" + str(x_value) + "," + str(x_value + 3) + ")",fontsize=30)
ax1.set_title("Value for x",fontsize=30)
ax2.set_title("Value for y",fontsize=30)
ax3.set_title("Ordered Pair",fontsize=30)
ax1.set_xticklabels([]),ax1.set_yticklabels([])
ax2.set_xticklabels([]),ax2.set_yticklabels([])
ax3.set_xticklabels([]),ax3.set_yticklabels([])
ax1.axis("Off"),ax2.axis("Off"),ax3.axis("Off")
plt.show()
```
**Question**
Knowing that the linear relation is $y = x + 3$, what is the value for y, when $x = 2$? Use the widget above to help you find the answer.
```
s = {'description_width': 'initial'}
from ipywidgets import interact_manual
def question_q(answer):
if answer=="Select option":
print("Click on the correct value for y.")
elif answer=="5":
ret="Correct!"
return ret
elif answer != "5" or answer != "Select Option":
        ret = "Not quite. Recall y = x + 3. We know x = 2. What does 2 + 3 equal?"
return ret
answer_q = interact(question_q,answer=widgets.Select(
options=["Select option","2",\
"10","3",\
"5"],
value='Select option',
description="y value",
disabled=False,
style=s
))
```
**Question**
Using the correct answer above, what is the corresponding ordered pair? Recall that an ordered pair is of the form $(x,y)$.
```
s = {'description_width': 'initial'}
from ipywidgets import interact_manual
def question_q(answer):
if answer=="Select option":
print("Click on the correct ordered pair (x,y).")
elif answer=="(2,5)":
ret="Correct!"
return ret
elif answer != "(2,5)" or answer != "Select Option":
        ret = "Not quite. Recall x = 2, y = 5. The correct ordered pair is of the form (x,y)."
return ret
answer_q = interact(question_q,answer=widgets.Select(
options=["Select option","(2,5)",\
"(2,1)","(5,2)",\
"(5,3)"],
value='Select option',
description="Ordered pair (x,y)",
disabled=False,
style=s
))
```
Memorizing all different values for $x$ and $y$ is unnecessary.
We can organize the $x,y$ values along with the corresponding pairs $(x,y)$ in a table as follows.
```
### Create dataframe
#df_num_rows = int(dropdown_widget.value)
grid_features = { 'fullWidthRows': False,
'syncColumnCellResize': True,
'forceFitColumns': True,
'rowHeight': 40,
'enableColumnReorder': True,
'enableTextSelectionOnCells': True,
'editable': False,
'filterable': False,
'sortable': False,
'highlightSelectedRow': True}
# Set up data input for dataframe
x_values = np.array([0,1,2,3,4])
y_values = x_values + 3
ordered = [(x_values[i],y_values[i]) for i in range(len(x_values))]
y_equals = ["y = " + str(x_values[i]) + "+3" for i in range(len(x_values))]
df_num_rows = len(x_values)
empty_list = [ '' for i in range(df_num_rows) ]
category_list = [ i+1 for i in range(df_num_rows) ]
df_dict = {'Entry Number':category_list,\
'Values for x': empty_list, 'y = x + 3':empty_list,'Values for y': empty_list,\
'Ordered pairs': empty_list}
feature_list = ['Entry Number','Values for x','y = x + 3','Values for y','Ordered pairs']
student_df = pd.DataFrame(data = df_dict,columns=feature_list)
student_df.set_index('Entry Number',inplace=True)
student_df["Values for y"] = y_values
student_df["Values for x"] = x_values
student_df["y = x + 3"] = y_equals
student_df["Ordered pairs"] = ordered
# Set up & display as Qgrid
q_student_df = q.show_grid( student_df , grid_options = grid_features )
display(q_student_df)
```
Once we compute a few ordered pairs, we can represent them visually. We define the following two concepts.
<div class="alert alert-warning">
<font color="black"><b>Definition.</b> A **plane** is a flat surface that extends infinitely in all directions.
</font>
</div>
```
point = np.array([1, 1, 1])
normal = np.array([0, 0, 1])
# a plane is a*x+b*y+c*z+d=0
# [a,b,c] is the normal. Thus, we have to calculate
# d and we're set
d = -point.dot(normal)
# create x,y
xx, yy = np.meshgrid(range(10), range(10))
# calculate corresponding z
z = (-normal[0] * xx - normal[1] * yy - d) * 1. /normal[2]
# plot the surface
plt3d = plt.figure(figsize=(15,10)).gca(projection='3d')
plt3d.plot_surface(xx, yy, z,color="#518900",edgecolor="white")
plt3d.grid(False)
plt3d.axis("Off")
plt.show()
```
<div class="alert alert-warning">
<font color="black"><b>Definition.</b> A **coordinate plane** is a plane formed by a horizontal number line (the x-axis) and a vertical number line (the y-axis) that intersect at a point called the origin.
</font>
</div>
We can plot points on the coordinate plane. We use ordered pairs to encode information on where points are located.
Recall that an ordered pair is of the form $(x,y)$. The first entry on the pair denotes how far from the origin along the x-axis the point is, the second entry denotes how far from the origin along the y-axis the point is.
Let's see a simple example for the ordered pair $(1,4)$.
```
fig = plt.figure(figsize=(16,5))
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1, 3, 3)
ax2.set_xticks(np.arange(-5,6)),ax2.set_yticks(np.arange(-5,6))
ax2.set_xlim(0,5)
ax2.set_ylim(0,5)
ax1.axis("Off"),ax2.axis("On"),ax3.axis("Off")
ax2.axhline(y=0, color='blue')
ax2.axvline(x=0, color='blue')
ax2.text(5.1,0.1,"x-axis",fontsize=20)
ax2.text(0.1,5.1,"y-axis",fontsize=20)
ax2.grid(True)
x_value,y_value = 1,4
x_or,y_or = 0,0
ax2.scatter(x_value,y_value,color="black",s=120)
ax2.scatter(x_or,y_or,color="black",s=220)
ax2.text(x_value + 0.1,y_value + 0.5,"(" +str(x_value) + "," + str(y_value) + ")")
ax2.text(x_or + 0.1,y_or + 0.3,"origin")
ax2.plot([-5,x_value], [y_value,y_value], color='green', marker='o', linestyle='dashed',
linewidth=2, markersize=2)
ax2.plot([x_value,x_value], [-5,y_value], color='green', marker='o', linestyle='dashed',
linewidth=2, markersize=2)
plt.show()
```
Notice why the order matters. Indeed, if we consider the pair $(4,1)$ we see that it is different.
```
fig = plt.figure(figsize=(16,5))
ax1 = fig.add_subplot(1, 3, 1)
ax2 = fig.add_subplot(1, 3, 2)
ax3 = fig.add_subplot(1, 3, 3)
ax2.set_xticks(np.arange(-5,6)),ax2.set_yticks(np.arange(-5,6))
ax2.set_xlim(0,5)
ax2.set_ylim(0,5)
ax1.axis("Off"),ax2.axis("On"),ax3.axis("Off")
ax2.axhline(y=0, color='blue')
ax2.axvline(x=0, color='blue')
ax2.text(5.1,0.1,"x-axis",fontsize=20)
ax2.text(0.1,5.1,"y-axis",fontsize=20)
ax2.grid(True)
x_value,y_value = 4,1
x_or,y_or = 0,0
ax2.scatter(x_value,y_value,color="black",s=120)
ax2.scatter(x_or,y_or,color="black",s=220)
ax2.text(x_value + 0.1,y_value + 0.5,"(" +str(x_value) + "," + str(y_value) + ")")
ax2.text(x_or + 0.1,y_or + 0.3,"origin")
ax2.plot([-5,x_value], [y_value,y_value], color='green', marker='o', linestyle='dashed',
linewidth=2, markersize=2)
ax2.plot([x_value,x_value], [-5,y_value], color='green', marker='o', linestyle='dashed',
linewidth=2, markersize=2)
plt.show()
```
Let us take the table we computed previously for the relation
$$y = x +3$$
along with the ordered pairs we computed.
We can then represent the ordered pairs in the coordinate plane.
**Activity**
Use the widget below to see the relationship between the different ordered pairs and the points on the coordinate plane.
```
%matplotlib inline
@interact(x_value=widgets.IntSlider(value=0,
min=0,
max=5,
step=1,
description='Value for x',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
))
def show_points(x_value):
x_values = np.array([0,1,2,3,4,5])
y_values = x_values + 3
fig = plt.figure()
plt.subplots_adjust(left=14, bottom=0.2, right=16, top=1.5,
wspace=0.1, hspace=0.2)
ax1 = fig.add_subplot(1, 2, 1)
ax1.text(0.1,0.8,"x = " + str(x_value),fontsize=20)
ax1.text(0.1,0.6,"y = " + str(x_value) +"+ 3 = " + str(x_value + 3),fontsize=20)
ax1.text(0.1,0.4,"Ordered pair (" + str(x_value) +"," + str(x_value + 3) + ")",fontsize=20)
ax1.set_title("Values for x and y", fontsize=25)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_xticks(np.arange(-6,11)),ax2.set_yticks(np.arange(-6,11))
ax2.set_xlim(0,6)
ax2.set_ylim(0,9)
ax1.axis("Off"),ax2.axis("On")
ax2.axhline(y=0, color='blue')
ax2.axvline(x=0, color='blue')
ax2.text(6.5,0.2,"x-axis",fontsize=20)
ax2.text(0.5,9.5,"y-axis",fontsize=20)
ax2.grid(True)
# for i in range(len(x_values)):
# ax2.text(x_values[i] - 0.5,y_values[i]-0.7,"(" + str(x_values[i]) + "," + str(y_values[i]) + ")")
points = ax2.scatter(x_values,y_values,color="black",s=60)
ax2.scatter(x_value,x_value + 3,color="red",s=120)
#datacursor(points)
plt.show()
```
<h4>Conclusion</h4>
From this graph we conclude that the relation between $x$ and $y$ is linear. This makes sense given the equation is of the form
$$y = ax + b$$
where $a,b$ are fixed values and, in this particular case, $a = 1, b = 3$.
Points of interest are the intersections of the graph with the x-axis and with the y-axis. The former happens exactly when $y = 0$, while the latter occurs when $x = 0$.
We observe that the graph does not intersect the x-axis for the values of $x$ we plotted. It does intersect the y-axis when $x = 0$, at the ordered pair $(0,3)$.
```
# Create button and dropdown widget
def rerun_cell( b ):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,IPython.notebook.get_selected_index()+2)'))
style = {'description_width': 'initial'}
number_of_cat = 13
dropdown_options = [ str(i+1) for i in range(number_of_cat) ]
dropdown_widget = widgets.Dropdown( options = dropdown_options , value = '3' , description = 'Number of entries' , disabled=False,style=style )
categories_button = widgets.Button(button_style='info',description="Enter", layout=Layout(width='15%', height='30px'))
# Display widgets
#display(dropdown_widget)
#display(categories_button)
#categories_button.on_click( rerun_cell )
```
<h2 align='center'>Practice Area</h2>
<h4>Exercise</h4>
We will repeat a similar exercise as above, only this time, we will use a different linear relation.
$$y = 2x +4$$
Let us begin by building a simple table.
Answer the questions below to complete a similar table.
### Question 1
Knowing that $y = 2x + 4$, what is the value of $y$ when $x = 3$? In other words, what does $2(3) + 4$ equal to?
```
s = {'description_width': 'initial'}
from ipywidgets import interact_manual
def question_q(answer):
if answer=="Select option":
print("Click on the correct value of y.")
elif answer=="10":
ret="Correct!"
return ret
elif answer != "10" or answer != "Select Option":
        ret = "You are close to the answer but not quite. Recall 2(3) = 6. What does 6 + 4 equal?"
return ret
answer_q = interact(question_q,answer=widgets.Select(
options=["Select option","1",\
"10","3",\
"0"],
value='Select option',
description="y value",
disabled=False,
style=s
))
```
### Question 2
Knowing that $y = 2x + 4$, what is the value of $y$ when $x=0$?
```
s = {'description_width': 'initial'}
from ipywidgets import interact_manual
def question_p(answer):
if answer=="Select option":
print("Click on the correct value of y.")
elif answer=="4":
ret="Correct!"
return ret
elif answer != "4" or answer != "Select Option":
        ret = "You are close to the answer but not quite. Recall y = 2x + 4. What does 2(0) + 4 equal?"
return ret
answer_p = interact(question_p,answer=widgets.Select(
options=["Select option","-1",\
"10","4",\
"0"],
value='Select option',
description="y value",
disabled=False,
style=s
))
```
### Question 3
What is the ordered pair obtained when $x = 2$?
```
s = {'description_width': 'initial'}
from ipywidgets import interact_manual
def question_s(answer):
if answer=="Select option":
print("Click on the correct ordered pair (x,y)")
elif answer=="(2,8)":
ret="Correct!"
return ret
elif answer != "(2,8)" or answer != "Select Option":
        ret = "You are close to the answer but not quite. We know y = 8 and x = 2. We also know an ordered pair is of the form (x,y)."
return ret
answer_s = interact(question_s,answer=widgets.Select(
options=["Select option","(2,6)",\
"(2,8)","(8,2)",\
"(2,-2)"],
value='Select option',
description="Ordered pair (x,y)",
disabled=False,
style=s
))
def math_function(relation,x_val):
y_val = relation["+"](relation["Coef1"]*x_val,relation["Coef2"])
return y_val
def table_of_values_quad(range_val,relation):
empty_list = [ '' for i in range(range_val + 1) ]
category_list = [ i+1 for i in range(range_val + 1) ]
# Set up data input for dataframe
df_dict = {'Entry Number':category_list,\
'Values for x': empty_list, \
'y ='+ str(relation['Coef1']) + "x + " \
+ str(relation['Coef2']):empty_list,\
'Values for y': empty_list,\
'Ordered pairs': empty_list}
feature_list = ['Entry Number','Values for x',\
'y ='+ str(relation['Coef1']) \
+ "x + " + str(relation['Coef2']),\
'Values for y','Ordered pairs']
student_df = pd.DataFrame(data = df_dict,columns=feature_list)
student_df.set_index('Entry Number',inplace=True)
x_values = np.array(np.arange(range_val+1))
y_values = math_function(relation,x_values)
ordered = [(x_values[i],y_values[i]) for i in range(range_val+1)]
y_equals = ["y = " + str(relation['Coef1']) +"(" + str(x_values[i]) + ")" \
+ "+" + str(relation['Coef2'])
for i in range(len(x_values))]
student_df["Values for y"] = y_values
student_df["Values for x"] = x_values
student_df['y ='+ str(relation['Coef1']) + \
"x + " + str(relation['Coef2'])] = y_equals
student_df["Ordered pairs"] = ordered
q_student_df = q.show_grid( student_df , grid_options = grid_features )
display(q_student_df)
def generate_tab(value):
if value==True:
if "Correct!" in str(answer_p.widget.children)\
and "Correct!" in str(answer_q.widget.children)\
and "Correct!" in str(answer_s.widget.children):
relation_ar = {"Coef1":2,"Coef2":4,"+": operator.add}
table_of_values_quad(4,relation_ar)
else:
print("At least one of your answers is not correct. Compare your answers with the table.")
relation_ar = {"Coef1":2,"Coef2":4,"+": operator.add}
table_of_values_quad(4,relation_ar)
interact(generate_tab,value = widgets.ToggleButton(
value=False,
description='Generate Table',
disabled=False,
button_style='info', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Description',
icon='check'
));
```
### Question 4
Using the information on the table and the widget below, identify and select what ordered pairs belong to the relation
$$y = 2x + 4$$
Select one of the four options below. The correct answer will plot all the points; an incorrect answer will print a message.
```
def plot_answer(relation):
x_values = np.array([0,1,2,3,4])
y_values = relation["Coef1"]*x_values + relation["Coef2"]
fig = plt.figure()
plt.subplots_adjust(left=14, bottom=0.2, right=16, top=1.5,
wspace=0.1, hspace=0.2)
ax2 = fig.add_subplot(1, 1, 1)
ax2.set_xticks(np.arange(-6,11))
ax2.set_yticks(np.arange(-6,relation["Coef1"]*x_values[-1] + relation["Coef2"]+2))
ax2.set_xlim(0,5)
ax2.set_ylim(0,relation["Coef1"]*x_values[-1] + relation["Coef2"]+1)
ax2.text(x_values[-1] + 1,0.001,"x-axis",fontsize=20)
ax2.text(0.1,y_values[-1] + 1,"y-axis",fontsize=20)
ax2.grid(True)
# for i in range(len(x_values)):
# ax2.text(x_values[i] - 0.5,y_values[i]-0.7,"(" + str(x_values[i]) + "," + str(y_values[i]) + ")")
points = ax2.scatter(x_values,y_values,color="black",s=60)
#ax2.scatter(x_value,x_value + 3,color="red",s=120)
#datacursor(points)
plt.show()
def choose_points(value):
if value=="(3,10),(5,14),(0,4)":
print("Correct!")
rel = {"Coef1":2,"Coef2":4,"+": operator.add}
plot_answer(rel)
else:
print("Those do not look like the ordered pairs in our table. Try again.")
interact(choose_points,
value = widgets.RadioButtons(
options=[
"(3,11),(5,11),(2,8)",\
"(0,0),(1,2),(2,2)",\
"(3,10),(5,14),(0,4)",\
"(10,10),(10,8),(1,6)"],
# value='pineapple',
description='Ordered Pairs:',
disabled=False,
style = style
));
```
### Question 5: Conclusions
What can you conclude from the table above? Use the following statements to guide your answer and add any other observations you make.
| Statement |
|-----------|
|The relation between $x$ and $y$ is linear|
|There is an intersection between the y-axis and $x$ at the ordered pair ... |
|There is an intersection between the x-axis and $y$ at the ordered pair ... |
```
emma1_text = widgets.Textarea( value='', placeholder='Write your answer here. Press Record Answer when you finish.', description='', disabled=False , layout=Layout(width='100%', height='75px') )
emma1_button = widgets.Button(button_style='info',description="Record Answer", layout=Layout(width='15%', height='30px'))
display(emma1_text)
display(emma1_button)
emma1_button.on_click( rerun_cell )
emma1_input = emma1_text.value
if(emma1_input != ''):
emma1_text.close()
emma1_button.close()
display(Markdown("### Your answer for Question 6: Conclusions"))
display(Markdown(emma1_input))
```
<h2 align='center'>Experiment</h2>
In this section you will have an opportunity to explore linear relations parameterized by you, to create their respective tables of values and to plot the ordered pairs. In the end, use what you learned in this notebook to make observations about your findings.
Recall that a linear equation is of the form
$$y = ax + b$$
Use the widget below to choose new values for $a,b$.
```
def choose(a,b):
print("Equation: " + str(a) + "x + " + str(b))
return [a,b]
coeff = interact(choose,a=widgets.IntSlider(value=0,
min=0,
max=15,
step=1,
description='Value for a',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style)
,b=widgets.IntSlider(value=0,
min=0,
max=15,
step=1,
description='Value for b',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style));
def rerun_cell( b ):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,IPython.notebook.get_selected_index()+3)'))
table_button = widgets.Button(button_style='info',description="Generate Table of Values and Plot", layout=Layout(width='25%', height='30px'))
display(table_button)
table_button.on_click( rerun_cell )
relation_ar = {"Coef1":coeff.widget.kwargs['a'],"Coef2":coeff.widget.kwargs['b'],"+": operator.add}
table_of_values_quad(4,relation_ar)
plot_answer(relation_ar)
```
<h2 align='center'>Interactive Example: Find the relation from a table of values</h2>
What if, instead of knowing what the relation is, we are only given a table of values or a plot?
If we know that the values belong to a linear relation, this along with the values is enough to determine what the relation is.
Consider the table and the plotted ordered pairs below.
```
def tabulate_to_eq(relation):
x_values = np.array([0,1,2,3,4])
y_values = relation["Coef1"]*x_values + relation["Coef2"]
ordered = [(x_values[i],y_values[i]) for i in range(len(x_values))]
df_num_rows = len(x_values)
empty_list = [ '' for i in range(df_num_rows) ]
category_list = [ i+1 for i in range(df_num_rows) ]
df_dict_2 = {'Entry Number':category_list,\
'Values for x': empty_list,'Values for y': empty_list,\
'Ordered pairs': empty_list}
feature_list = ['Entry Number','Values for x','Values for y','Ordered pairs']
student_df_2 = pd.DataFrame(data = df_dict_2,columns=feature_list)
student_df_2.set_index('Entry Number',inplace=True)
student_df_2["Values for y"] = y_values
student_df_2["Values for x"] = x_values
student_df_2["Ordered pairs"] = ordered
# Set up & display as Qgrid
q_student_df_2 = q.show_grid( student_df_2 , grid_options = grid_features )
display(q_student_df_2)
rels = {"Coef1":2,"Coef2":1,"+": operator.add}
tabulate_to_eq(rels)
plot_answer(rels)
```
Can you determine what the equation is based on the ordered pairs?
In the questions below we will walk towards the solution.
## Observation #1
Using the table or the plot, find what the value of $y$ is when $x = 0$. Enter your answer in the box. When you think you have the correct answer, press the Run Interact button.
```
s = {'description_width': 'initial'}
@interact_manual(answer =widgets.Textarea(
value=' ',
placeholder='Type something',
description='Your Answer:',
disabled=False,
style=s))
def get_answer_one(answer):
if "1" in answer:
print("Correct!")
else:
print("HINT: Look at Entry Number 1 in the table. What is the value for y?")
```
### Observation #2
Recall that a linear relation is of the form $$y = ax + b$$
Use this information along with the answer to Observation #1, to deduce the value of $b$.
Enter your answer in the box below. When you think you have found an answer, press the Run Interact button.
```
s = {'description_width': 'initial'}
@interact_manual(answer =widgets.Textarea(
value=' ',
placeholder='Type something',
description='Your Answer:',
disabled=False,
style=s))
def get_answer_one(answer):
if "1" in answer:
print("Correct!")
else:
print("HINT: y = ax + b. When x = 0, y = 0 + b = 1. This means that 0 + b = 1. What is the value of b?")
```
From the observation above, we determined that the value of $b = 1$, as
$$y = ax + b$$
and when $x =0$, we observe $y = 1$. Via algebraic manipulation, this means that $ 0 +b = 1$ which means $b = 1$.
We now know our equation is of the form
$$ y =ax + 1$$
There is only one loose end. We want to get the value of $a$.
### Observation #3
Observe Entry Number 2. In there we see that the ordered pair is $(1,3)$. This means that if $x = 1$, then $y = 3$.
In our equation, this looks as follows:
$$y = a(1) + 1 = 3$$
Which is equivalent to
$$y = a + 1 = 3$$
What is the value of $a$?
```
s = {'description_width': 'initial'}
@interact_manual(answer =widgets.Textarea(
value=' ',
placeholder='Type something',
description='Your Answer:',
disabled=False,
style=s))
def get_answer_one(answer):
if "2" in answer:
print("Correct!")
else:
print("HINT: a + 1 = 3. What value, when added to 1, results in 3? ")
```
### Recap
Observe that all we needed to find the linear equation were the first two entries in the table.
Indeed, we used Entry Number 1, x = 0, y = 1 to determine that b = 1.
We then used this, along with Entry Number 2, x = 1, y = 3, to determine that a = 2.
This yields the linear equation
$$y = 2x + 1$$
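The same two steps can be checked quickly in Python (a small sketch using the two table entries above; the variable names are just for illustration):
```python
# Recover a and b in y = ax + b from the first two table entries
x0, y0 = 0, 1    # Entry Number 1
x1, y1 = 1, 3    # Entry Number 2
b = y0                       # when x = 0, y = a*0 + b = b
a = (y1 - b) / (x1 - x0)     # from y1 = a*x1 + b
print("y = {:.0f}x + {:.0f}".format(a, b))   # y = 2x + 1
```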
Use the widget below to verify that this linear equation generates the adequate table of values.
```
@interact(x_value=widgets.IntSlider(value=0,
min=0,
max=4,
step=1,
description='Value for x',
disabled=False,
continuous_update=False,
orientation='horizontal',
readout=True,
readout_format='d',
style =style
))
def verify_points(x_value):
relation = {"Coef1":2,"Coef2":1,"+": operator.add}
x_values = np.array([0,1,2,3,4])
y_values = relation["Coef1"]*x_values + relation["Coef2"]
fig = plt.figure()
plt.subplots_adjust(left=14, bottom=0.2, right=16, top=1.5,
wspace=0.1, hspace=0.2)
ax1 = fig.add_subplot(1, 2, 1)
ax1.text(0.1,0.8,"x = " + str(x_value),fontsize=20)
ax1.text(0.1,0.6,"y = " + str(relation["Coef1"]) + "x + "+ str(relation["Coef2"]),fontsize=20)
ax1.text(0.1,0.4,"y = 2(" + str(x_value) + ") + 1 = " + str(2*x_value)+ " + 1 = " + str(relation["Coef1"]*x_value+ relation["Coef2"]),fontsize=20)
ax1.text(0.1,0.2,"Ordered pair (" +str(x_value) + "," + str(relation["Coef1"]*x_value+ relation["Coef2"]) + ")",fontsize=20)
ax1.set_title("Values for x and y", fontsize=25)
ax2 = fig.add_subplot(1, 2, 2)
ax2.set_xticks(np.arange(-6,x_values[-1]+2)),ax2.set_yticks(np.arange(-6,y_values[-1]+2))
ax2.set_xlim(0,x_values[-1]+1)
ax2.set_ylim(0,y_values[-1]+1)
ax1.axis("Off"),ax2.axis("On")
ax2.axhline(y=0, color='blue')
ax2.axvline(x=0, color='blue')
ax2.text(x_values[-1]+1,0.2,"x-axis",fontsize=20)
ax2.text(0.1,y_values[-1]+1,"y-axis",fontsize=20)
ax2.grid(True)
# for i in range(len(x_values)):
# ax2.text(x_values[i] - 0.5,y_values[i]-0.7,"(" + str(x_values[i]) + "," + str(y_values[i]) + ")")
points = ax2.scatter(x_values,y_values,color="black",s=60)
ax2.scatter(x_value,relation["Coef1"]*x_value+ relation["Coef2"] ,color="red",s=120)
#datacursor(points)
plt.show()
tabulate_to_eq({"Coef1":2,"Coef2":1,"+": operator.add})
```
Why do you think equations of the form
$$y = ax + b$$
are called "linear"?
Use the box below to enter your answer.
```
emma1_text = w.Textarea( value='', placeholder='Write your answer here. Press Record Answer when you finish.', description='', disabled=False , layout=Layout(width='100%', height='75px') )
emma1_button = w.Button(button_style='info',description="Record Answer", layout=Layout(width='15%', height='30px'))
display(emma1_text)
display(emma1_button)
emma1_button.on_click( rerun_cell )
```
<h2 align='center'>Conclusion</h2>
In this notebook we learned what an ordered pair is, what a table of values is, and what a plane and a coordinate plane are. Furthermore, we learned that, given a relation between x and y, we can track and represent that relation via a table of values or a coordinate plane.
We analyzed basic linear relations, tabulated their values and plotted on a coordinate plane. We explored the pairs that intersected the y and x axis and made remarks based on our observations.
[](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
|
github_jupyter
|
# Python code for calculations on data from ATP
```
#Imports
```
# Calculations
## Age required to be offered early retirement
```
# Cohorts born in 1955-1960 are eligible to apply in 2021.
# You must have turned 61 to apply.
print(2021-61, "can request early retirement")
# People from 6½ birth-year cohorts are included
print(2021-66, "is the last birth year before the state pension kicks in")
# since people born in the first half of 1955 reach the state pension age before 1 January 2022.
```
## Expected applications
```
# Around 38,000 full-year persons are expected to gain the right to early retirement in 2022.
# This covers the option of retiring 1, 2 or 3 years early.
# 22,000 will make use of the right in 2022, 6,000 of them coming from employment.
ansøgning = 38100 # persons
benyttelse = 22000 # will use early retirement
print("That means the number of people who waive the right to early retirement is", ansøgning-benyttelse, "persons")
```
## People with 44+ years in the labour market
```
# People with 44+ years in the labour market automatically have full seniority.
automatisk_behandling = 1.7+7.1
berettiget = 33.9
procent = round(automatisk_behandling/berettiget*100)
print(automatisk_behandling, "out of", berettiget, "means that", procent,
      "% of the applicants automatically have full seniority, i.e.", round(ansøgning/100*procent), "persons")
# 8.8 out of 33.9 = 26% of the applicants automatically have full seniority. 26% of 38,100 = 9,906 persons.
```
## Manual processing
```
# People with 42 or 43 years of seniority in the labour market require manual processing.
resterende = 74
# 74% of the 38,100 applicants = 28,194 applicants require manual processing
print(round(ansøgning/100*resterende), "applicants require manual processing")
```
## Supplementary documentation
```
# We expect 50% of the applicants requiring manual processing to submit supplementary documentation.
halvdelen = 2
manuel_håndtering = 28194
# 50% of 28,194 = 14,097 persons.
print(round(manuel_håndtering/halvdelen))
```
## Weeks in the application period
```
# How many weeks are there in the period 1 August - 31 December?
antal_uger = 52 # weeks in a year
uger_i_perioden = 31 # weeks (1 August falls around week 31)
print(antal_uger-uger_i_perioden, "weeks in the application period")
```
## Working hours
```
# How many working hours are needed in the period?
# ATP has an experience-based estimate of the time spent on manual processing of a typical application,
# corresponding to 30 minutes.
supplerende = 14097 # number of applications that require supplementary documentation
halvtime = 2 # half an hour per case, i.e. 2 cases per hour
print(supplerende/halvtime, "working hours")
```
## Number of employees
```
# How many hours per week is it reasonable to say each employee spends on case processing?
# Hours are also spent on administration, internal meetings and other tasks.
uger_ansøgning = 21 # weeks
årsværk_ATP = 1356 # hours in a full-time year at ATP
procent_år = 40.38 # percent
print(uger_ansøgning/antal_uger*100, "% of a year, of the 1356 hours =", round(årsværk_ATP*0.404), "hours")
print(round((supplerende/halvtime)/548), "employees")
```
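To tie the cells together, here is a compact end-to-end sketch of the same staffing estimate. The variable names are my own, and the figures are the assumptions used in the cells above.
```python
# End-to-end sketch of the staffing estimate, using the figures assumed above
applications = 38100                      # expected applicants
manual_share = 0.74                       # 42/43 years of seniority -> manual processing
docs_share = 0.50                         # expected to send supplementary documentation
hours_per_case = 0.5                      # ATP's estimate: 30 minutes per application
weeks_in_period = 21                      # 1 August - 31 December
hours_per_employee = 1356 * weeks_in_period / 52   # share of a yearly full-time equivalent

cases = applications * manual_share * docs_share    # ~14,097 cases
total_hours = cases * hours_per_case                # ~7,049 hours
print(round(total_hours / hours_per_employee), "employees needed")   # ~13
```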
|
github_jupyter
|
# Practice Notebook: Methods and Classes
The code below defines an *Elevator* class. The elevator has a current floor, it also has a top and a bottom floor that are the minimum and maximum floors it can go to. Fill in the blanks to make the elevator go through the floors requested.
```
class Elevator:
    def __init__(self, bottom, top, current):
        """Initializes the Elevator instance."""
        self.bottom = bottom
        self.top = top
        self.current = current
    def __str__(self):
        """Information about the current floor."""
        return "Current floor: {}".format(self.current)
    def up(self):
        """Makes the elevator go up one floor, but not above the top floor."""
        if self.current < self.top:
            self.current += 1
    def down(self):
        """Makes the elevator go down one floor, but not below the bottom floor."""
        if self.current > self.bottom:
            self.current -= 1
    def go_to(self, floor):
        """Makes the elevator go to the specified floor, clamped to [bottom, top]."""
        if floor >= self.bottom and floor <= self.top:
            self.current = floor
        elif floor < self.bottom:
            self.current = self.bottom
        else:
            self.current = self.top

elevator = Elevator(-1, 10, 0)
```
This class is pretty empty and doesn't do much. To test whether your *Elevator* class is working correctly, run the code blocks below.
```
elevator.up()
elevator.current #should output 1
elevator.down()
elevator.current #should output 0
elevator.go_to(10)
elevator.current #should output 10
```
If you get a **<font color =red>NameError</font>** message, be sure to run the *Elevator* class definition code block first. If you get an **<font color =red>AttributeError</font>** message, be sure to initialize *self.current* in your *Elevator* class.
Once you've made the above methods output 1, 0 and 10, you've successfully coded the *Elevator* class and its methods. Great work!
<br><br>
For the up and down methods, did you take into account the top and bottom floors? Keep in mind that the elevator shouldn't go above the top floor or below the bottom floor. To check that out, try the code below and verify if it's working as expected. If it's not, then go back and modify the methods so that this code behaves correctly.
```
# Go to the top floor. Try to go up, it should stay. Then go down.
elevator.go_to(10)
elevator.up()
elevator.down()
print(elevator.current) # should be 9
# Go to the bottom floor. Try to go down, it should stay. Then go up.
elevator.go_to(-1)
elevator.down()
elevator.down()
elevator.up()
elevator.up()
print(elevator.current) # should be 1
```
Now add the `__str__` method to your *Elevator* class definition above so that when printing the elevator using **print( )**, we get the current floor together with a message. For example, on the 5th floor it should say "Current floor: 5"
```
elevator.go_to(5)
print(elevator)
```
Remember, Python uses a default method that prints the position where the object is stored in the computer’s memory. If your output is something like: <br>
> <__main__.Elevator object at 0x7ff6a9ff3fd0>
Then you will need to add the special `__str__` method, which returns the string that you want to print. Try again until you get the desired output, "Current floor: 5".
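For reference, a minimal sketch of such a `__str__` method is shown below. It mirrors the definition in the class above; `MiniElevator` is just an illustrative stand-in name.
```python
class MiniElevator:
    def __init__(self, current):
        self.current = current
    def __str__(self):
        # print() will display this string instead of the default memory-address repr
        return "Current floor: {}".format(self.current)

print(MiniElevator(5))   # Current floor: 5
```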
Once you have successfully produced the desired output, you are all done with this practice notebook. Awesome!
|
github_jupyter
|
# Worksheet 0.1.2: Python syntax (`while` loops)
<div class="alert alert-block alert-info">
This worksheet will invite you to tinker with the examples, as they are live code cells. Instead of the normal fill-in-the-blank style of notebook, feel free to mess with the code directly. Remember that -- to test things out -- the <a href = "../sandbox/CMPSC%20100%20-%20Week%2000%20-%20Sandbox.ipynb"><b>Sandbox</b></a> is available to you as well.
</div>
<div class="alert alert-block alert-warning" id = "warning">
The work this week also offers the opportunity to tie up the server in loopish operations. Should you be unable to run cells, simply locate the <b>Kernel</b> menu at the top of the screen and click <b>Interrupt Kernel</b>. This should jumpstart the kernel again and clear out the infinite loop behavior.</div>
## Feeling a bit loopy?
If you're not, you might start to during this worksheet.
We've mostly covered cases in which events or calculations need to happen one at a time or just one time in total. Occasionally -- much more than occasionally, really -- programs need to repeat instructions more than once until some given condition is met.
If you read the word "condition" above and have started thinking `booleans` are involved: you're right yet again.
While casually referred to as a "loop" structure, the technical term for this repetition is _iteration_ -- the process of repeating a set of statements until a given condition is no longer `True`. In the case of `while` loops, we can rephrase the statement above to read "while the condition is true, repeat some statements"
`while` loops recall syntax similar to `if` statements:
```python
while CONDITION:
# Repeat
# these
# statements
```
Again, notice that indentation plays a part here: everything indented underneath the `while` statement "belongs to" that `while` loop, and will be subject to repetition.
For example, a simple countdown:
```
# Initialize starting number
seconds = 10
# Start while loop
while seconds > 0:
print(seconds)
seconds -= 1
print("Liftoff!")
```
In the above block of code, we start by telling the program where to, well, start. Then we print that number followed by an instruction `seconds -= 1` to _decrement_ (decrease) that number by one _each time the loop runs_ (on each _iteration_). By the time we reach the end, the last run notices that `seconds == 0`, therefore `seconds` _is not_ greater than `0` anymore and it breaks out of the loop, executing the next statement after the loop.
Like Worksheet 0 this week, we can use any combination of expressions that `boolean` values can muster, be they _relational operators_ testing `integers`, `floating point numbers`, `strings`, or _logical operators_ looking for combinations of `boolean` expressions.
### `while` loops and the flow of control
Here's that control topic back to haunt us.
Again, the technical flow of the program's instructions (i.e. code) doesn't change. It's still technically top-down. However, something interesting happens when we hit a loop or _iteration_. Consider our countdown above:

As the diagram points out, the flow of control changes when we encounter a `while` loop. Statements _in the loop_ execute from `top -> bottom -> top` until the condition cited is no longer true (in this case, until the moment that `seconds` dips below `1`).
## Detour into user input
Why user input and why now? As we'll see in future weeks, other kinds of loops can fulfill the same purpose as `while` loops. However, there's something unique that `while` loops can do that others aren't so well-suited to: handling user input.
This relies on our understanding of _functions_ as we're about to learn a new one: `input()` -- a function which allows us to prompt users to enter data so that our programs can do an operation called "parsing" (understanding/reading) it.
It's always helpful to put some `string` value in the parenthesis as the _argument_, to give a user some sense of what they're being requested to type. In fact, the general format of the function is, like others we've seen:
```python
# Where ARGUMENT will evaluate to a string
input(ARGUMENT)
```
In order to effectively use it, we need to _assign_ the result of the function to a variable to store it in memory. Run the following cell for an example:
```
name = input("What is your name: ")
print("Hello, " + name + ".")
```
_Because we stored the result_ in `name`, we can use it -- whether that's to `print` it or test it:
```
if name == "The Professor":
print("The Professor is in the house!")
else:
print("Oh, you're not the professor. Forget it.")
```
One thing of particular note:
```
# The identifier "str_value" is arbitrary here, no real significance
str_value = input("Enter a numeric value of any kind: ")
print(type(str_value))
```
No matter what we enter here, the result will always be a `string` type. That's because the Python language is rigged to do its best with _whatever_ a user writes in the prompt. This means handling characters/symbols (`!@#^!%#@$` -- I promise I'm not swearing here), letters (`a`,`b`,`c`), or numbers (either `integer` or `float`).
This means that, to make it useful, we might have to _convert_ it if we want a numeric value from it:
```
float_value = float(str_value)
print(type(float_value))
```
However, insofar as user input is concerned, we can test it _or_ we can use it as something called a "sentinel" value. Using the word "sentinel" here means exactly what it means in normal speech -- something to watch out for, like the following:
```
# Setup choice
choice = ""
# Do the loop
while choice != "E":
# Print message to relay the user's choice
print("The loop is running.")
choice = input("[C]ontinue or [E]xit? ")
print("You chose to exit!")
```
## Mean`while`
The conclusion we should draw from our little detour is this: `while` statements are exceptionally good at handling all kinds of `boolean` conditions. When it's merely simple counting, like our countdown example above, it's OK, too -- but, as we'll see in the near future, counting and other more complex tests are better suited by other loop types.
## Infinite loops
<div class="alert alert-block alert-danger">
Most programmers fall prey to an infinite loop from time to time. The examples below are not code cells, because if you were to run them, they would -- well -- loop infinitely. If you think you're stuck in an infinite loop infinite loop infinite loop infinite loop infinite loop infinite loop infinite loop infinite loop infinite loop infinite loop infinite loop infinite loop infinite loop, don't hesistate to take the advice in the <a href = "#warning">warning</a> above.
</div>
There are times when conditions _can't_ be met. For example:
```python
count = 0
while True:
count += 1
```
Now, there _is_ an application for something like this. However, note that if we say `while True`, the condition is literally hard-coded to be `True` _all the time_ -- it can never change. Another example:
```python
sum = 0
limit = 5
while sum < limit:
sum -=1
```
Here, we're actually _counting backwards_, so `sum` will never be `+5`. It might be `-5` (at some point), but it will continue on to `-∞`, and never stop. In essence, `sum` will always be less than `limit`.
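A minimal fix, assuming the intent was for the loop to stop once `sum` reaches `limit`, is to move `sum` in the right direction:
```python
sum = 0
limit = 5
while sum < limit:
    sum += 1   # incrementing (instead of decrementing) lets the condition become False
print(sum)     # 5 -- the loop terminates
```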
## The sum of its parts
First things first: this program is a one-trick pony: it only adds numbers.
I'm asking you to use what we have learned in this worksheet to write a program whose sole purpose is to present the sum of a set of user-entered numbers. The user should be able to enter as many numbers as they want, provided that:
* all of the numbers are integers
* uses `number` to store user input
* users can choose to quit by entering an `0` at the prompt
* `if number == 0`, don't add the number to `count`
* non-hint hint: there are at least 3 ways to do this
* the program outputs the sum in the following format:
```
The sum of these # numbers -> ###
```
* The "proof" of this program is to add the following numbers when you grade the worksheet:
* `4`,`8`,`15`,`16`,`23`,`42`
I'll start you out.
```
# NOTE: YOU MUST RUN THIS CELL TO MAKE THESE VARIABLES AVAILABLE
# NOTE 2: RUN THIS CELL TO RESET ALL NUMBERS AS WELL
# Setup variable to handle input
number = ""
# Setup variable to keep running total
sum = 0
# Setup a count variable to track the count
count = 0
# TODO: Write code to complete activity using knowledge you've gained about while loops
```
## Finishing this activity
If the program above runs and you've finished the worksheet, be sure to run `gradle grade` to do one final check!
|
github_jupyter
|
# "# backtesting with grid search"
> "Easily backtest a grid of parameters in a given trading strategy"
- toc: true
- branch: master
- badges: true
- comments: true
- author: Jerome de Leon
- categories: [grid search, backtest]
<a href="https://colab.research.google.com/github/enzoampil/fastquant/blob/master/examples/2020-04-20-backtest_with_grid_search.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# uncomment to install in colab
# !pip3 install fastquant
```
## backtest SMAC
`fastquant` offers a convenient way to backtest several trading strategies. To backtest using Simple Moving Average Crossover (`SMAC`), we do the following.
```python
backtest('smac', dcv_data, fast_period=15, slow_period=40)
```
`fast_period` and `slow_period` are two `SMAC` parameters that can be changed depending on the user's preferences. A simple way to fine-tune these parameters is to run `backtest` on a grid of values and find which combination of `fast_period` and `slow_period` yields the highest net profit.
First, we fetch `JFC`'s historical data comprised of date, close price, and volume.
```
from fastquant import get_stock_data, backtest
symbol='JFC'
dcv_data = get_stock_data(symbol,
start_date='2018-01-01',
end_date='2020-04-28',
format='cv',
)
dcv_data.head()
import matplotlib.pyplot as pl
pl.style.use("default")
from fastquant import backtest
results = backtest("smac",
dcv_data,
fast_period=15,
slow_period=40,
verbose=False,
plot=True
)
```
The plot above is optional. `backtest` returns a dataframe of parameters and corresponding metrics:
```
results.head()
```
## define the search space
Second, we specify the range of reasonable values to explore for `fast_period` and `slow_period`. Let's take between 1 and 19 trading days (roughly a month) in steps of 1 day for `fast_period`, and between 20 and 240 trading days (roughly a year) in steps of 5 days for `slow_period`.
```
import numpy as np
fast_periods = np.arange(1,20,1, dtype=int)
slow_periods = np.arange(20,241,5, dtype=int)
# make a grid of 0's (placeholder)
period_grid = np.zeros(shape=(len(fast_periods),len(slow_periods)))
period_grid.shape
```
## run grid search
Third, we run backtest for each iteration over each pair of `fast_period` and `slow_period`, saving each time the net profit to the `period_grid` variable.
Note: Before running backtest over a large grid, try measuring how long it takes your machine to run one backtest instance.
```python
%%timeit
backtest(...)
```
On my machine with 8 cores, `backtest` takes
```
101 ms ± 8.3 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
```
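Multiplying that single-run timing by the number of grid points gives a rough sense of the total runtime before committing to the loop. This is only a back-of-the-envelope sketch; the 0.101 s figure is the timing quoted above.
```python
# Rough runtime estimate for the basic grid search
n_combos = len(fast_periods) * len(slow_periods)   # 19 * 45 = 855 combinations
est_seconds = n_combos * 0.101                     # ~101 ms per backtest call (timed above)
print("Expect roughly {:.0f} s for the full grid".format(est_seconds))   # ~86 s
```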
```
from time import time
init_cash=100000
start_time = time()
for i,fast_period in enumerate(fast_periods):
for j,slow_period in enumerate(slow_periods):
results = backtest('smac',
dcv_data,
fast_period=fast_period,
slow_period=slow_period,
init_cash=100000,
verbose=False,
plot=False
)
net_profit = results.final_value.values[0]-init_cash
period_grid[i,j] = net_profit
end_time = time()
time_basic = end_time-start_time
print("Basic grid search took {:.1f} sec".format(time_basic))
```
## visualize the period grid
Next, we visualize `period_grid` as a 2D matrix.
```
import matplotlib.colors as mcolors
import matplotlib.pyplot as pl
pl.style.use("default")
fig, ax = pl.subplots(1,1, figsize=(8,4))
xmin, xmax = slow_periods[0],slow_periods[-1]
ymin, ymax = fast_periods[0],fast_periods[-1]
#make a diverging color map such that profit<0 is red and blue otherwise
cmap = pl.get_cmap('RdBu')
norm = mcolors.TwoSlopeNorm(vmin=period_grid.min(),
vmax = period_grid.max(),
vcenter=0
)
#plot matrix
cbar = ax.imshow(period_grid,
origin='lower',
interpolation='none',
extent=[xmin, xmax, ymin, ymax],
cmap=cmap,
norm=norm
)
pl.colorbar(cbar, ax=ax, shrink=0.9,
label='net profit', orientation="horizontal")
# search position with highest net profit
y, x = np.unravel_index(np.argmax(period_grid), period_grid.shape)
best_slow_period = slow_periods[x]
best_fast_period = fast_periods[y]
# mark position
# ax.annotate(f"max profit={period_grid[y, x]:.0f}@({best_slow_period}, {best_fast_period}) days",
# (best_slow_period+5,best_fast_period+1)
# )
ax.axvline(best_slow_period, 0, 1, c='k', ls='--')
ax.axhline(best_fast_period+0.5, 0, 1, c='k', ls='--')
# add labels
ax.set_aspect(5)
pl.setp(ax,
xlim=(xmin,xmax),
ylim=(ymin,ymax),
xlabel='slow period (days)',
ylabel='fast period (days)',
title='JFC w/ SMAC',
);
print(f"max profit={period_grid[y, x]:.0f} @ ({best_slow_period},{best_fast_period}) days")
```
From the plot above, there are only a few period combinations for which the SMAC strategy yields a non-negative net profit. The best result is achieved with (105,30) for `slow_period` and `fast_period`, respectively.
In fact, the SMAC strategy performs so poorly here that there is only a 9% chance it will yield a profit for a random period combination in our grid, which is even smaller than the 12% chance that it will at least break even.
```
percent_positive_profit=(period_grid>0).sum()/np.product(period_grid.shape)*100
percent_positive_profit
percent_breakeven=(period_grid==0).sum()/np.product(period_grid.shape)*100
percent_breakeven
```
Anyway, let's check the results of backtest using the `best_fast_period` and `best_slow_period`.
```
results = backtest('smac',
dcv_data,
fast_period=best_fast_period,
slow_period=best_slow_period,
verbose=True,
plot=True
)
net_profit = results.final_value.values[0]-init_cash
net_profit
```
There are only 6 crossover events, of which only the latest transaction yielded positive gains, resulting in a 7% net profit. Is a 7% profit over a ~two-year baseline better than the market benchmark?
## built-in grid search in fastquant
The good news is `backtest` provides a built-in grid search if strategy parameters are lists. Let's re-run `backtest` with a grid we used above.
```
from fastquant import backtest
start_time = time()
results = backtest("smac",
dcv_data,
fast_period=fast_periods,
slow_period=slow_periods,
verbose=False,
plot=False
)
end_time = time()
time_optimized = end_time-start_time
print("Optimized grid search took {:.1f} sec".format(time_optimized))
```
`results` is automatically ranked based on `rnorm` which is a proxy for performance. In this case, the best `fast_period`,`slow_period`=(8,200) d.
The returned results should have `len(fast_periods)` x `len(slow_periods)` rows (19 x 45 = 855 in this case).
```
results.shape
results.head()
```
Now, we recreate the 2D matrix before, but this time using scatter plot.
```
fig, ax = pl.subplots(1,1, figsize=(8,4))
#make a diverging color map such that profit<0 is red and blue otherwise
cmap = pl.get_cmap('RdBu')
norm = mcolors.TwoSlopeNorm(vmin=period_grid.min(),
vmax = period_grid.max(),
vcenter=0
)
#plot scatter
results['net_profit'] = results['final_value']-results['init_cash']
df = results[['slow_period','fast_period','net_profit']]
ax2 = df.plot.scatter(x='slow_period', y='fast_period', c='net_profit',
norm=norm, cmap=cmap, ax=ax
)
ymin,ymax = df.fast_period.min(), df.fast_period.max()
xmin,xmax = df.slow_period.min(), df.slow_period.max()
# best performance (instead of highest profit)
best_fast_period, best_slow_period, net_profit = df.loc[0,['fast_period','slow_period','net_profit']]
# mark position
# ax.annotate(f"max profit={net_profit:.0f}@({best_slow_period}, {best_fast_period}) days",
# (best_slow_period-100,best_fast_period+1), color='r'
# )
ax.axvline(best_slow_period, 0, 1, c='r', ls='--')
ax.axhline(best_fast_period+0.5, 0, 1, c='r', ls='--')
ax.set_aspect(5)
pl.setp(ax,
xlim=(xmin,xmax),
ylim=(ymin,ymax),
xlabel='slow period (days)',
ylabel='fast period (days)',
title='JFC w/ SMAC',
);
# fig.colorbar(ax2, orientation="horizontal", shrink=0.9, label='net profit')
print(f"max profit={net_profit:.0f} @ ({best_slow_period},{best_fast_period}) days")
```
Note also that built-in grid search in `backtest` is optimized and slightly faster than the basic loop-based grid search.
```
#time
time_basic/time_optimized
```
## Final notes
While it is tempting to do a grid search over a larger search space with finer resolution, it is computationally expensive, inefficient, and prone to overfitting. There are better methods than brute-force grid search, which we will tackle in the next example.
As an exercise, it is good to try the following:
* Use different trading strategies and compare their results
* Use a longer data baseline
|
github_jupyter
|
# Stop Reinventing Pandas
The following post was presented as a talk for the [IE@DS](https://www.facebook.com/groups/173376299978861/) community, and for the [PyData meetup](https://www.meetup.com/PyData-Tel-Aviv/events/256232456/).
All the resources for this post, including a runable notebook, can be found in the [github repo](https://github.com/DeanLa/dont_reinvent_pandas)
blog post version here:
<span style="font-size:2em"> [DeanLa.com](http://deanla.com/)</span>

This notebook aims to show some nice ways modern Pandas makes your life easier. It is not about efficiency. I'm sure Pandas' built-in methods will be more efficient than reinventing Pandas, but the main goal is to make the code easier to read and, more importantly, easier to write.

```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use(['classic', 'ggplot', 'seaborn-poster', 'dean.style'])
%load_ext autoreload
%autoreload 2
import my_utils
import warnings
warnings.simplefilter("ignore")
```
# First Hacks!
Reading the data and a few housekeeping tasks are the first place we can make our code more readable.
```
df_io = pd.read_csv('./bear_data.csv', index_col=0, parse_dates=['date_'])
df_io.head()
df = df_io.copy().sort_values('date_').set_index('date_').drop(columns='val_updated')
df.head()
```
## Beautiful pipes!
One-line method chaining is hard to read and prone to human error; chaining each method on its own line makes it a lot more readable.
```
df_io\
.copy()\
.sort_values('date_')\
.set_index('date_')\
.drop(columns='val_updated')\
.head()
```
But it has a problem: you can't comment lines out, or even add comments in between.
```
# This block will result in an error
df_io\
.copy()\ # This is an inline comment
# This is a regular comment
.sort_values('date_')\
# .set_index('date_')\
.drop(columns='val_updated')\
.head()
```
Even an unnoticeable space character may break everything
```
# This block will result in an error
df_io\
.copy()\
.sort_values('date_')\
.set_index('date_')\
.drop(columns='val_updated')\
.head()
```
## The Penny Drops
I like those "penny dropping" moments, when you realize you knew everything that is presented, yet it is presented in a new way you never thought of.
```
# We can split these value inside ()
users = (134856, 195373, 295817, 294003, 262166, 121066, 129678, 307120, 258759, 277922, 220794, 192312,
318486, 314631, 306448, 297059,206892,
169046, 181703, 146200, 199876, 247904, 250884, 282989, 234280, 202520,
138064, 133577, 301053, 242157)
# Penny Drop: We can also Split here
df = (df_io
.copy() # This is an inline comment
# This is a regular comment
.sort_values('date_')
.set_index('date_')
.drop(columns='val_updated')
)
df.head()
```
## Map with dict
A dict is a mapping with $f(key) = value$, therefore you can call `.map` with it. In this example I want to map the integer key codes to bear names.
```
df.bear_type.map(lambda x: x+3).head()
# A dict is also a valid mapping for .map
bears = {
1: 'Grizzly',
2: 'Sun',
3: 'Pizzly',
4: 'Sloth',
5: 'Polar',
6: 'Cave',
7: 'Black',
8: 'Panda'
}
df['bear_type'] = df.bear_type.map(bears)
df.head()
```
# Time Series
## Resample
Task: How many events happen each hour?
### The Old Way
```
bad = df.copy()
bad['day'] = bad.index.date
bad['hour'] = bad.index.hour
(bad
.groupby(['day','hour'])
.count()
)
```
* Many lines of code
* unneeded columns
* Index is not a time anymore
* **missing rows** (Did you notice?)
### A Better Way
```
df.resample('H').count() # H is for Hour
```
But it's even better on non-round intervals
```
rs = df.resample('10T').count()
# T is for minute, and pandas understands 10T; it will also understand 11T if you wonder
rs.head()
```
[Complete list of Pandas' time abbreviations](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Period.strftime.html)
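For a quick feel of the syntax, a couple more aliases used the same way (a sketch; the bin sizes here are chosen arbitrarily for illustration):
```python
# Same resample mechanics, different bin widths ('S' is seconds, 'D' is calendar day)
df.resample('30S').count().head()   # 30-second bins
df.resample('D').count()            # daily bins
```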
## Slice Easily
Pandas will automatically parse strings into timestamps, and it will understand what you want it to do.
```
# Take only timestamp in the hour of 21:00.
rs.loc['2018-10-09 21',:]
# Take all time stamps before 18:31
rs.loc[:'2018-10-09 18:31',:]
```
## Time Windows: Rolling, Expanding, EWM
If your DataFrame is indexed on a time index (which ours is), you can apply rolling, expanding, and exponentially weighted windows directly.
```
fig, ax = plt.subplots()
rs.rename(columns = {'bear_type':'bears'}).plot(ax=ax,linestyle='--')
(rs
.rolling('90T')
.mean()
.rename(columns = {'bear_type':'rolling mean'})
.plot(ax=ax)
)
rs.expanding().mean().rename(columns = {'bear_type':'expanding mean'}).plot(ax=ax)
rs.ewm(6).mean().rename(columns = {'bear_type':'ewm mean'}).plot(ax=ax)
plt.show()
```
### With Apply
Intuitively, windows are like GroupBy, so you can apply anything you want after the grouping, e.g.: geometric mean.
```
fig, ax = plt.subplots()
rs.plot(ax=ax,linestyle='--')
(rs
.rolling(6)
.apply(lambda x: np.power(np.product(x),1/len(x)),raw=True)
.rename(columns = {'bear_type':'Rolling Geometric Mean'})
.plot(ax=ax)
)
plt.show()
```
## Combine with GroupBy 🤯
Pandas has no problem with groupby and resample together; it's as simple as a `groupby` on two columns. In our specific case, we want to count events in an interval per event type.
```
per_bear = (df
.groupby('bear_type')
.resample('15T')
.apply('count')
.rename(columns={'bear_type':'amount'})
)
per_bear.groupby('bear_type').head(2)
```
# Sorting
## By Values
```
per_bear.sort_values(by=['amount'], ascending=False).head(10)
```
## By Index
```
per_bear.sort_index().head(7)
per_bear.sort_index(level=1).head(7)
```
## By Both <span style="color:red">(New in 0.23)</span>
`Index` has a name. Modern Pandas knows to address this index by name just like a regular column.
```
per_bear.sort_values(['amount','bear_type'], ascending=(False, True)).head(10)
```
# Stack, Unstack
## Unstack
In this case, working with a wide format indexed on intervals, with event types as columns, will make a lot more sense.
### The Old way
Pivot table in modern pandas is more robust than it used to be. Still, it requires you to specify everything.
```
pt = pd.pivot_table(per_bear,values = 'amount',columns='bear_type',index='date_')
pt.head()
```
### A better way
When you have just one column of values, unstack does the same easily
```
pt = per_bear.unstack('bear_type')
pt.columns = pt.columns.droplevel() # Unstack creates a multiindex on columns
pt.head()
```
## Stack
And some extra tricks
```
pt.stack().head()
```
This looks kind of like what we had expected, but:
* It's a series, not a DataFrame
* The levels of the index are "reversed" to before
* The main sort is on the date, yet it used to be on the event type
### Some More Hacks
```
stack_back = (pt
.stack()
.to_frame('amount') # Turn Series to DF without calling the DF constructor
.swaplevel() # Swaps the levels of the index
.sort_index()
)
stack_back.head()
stack_back.equals(per_bear)
```
# GroupBy
```sql
select min(B), avg(B), geometric_mean(B), min(C), max(C)
from pt
group by A
```
```
pt
```
## Old Ways
```
pt.groupby('Grizzly')['Polar'].agg(['min','mean']).head()
```
### List Aggregates
```
pt.groupby('Grizzly')[['Polar','Black']].agg(['min','mean',lambda x: x.prod()/len(x),'max']).head()
```
* Not what we wanted
* MultiIndex
* Names are not unique
* How do you access `<lambda_0>`
### Dict aggregate
```
pt.groupby('Grizzly').agg({'Polar':['min','mean',lambda x: x.prod()/len(x)],'Black':['min','max']})
```
### With Rename
```
pt.groupby('Grizzly').Polar.agg({'min_Polar':'min'})
warnings.simplefilter("ignore")
pt.groupby('Grizzly').agg({
'Polar':{'min_Polar':'min','avg_Polar':'mean','geo_Polar':lambda x: x.prod()/len(x)},
'Black':{'min_Black':'min','max_Black':'max'}
})
warnings.simplefilter("default")
```
Still a MultiIndex
## Named Aggregations <span style="color:red">(New in 0.25)</span>
This is also the way to go from `1.0.0`, as the others will be deprecated
```
def geo(x):
return x.prod()/len(x)
pt.groupby('Grizzly').agg(
min_Polar = pd.NamedAgg(column='Polar', aggfunc='min'),
avg_Polar = pd.NamedAgg(column='Polar', aggfunc='mean'),
geo_Polar = pd.NamedAgg('Polar', geo),
# But actually NamedAgg is optional
min_Black = ('Black','min'),
max_Black = ('Black','max')
)
```
# Clip
Let's say we know from domain knowledge that a bear walks around a minimum of 3 and a maximum of 12 times at each timestamp. We would like to enforce that.
In a real-world example, we often want to turn negative numbers into zeroes or cap some truly big numbers at a known max.
## The Old Way
Iterate over columns and change values that meet condition.
```
cl = pt.copy()
lb = 3
ub = 12
# Needed A loop of 3 lines
for col in ['Grizzly','Polar','Black']:
cl['clipped_{}'.format(col)] = cl[col]
cl.loc[cl[col] < lb,'clipped_{}'.format(col)] = lb
cl.loc[cl[col] > ub,'clipped_{}'.format(col)] = ub
my_utils.plot_clipped(cl) # my_utils can be found in the github repo
```
## A better way
`.clip(lb,ub)`
```
cl = pt.copy()
cl['Grizzly'] = cl.Grizzly.clip(3,12)
cl = pt.copy()
# Beutiful One Liner
cl[['clipped_Grizzly','clipped_Polar','clipped_Black']] = cl.clip(5,12)
my_utils.plot_clipped(cl) # my_utils can be found in the github repo
```
# Reindex
Now we have 3 types of bears from 17:00 to 23:00. But we were at the park from 16:00 to 00:00. We've also been told that this park has Panda bears and Cave bears.
In the old way, we would handle the column assignment with a loop, and for the rows we would maybe create a column and do some join. A lot of work.
```
etypes = ['Grizzly','Polar','Black','Panda','Cave'] # New columns
# Define a date range - Pandas will automatically make this into an index
idx = pd.date_range(start='2018-10-09 16:00:00',end='2018-10-09 23:59:00',freq=pt.index.freq,tz='UTC')
type(idx)
pt.reindex(index=idx, columns=etypes, fill_value=0).head(8)
### Let's put this in a function - This will help us later.
def get_all_types_and_timestamps(df, min_date='2018-10-09 16:00:00',
max_date='2018-10-09 23:59:00',
etypes=['Grizzly','Polar','Black','Panda','Cave']):
ret = df.copy()
time_idx = pd.date_range(start=min_date,end=max_date,freq='15T',tz='UTC')
    # Indices work like sets. This is good practice so we don't override our intended index
idx = ret.index.union(time_idx)
etypes = df.columns.union(set(etypes))
ret = ret.reindex(idx, columns=etypes, fill_value=0)
return ret
```
# Method Chaining
## Assign
Assign is for creating new columns on the dataframes. This is instead of
`df[new_col] = function(df[old_col])`. They are both one-liners, but `.assign` doesn't break the flow.
```
pt.assign(mean_all = pt.mean(axis=1)).head()
```
### With a callable
This is good when we have a filtering phase before.
```
pt.assign(mean_all = lambda x: x.mean(axis=1)).head()
```
## Pipe
Think R's `%>%`, `.pipe` is a method that accepts a function. `pipe`, by default, assumes the first argument of this function is a dataframe and passes the current dataframe down the pipeline. The function should return a dataframe also, if you want to continue with the chaining. Yet, it can also return any other value if you put it in the last step.
This is incredibly valuable because it takes you one step further from "sql", where you do things "in reverse".
$f(g(h($ `df` $)))$ = `df.pipe(h).pipe(g).pipe(f)`
```
def add_to_col(df, col='Grizzly', n = 200):
ret = df.copy()
# A dataframe is mutable, if you don't copy it first, this is prone to many errors.
# I always copy when I enter a function, even if I'm sure it shouldn't change anything.
ret[col] = ret[col] + n
return ret
add_to_col(add_to_col(add_to_col(pt), 'Polar', 100), 'Black',500).head()
(pt
.pipe(add_to_col)
.pipe(add_to_col, col='Polar', n=100)
.pipe(add_to_col, col='Black', n=500)
.head(5))
```
You can always do this with multiple lines of `df = do_something(df)` but I think this method is more elegant.
# Beautiful Code Tells a Story
Your code is not just about making the computer do things. It's about telling a story of what you wish to happen. Sometimes other people will want to read your code. Most times, it is you 3 months in the future who will want to read it. Some say good code documents itself. I'm not that extreme, yet storytelling with code may save you from many lines of unnecessary comments.
The next and final block tells the story in one block. It's elegant, it tells a story. If you build utility functions and `pipe` them while following meaningful naming, they help tell a story. If you `assign` columns with meaningful names, they tell a story. You `drop`, you `apply`, you `read`, you `groupby` and you `resample` - they all tell a story.
(Well... Maybe they could have gone with better naming for `resample`)
```
df = (pd
.read_csv ('./bear_data.csv', index_col=0, parse_dates=['date_'])
.assign (bear_type=lambda df: df.bear_type.map(bears))
.sort_values ('date_')
.set_index ('date_')
.drop (columns='val_updated')
.groupby ('bear_type')
.resample ('15T')
.apply ('count')
.rename (columns={'bear_type': 'amount'})
.unstack ('bear_type')
.pipe (my_utils.remove_multi_index)
.pipe (get_all_types_and_timestamps) # Remember this from before?
.assign (mean_bears=lambda x: x.mean(axis=1))
.loc [:, ['mean_bears']]
.pipe (my_utils.make_sliding_time_windows, steps_back=6)
.dropna ()
)
df.head()
```
# Bonus!
Cool methods I've found but did not fit in the talk's flow.
<span style="font-size:2em"> [No Time?](#You-don't-have-to-memorize-this)</span>
```
src = df.copy().loc[:,['mean_bears']]
```
## Percent Change
```
src.assign(pct = src.pct_change()).head(11)
```
## Interval Index
Helps creating a "common language" when talking about time series aggregations.
```
src = df.copy()
ir = pd.interval_range(start=df.index.min(),
end=df.index.max() + df.index.freq,
freq=df.index.freq)
type(ir)
ir
try:
df.loc['2018-10-09 18:37',:] # Datetime Index
except Exception as e:
print (type(e), e)
# Will result error
src.index = ir # Interval Index
src.loc['2018-10-09 18:37',:]
src.loc['2018-10-09 18:37':'2018-10-09 19:03',:]
```
## Split Strings
String handling changed substantially in pandas `1.0.0`, which introduced a dedicated nullable string dtype.
```
txt = pd.DataFrame({'text':['hello','dean langsam','diving into pandas is better than reinventing it']})
txt
txt.text.str.split()
txt.text.str.split(expand = True) # Expand to make it a dataframe
```
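As a small sketch of that dedicated string dtype (reusing the `txt` frame from above; `'string'` is the pandas >= 1.0 extension type):
```
txt_s = txt.astype({'text': 'string'})
txt_s.dtypes  # 'text' is now a string column instead of a generic object column
```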
## Toy Examples with Pandas Testing
```
import pandas.util.testing as tm
tm.N, tm.K = 15, 10
st = pd.util.testing.makeTimeDataFrame() * 100
st
```
# Research with Style!

```
stnan = st.copy()
stnan[np.random.rand(*stnan.shape) < 0.05] = np.nan # Put some nans in it
```
## Basic
```
(stnan
.style
.highlight_null('red')
.highlight_max(color='steelblue', axis = 0) # Max of each column
.highlight_min(color ='gold', axis = 1) # Min of each row
)
```
## Gradient
```
st.clip(0,100).style.background_gradient( cmap='Purples')
```
## Custom
```
def custom_style(val):
if val < -100:
return 'background-color:red'
elif val > 100:
return 'background-color:green'
elif abs(val) <20:
return 'background-color:yellow'
else:
return ''
st.style.applymap(custom_style)
```
## Bars
```
(st.style
.bar(subset=['A','D'],color='steelblue')
.bar(subset=['J'],color=['indianred','limegreen'], align='mid')
)
```
# You don't have to memorize this
Just keep in the back of your mind that modern Pandas has many superpowers. Remember they exist, and google them when you actually need them.
Whenever I feel insecure about Pandas, I go back to [Greg Reda](https://twitter.com/gjreda)'s [tweet](https://twitter.com/gjreda/status/1049694953687924737):

# Resources
* [Modern Pandas](https://tomaugspurger.github.io/modern-1-intro.html) by Tom Augspurger
* [Basic Time Series Manipulation with Pandas](https://towardsdatascience.com/basic-time-series-manipulation-with-pandas-4432afee64ea) by Laura Fedoruk
* [Pandas Docs](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.clip.html). You don't have to thoroughly go over everything, just randomly open a page in the docs and you're sure to learn a new thing.
|
github_jupyter
|
<h2> 6. Bayes Classification </h2>
This notebook has the code for the charts in Chapter 6
### Install BigQuery module
You don't need this on AI Platform, but you need this on plain-old JupyterLab
```
!pip install google-cloud-bigquery
%load_ext google.cloud.bigquery
```
### Setup
```
import os
PROJECT = 'data-science-on-gcp-180606' # REPLACE WITH YOUR PROJECT ID
BUCKET = 'data-science-on-gcp' # REPLACE WITH YOUR BUCKET NAME
REGION = 'us-central1' # REPLACE WITH YOUR BUCKET REGION e.g. us-central1
os.environ['BUCKET'] = BUCKET
```
<h3> Exploration using BigQuery </h3>
```
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import google.cloud.bigquery as bigquery
bq = bigquery.Client()
sql = """
SELECT DISTANCE, DEP_DELAY
FROM `flights.tzcorr`
WHERE RAND() < 0.001 AND dep_delay > -20 AND dep_delay < 30 AND distance < 2000
"""
df = bq.query(sql).to_dataframe()
sns.set_style("whitegrid")
g = sns.jointplot(df['DISTANCE'], df['DEP_DELAY'], kind="hex", size=10, joint_kws={'gridsize':20})
```
<h3> Set up views in Spark SQL </h3>
Start a Spark Session if necessary and get a handle to it.
```
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.appName("Bayes classification using Spark") \
.getOrCreate()
print(spark)
```
Set up the schema to read in the CSV files on GCS
```
from pyspark.sql.types import StringType, FloatType, StructType, StructField
header = 'FL_DATE,UNIQUE_CARRIER,AIRLINE_ID,CARRIER,FL_NUM,ORIGIN_AIRPORT_ID,ORIGIN_AIRPORT_SEQ_ID,ORIGIN_CITY_MARKET_ID,ORIGIN,DEST_AIRPORT_ID,DEST_AIRPORT_SEQ_ID,DEST_CITY_MARKET_ID,DEST,CRS_DEP_TIME,DEP_TIME,DEP_DELAY,TAXI_OUT,WHEELS_OFF,WHEELS_ON,TAXI_IN,CRS_ARR_TIME,ARR_TIME,ARR_DELAY,CANCELLED,CANCELLATION_CODE,DIVERTED,DISTANCE,DEP_AIRPORT_LAT,DEP_AIRPORT_LON,DEP_AIRPORT_TZOFFSET,ARR_AIRPORT_LAT,ARR_AIRPORT_LON,ARR_AIRPORT_TZOFFSET,EVENT,NOTIFY_TIME'
def get_structfield(colname):
if colname in ['ARR_DELAY', 'DEP_DELAY', 'DISTANCE']:
return StructField(colname, FloatType(), True)
else:
return StructField(colname, StringType(), True)
schema = StructType([get_structfield(colname) for colname in header.split(',')])
print(schema)
```
Create a table definition (this is done lazily; the files won't be read until we issue a query):
```
inputs = 'gs://{}/flights/tzcorr/all_flights-00000-*'.format(BUCKET) # 1/30th
#inputs = 'gs://{}/flights/tzcorr/all_flights-*'.format(BUCKET) # FULL
flights = spark.read\
.schema(schema)\
.csv(inputs)
# this view can now be queried ...
flights.createOrReplaceTempView('flights')
```
Example query over the view (this will take a while; it's Spark SQL, not BigQuery):
```
results = spark.sql('SELECT COUNT(*) FROM flights WHERE dep_delay > -20 AND distance < 2000')
results.show()
```
<h2> Restrict to train days </h2>
Let's create a CSV file of the training days
```
sql = """
SELECT *
FROM `flights.trainday`
"""
df = bq.query(sql).to_dataframe()
df.to_csv('trainday.csv', index=False)
!head -3 trainday.csv
%%bash
gsutil cp trainday.csv gs://${BUCKET}/flights/trainday.csv
```
Create a dataframe of traindays. This time, because the file has a header and is small, we can have Spark infer the schema.
```
traindays = spark.read \
.option("header", "true") \
.option("inferSchema", "true") \
.csv('gs://{}/flights/trainday.csv'.format(BUCKET))
traindays.createOrReplaceTempView('traindays')
results = spark.sql('SELECT * FROM traindays')
results.head(5)
statement = """
SELECT
f.FL_DATE AS date,
distance,
dep_delay
FROM flights f
JOIN traindays t
ON f.FL_DATE == t.FL_DATE
WHERE
t.is_train_day AND
f.dep_delay IS NOT NULL
ORDER BY
f.dep_delay DESC
"""
flights = spark.sql(statement)
```
<h3> Hexbin plot </h3>
Create a hexbin plot using Spark (repeat of what we did in BigQuery, except that we are now restricting to train days only).
```
df = flights[(flights['distance'] < 2000) & (flights['dep_delay'] > -20) & (flights['dep_delay'] < 30)]
df.describe().show()
```
Sample the dataframe so that it fits into memory (not a problem in development, but will be on full dataset); then plot it.
```
pdf = df.sample(False, 0.02, 20).toPandas() # to 100,000 rows approx on complete dataset
g = sns.jointplot(pdf['distance'], pdf['dep_delay'], kind="hex", size=10, joint_kws={'gridsize':20})
```
<h3> Quantization </h3>
Now find the quantiles
```
distthresh = flights.approxQuantile('distance', list(np.arange(0, 1.0, 0.1)), 0.02)
distthresh
delaythresh = flights.approxQuantile('dep_delay', list(np.arange(0, 1.0, 0.1)), 0.05)
delaythresh
results = spark.sql('SELECT COUNT(*) FROM flights WHERE dep_delay >= 3 AND dep_delay < 8 AND distance >= 447 AND distance < 557')
results.show()
```
<h2> Repeat, but on full dataset </h2>
You can launch the above processing on the full dataset from within JupyterLab if you want the statistics and graphs updated. I didn't, though, because this is not what I would have really done. Instead,
I would have created a standalone Python script and submitted it to the cluster -- there is no need to put JupyterLab in the middle of a production process (a hedged sketch of such a job submission follows the steps below). We'll submit a standalone Pig program to the cluster in the next section.
Steps:
<ol>
<li> Change the input variable to process all-flights-* </li>
<li> Increase cluster size (bash increase_cluster.sh from CloudShell) </li>
<li> Clear all cells from this notebook </li>
<li> Run all cells </li>
<li> Decrease cluster size (bash decrease_cluster.sh from CloudShell) </li>
</ol>
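For illustration only, submitting such a standalone job might look roughly like the cell below; the script path and cluster name are placeholders, not artifacts from this chapter:
```
%%bash
# Hypothetical sketch: submit a standalone PySpark script to the Dataproc cluster.
# The script path and cluster name are placeholders.
gcloud dataproc jobs submit pyspark ./bayes_on_spark.py \
  --cluster=ch6cluster --region=us-central1 \
  -- --bucket=$BUCKET
```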
Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
github_jupyter
|
```
import pandas as pd
from datetime import timedelta, date
import matplotlib.pyplot as plt
def append_it(date, amount,treasury,Agency,MBS, duration):
append_data = {'Date':[date], 'Amount':[amount], 'Duration':[duration],'Treasury':[treasury],'Agency':[Agency], 'MBS':[MBS]}
append_df = pd.DataFrame(append_data)
return append_df
data = {'Date':[date(2019, 9, 17)], 'Amount':[53.15], 'Treasury':[40.85], 'Agency':[0.6], 'MBS':[11.7], 'Duration':[1]}
repos = pd.DataFrame(data)
print(repos)
repos = repos.append(append_it(date(2019, 9, 18),75,51.55,0.7,22.75,1))
repos = repos.append(append_it(date(2019, 9, 19),75,55.843,0,19.157,1))
repos = repos.append(append_it(date(2019, 9, 20),75,59.6,0.5,15.350,3))
repos = repos.append(append_it(date(2019, 9, 23),67.75,49.7,0.6,15.45,1))
repos = repos.append(append_it(date(2019, 9, 24),30,22.732,0,7.268,14))
repos = repos.append(append_it(date(2019, 9, 24),75,58.75,.36,15.49,1))
repos = repos.append(append_it(date(2019, 9, 25),75,44.35,1,29.65,1))
repos = repos.append(append_it(date(2019, 9, 26),60,35.75,0,24.25,14))
repos = repos.append(append_it(date(2019, 9, 26),50.1,34.55,0,15.55,1))
repos = repos.append(append_it(date(2019, 9, 27),49,34.55,0,14.45,14))
repos = repos.append(append_it(date(2019, 9, 27),22.7,14.45,0,8.25,3))
repos = repos.append(append_it(date(2019, 9, 30),63.5,49.75,0,13.75,1))
repos = repos.append(append_it(date(2019, 10, 1),54.85,50.0,0.1,4.75,1))
repos = repos.append(append_it(date(2019, 10, 2),42.05,35.0,0,7.05,1))
repos = repos.append(append_it(date(2019, 10, 3),33.55,28.0,0,5.55,1))
repos = repos.append(append_it(date(2019, 10, 4),38.55,29.5,0,9.05,3))
repos = repos.append(append_it(date(2019, 10, 7),38.85,36.0,0,11.05,1))
repos = repos.append(append_it(date(2019, 10, 8),38.85,29.3,0,9.55,14))
repos = repos.append(append_it(date(2019, 10, 8),37.5,31.75,0,5.75,1))
repos = repos.append(append_it(date(2019, 10, 9),30.8,26.25,0,4.55,1))
repos = repos.append(append_it(date(2019, 10, 10),42.6,30.7,0,11.9,14))
repos = repos.append(append_it(date(2019, 10, 10),45.5,37.6,0.5,7.4,1))
repos = repos.append(append_it(date(2019, 10, 11),21.15,13.15,0,8.0,6))
repos = repos.append(append_it(date(2019, 10, 11),61.55,58.35,0,3.2,4))
repos = repos.append(append_it(date(2019, 10, 15),20.1,10.6,0,9.5,14))
repos = repos.append(append_it(date(2019, 10, 15),67.6,59.95,0,7.65,1))
repos = repos.append(append_it(date(2019, 10, 16),75,72.592,0,2.408,1))
repos = repos.append(append_it(date(2019, 10, 17),30.65,18.15,0,12.5,15))
repos = repos.append(append_it(date(2019, 10, 17),73.5,67.7,0.1,5.7,1))
repos = repos.append(append_it(date(2019, 10, 18),56.65,47.95,0.5,8.2,3))
repos = repos.append(append_it(date(2019, 10, 21),58.15,50.95,0.5,6.7,1))
repos = repos.append(append_it(date(2019, 10, 22),35,31.141,0,3.859,14))
repos = repos.append(append_it(date(2019, 10, 22),64.904,54.404,0,9.5,1))
repos = repos.reset_index(drop=True)
repos.tail(10)
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
#repos_amount = pd.DataFrame(columns=['foo', 'bar'])
repos_amount = pd.DataFrame(columns=['Date', 'Amount','Treasury','Agency', 'MBS'])
start_date = date(2019, 9, 17)
# ***** Make it one higher than you need *****
# ***** Make it one higher than you need *****
# ***** Make it one higher than you need *****
end_date = date(2019, 10, 22+1)
for single_date in daterange(start_date, end_date):
append_data = {'Date':[single_date], 'Amount': 0,'Treasury':0,'Agency':0, 'MBS':0}
append_df = pd.DataFrame(append_data)
#print("Append:")
#print(append_df)
#print(repos_amount)
repos_amount = repos_amount.append(append_df)
repos_amount.set_index('Date', inplace=True)
print(repos_amount)
def update_row(row, df):
    # Spread a single repo operation over every day it remains outstanding.
    the_date = row['Date']
    the_amount = row['Amount']
    the_duration = row['Duration']
    the_treasury = row['Treasury']
    the_agency = row['Agency']
    the_MBS = row['MBS']
    end_date = the_date + timedelta(the_duration)
    the_date = date(the_date.year, the_date.month, the_date.day)
    end_date = date(end_date.year, end_date.month, end_date.day)
    for date_var in daterange(the_date, end_date):
        date_lookup = date(date_var.year, date_var.month, date_var.day)
        last_date = df.tail(1).index[0]
        if last_date >= date_lookup:
            # Use .loc[row, col] rather than chained indexing (df.loc[d][col] = ...),
            # which may silently fail to write back to the dataframe.
            df.loc[date_lookup, 'Amount'] += the_amount
            df.loc[date_lookup, 'Treasury'] += the_treasury
            df.loc[date_lookup, 'Agency'] += the_agency
            df.loc[date_lookup, 'MBS'] += the_MBS
    return df
for index, a_row in repos.iterrows():
repos_amount = update_row(a_row, repos_amount)
#print(repos_amount)
#repos_amount.plot(kind='bar',y='Amount',color='red')
colors = ["Green", "Red","Blue"]
repos_amount[['Agency','MBS','Treasury']].plot.bar(stacked=True, color=colors, figsize=(12,7))
plt.title('Total Outstanding Fed Repos', fontsize=16)
plt.ylabel('$ Billions', fontsize=12)
plt.show()
print(repos_amount)
print(repos_amount.loc[date(2019, 10, 11)])
# 'Date' is already the index (set above), so there is no need to set it again.
repos_amount.info()
# Example of updating a single cell: .at is the modern replacement for the deprecated DataFrame.set_value.
amount = repos_amount.loc[date(2019, 10, 11), 'Amount']
amount = amount + 10
repos_amount.at[date(2019, 10, 11), 'Amount'] = amount
print(repos_amount)
```
|
github_jupyter
|
# Time series analysis (Pandas)
Nikolay Koldunov
[email protected]
================
Here I am going to show just some basic [pandas](http://pandas.pydata.org/) stuff for time series analysis, as I think it is the most interesting topic for Earth Scientists. If you find this small tutorial useful, I encourage you to watch [this video](http://pyvideo.org/video/1198/time-series-data-analysis-with-pandas), where Wes McKinney gives an extensive introduction to time series data analysis with pandas.
On the official website you can find an explanation of what problems pandas solves in general, but I can tell you what problem pandas solves for me. It makes analysis and visualisation of 1D data, especially time series, MUCH faster. Before pandas, working with time series in Python was a pain for me; now it's fun. Ease of use stimulates in-depth exploration of the data: why wouldn't you do some additional analysis if it's just one line of code? I hope you will also find this great tool helpful and useful. So, let's begin.
As an example we are going to use time series of [Arctic Oscillation (AO)](http://en.wikipedia.org/wiki/Arctic_oscillation) and [North Atlantic Oscillation (NAO)](http://en.wikipedia.org/wiki/North_Atlantic_oscillation) data sets.
## Module import
First we have to import necessary modules:
```
import pandas as pd
import numpy as np
%matplotlib inline
pd.set_option('max_rows', 15) # this limits the maximum number of rows displayed
np.set_printoptions(precision=3 , suppress= True) # this is just to make the output look better
pd.__version__
```
## Loading data
Now, when we are done with preparations, let's get some data.
Pandas has very good IO capabilities, but we are not going to use them in this tutorial in order to keep things simple. For now we open the file simply with numpy's loadtxt:
```
temp = np.loadtxt('../Week03/Ham_3column.txt')
```
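For comparison, the same file could also be read directly with pandas IO; this is only a sketch and assumes nothing about the column layout beyond whitespace separation:
```
# Sketch of the pandas alternative to np.loadtxt for a whitespace-delimited text file
df_raw = pd.read_csv('../Week03/Ham_3column.txt', delim_whitespace=True, header=None)
df_raw.head()
```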
Every line in the file consists of three elements: year, month, value:
```
temp[-1]
```
And here is the shape of our array (note that the shape might differ in your case, since the data is updated monthly):
```
temp.shape
```
## Time Series
We would like to convert this data into a time series that can be manipulated naturally and easily. The first step is to create the range of dates for our time series. From the file it is clear that the record starts in January 1891 and ends in August 2014 (at the time I am writing this, of course). The frequency of the data is one day (freq='D').
```
dates = pd.date_range('1891-01-01', '2014-08-31', freq='D')
```
As you can see, the syntax is quite simple, and this is one of the reasons why I love Pandas so much :) You can check that the range of dates was properly generated:
```
dates
```
Now we are ready to create our first time series. Dates from the *dates* variable will be our index, and `temp` values will be our, hm... values:
```
ham = pd.Series(temp[:,3]/10., index=dates)
ham
```
Now we can plot complete time series:
```
ham.plot()
```
or its part:
```
ham['1980':'1990'].plot()
```
or even smaller part:
```
ham['1980-05-02':'1981-03-17'].plot()
```
Referencing time periods is done in a very natural way. You can, of course, also get individual values. By position:
```
ham[120]
```
or by index (date in our case):
```
ham['1960-01']
```
And what if we choose only one year?
```
ham['1960']
```
Isn't that great? :)
## Exercise
What was the temperature in Hamburg on your birthday?
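A possible one-line solution (the date below is just a placeholder, substitute your own birthday):
```
ham['1990-05-15']
```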
## One bonus example :)
```
ham[ham > 0]['1990':'2000'].plot(style='r*')
ham[ham < 0]['1990':'2000'].plot(style='b*')
```
## Exercise
- plot all positive temperatures (red stars) and negative temperatures (blue stars)
- limit this plot to the 1990-2000 period
## Data Frame
Now let's make life a bit more interesting and get more data. This will be the TMIN time series.
We use the pandas function `read_csv` to parse dates and create a Data Frame:
```
hamm = pd.read_csv('Ham_tmin.txt', parse_dates=True, index_col=0, names=['Time','tmin'])
hamm
type(hamm)
```
The time period is the same:
```
hamm.index
```
Now we create a Data Frame that will contain both TMAX and TMIN data. It is sort of like an Excel table where the first row contains the headers for the columns and the first column is an index:
```
tmp = pd.DataFrame({'TMAX':ham, 'TMIN':hamm.tmin/10})
tmp
```
One can plot the data straight away:
```
tmp.plot()
```
Or have a look at the first several rows:
```
tmp.head()
```
We can reference each column by its name:
```
tmp['TMIN']
```
or as an attribute of the Data Frame variable (if the column name is a valid Python name):
```
tmp.TMIN
```
We can simply add a column to the Data Frame:
```
tmp['Diff'] = tmp['TMAX'] - tmp['TMIN']
tmp.head()
```
## Exercise
Find and plot all differences that are larger than 20
```
tmp['Diff'][tmp['Diff']>20].plot(style='r*')
```
And delete it:
```
del tmp['Diff']
tmp.tail()
```
Slicing will also work:
```
tmp['1981-03'].plot()
```
## Statistics
Back to simple stuff. We can obtain statistical information over the elements of the Data Frame. The default is column-wise:
```
tmp.mean()
tmp.max()
tmp.min()
```
You can also do it row-wise:
```
tmp.mean(1)
```
Or get everything at once:
```
tmp.describe()
```
By the way, getting correlation coefficients for the members of the Data Frame is as simple as:
```
tmp.corr()
```
## Exercise
Find mean of all temperatures larger than 5
```
tmp[tmp>5].mean()
```
## Resampling
Pandas provides an easy way to resample data to a different time frequency. The two main parameters for resampling are the time period you resample to and the method that you use. By default the method is mean. The following example calculates monthly means ('M'):
```
tmp_mm = tmp.resample("M")
tmp_mm['2000':].plot()
```
median:
```
tmp_mm = tmp.resample("M", how='median')
tmp_mm['2000':].plot()
```
You can use your own functions for resampling, for example np.max (in this case we also change the resampling frequency to three months, '3M'):
```
tmp_mm = tmp.resample("3M", how=np.max)
tmp_mm['2000':].plot()
```
You can specify several functions at once as a list:
```
tmp_mm = tmp.resample("M", how=['mean', np.min, np.max])
tmp_mm['1900':'2020'].plot(subplots=True, figsize=(10,10))
tmp_mm['2000':].plot(figsize=(10,10))
```
## Exercise
Define a function that finds the difference between the maximum and minimum values of the time series, and resample our `tmp` variable with this function.
```
def satardays(x):
    xmin = x.min()
    xmax = x.max()
    diff = xmax - xmin  # difference between maximum and minimum
    return diff
tmp_mm = tmp.resample("A", how=satardays)
tmp_mm['2000':].plot()
tmp_mm
```
That's it. I hope you at least get a rough impression of what pandas can do for you. Comments are very welcome (below). If you have interesting examples of pandas usage in Earth Science, we would be happy to put them on [EarthPy](http://earthpy.org).
## Links
[Time Series Data Analysis with pandas (Video)](http://www.youtube.com/watch?v=0unf-C-pBYE)
[Data analysis in Python with pandas (Video)](http://www.youtube.com/watch?v=w26x-z-BdWQ)
[Python for Data Analysis](http://shop.oreilly.com/product/0636920023784.do)
|
github_jupyter
|
## 1. Google Play Store apps and reviews
<p>Mobile apps are everywhere. They are easy to create and can be lucrative. Because of these two factors, more and more apps are being developed. In this notebook, we will do a comprehensive analysis of the Android app market by comparing over ten thousand apps in Google Play across different categories. We'll look for insights in the data to devise strategies to drive growth and retention.</p>
<p><img src="https://assets.datacamp.com/production/project_619/img/google_play_store.png" alt="Google Play logo"></p>
<p>Let's take a look at the data, which consists of two files:</p>
<ul>
<li><code>apps.csv</code>: contains all the details of the applications on Google Play. There are 13 features that describe a given app.</li>
<li><code>user_reviews.csv</code>: contains 100 reviews for each app, <a href="https://www.androidpolice.com/2019/01/21/google-play-stores-redesigned-ratings-and-reviews-section-lets-you-easily-filter-by-star-rating/">most helpful first</a>. The text in each review has been pre-processed and attributed with three new features: Sentiment (Positive, Negative or Neutral), Sentiment Polarity and Sentiment Subjectivity.</li>
</ul>
```
# Read in dataset
import pandas as pd
apps_with_duplicates = pd.read_csv('datasets/apps.csv')
# Drop duplicates
apps = apps_with_duplicates.drop_duplicates()
# Print the total number of apps
print('Total number of apps in the dataset = ', len(apps['App']))
# Have a look at a random sample of 5 rows
n = 5
apps.sample(n)
```
## 2. Data cleaning
<p>The four features that we will be working with most frequently henceforth are <code>Installs</code>, <code>Size</code>, <code>Rating</code> and <code>Price</code>. The <code>info()</code> function (from the previous task) told us that the <code>Installs</code> and <code>Price</code> columns are of type <code>object</code> and not <code>int64</code> or <code>float64</code> as we would expect. This is because these columns contain characters other than just [0-9] digits. Ideally, we would want these columns to be numeric as their name suggests. <br>
Hence, we now proceed to data cleaning and prepare our data to be consumed in our analysis later. Specifically, the presence of special characters (<code>, $ +</code>) in the <code>Installs</code> and <code>Price</code> columns makes their conversion to a numerical data type difficult.</p>
```
# List of characters to remove
chars_to_remove = ['+' , ',' , '$']
# List of column names to clean
cols_to_clean = ['Installs' , 'Price']
# Loop for each column
for col in cols_to_clean:
# Replace each character with an empty string
for char in chars_to_remove:
apps[col] = apps[col].str.replace(char, '')
# Convert col to numeric
apps[col] = pd.to_numeric(apps[col])
```
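<p>As a quick sanity check (a small sketch reusing the <code>cols_to_clean</code> list from above), both columns should now be numeric:</p>
```
# Both cleaned columns should now be int64/float64 rather than object
print(apps[cols_to_clean].dtypes)
```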
## 3. Exploring app categories
<p>With more than 1 billion active users in 190 countries around the world, Google Play continues to be an important distribution platform to build a global audience. For businesses to get their apps in front of users, it's important to make them more quickly and easily discoverable on Google Play. To improve the overall search experience, Google has introduced the concept of grouping apps into categories.</p>
<p>This brings us to the following questions:</p>
<ul>
<li>Which category has the highest share of (active) apps in the market? </li>
<li>Is any specific category dominating the market?</li>
<li>Which categories have the fewest number of apps?</li>
</ul>
<p>We will see that there are <code>33</code> unique app categories present in our dataset. <em>Family</em> and <em>Game</em> apps have the highest market prevalence. Interestingly, <em>Tools</em>, <em>Business</em> and <em>Medical</em> apps are also at the top.</p>
```
import plotly
plotly.offline.init_notebook_mode(connected=True)
import plotly.graph_objs as go
# Print the total number of unique categories
num_categories = len(apps["Category"].unique())
print('Number of categories = ', num_categories)
# Count the number of apps in each 'Category' and sort them in descending order
num_apps_in_category = apps["Category"].value_counts().sort_values(ascending = False)
data = [go.Bar(
x = num_apps_in_category.index, # index = category name
y = num_apps_in_category.values, # value = count
)]
plotly.offline.iplot(data)
```
## 4. Distribution of app ratings
<p>After having witnessed the market share for each category of apps, let's see how all these apps perform on an average. App ratings (on a scale of 1 to 5) impact the discoverability, conversion of apps as well as the company's overall brand image. Ratings are a key performance indicator of an app.</p>
<p>From our research, we found that the average app rating across all categories is <code>4.17</code>. The histogram is skewed towards higher ratings, indicating that the majority of the apps are highly rated, with only a few exceptions among the low-rated apps.</p>
```
# Average rating of apps
avg_app_rating = apps['Rating'].mean()
print('Average app rating = ', avg_app_rating)
# Distribution of apps according to their ratings
data = [go.Histogram(
x = apps['Rating']
)]
# Vertical dashed line to indicate the average app rating
layout = {'shapes': [{
'type' :'line',
'x0': avg_app_rating,
'y0': 0,
'x1': avg_app_rating,
'y1': 1000,
'line': { 'dash': 'dashdot'}
}]
}
plotly.offline.iplot({'data': data, 'layout': layout})
```
## 5. Size and price of an app
<p>Let's now examine app size and app price. For size, if the mobile app is too large, it may be difficult and/or expensive for users to download. Lengthy download times could turn users off before they even experience your mobile app. Plus, each user's device has a finite amount of disk space. For price, some users expect their apps to be free or inexpensive. These problems compound if the developing world is part of your target market; especially due to internet speeds, earning power and exchange rates.</p>
<p>How can we effectively come up with strategies to size and price our app?</p>
<ul>
<li>Does the size of an app affect its rating? </li>
<li>Do users really care about system-heavy apps or do they prefer light-weighted apps? </li>
<li>Does the price of an app affect its rating? </li>
<li>Do users always prefer free apps over paid apps?</li>
</ul>
<p>We find that the majority of top rated apps (rating over 4) range from 2 MB to 20 MB. We also find that the vast majority of apps price themselves under \$10.</p>
```
%matplotlib inline
import seaborn as sns
sns.set_style("darkgrid")
apps_with_size_and_rating_present = apps[(apps['Rating'].notnull()) & (apps["Size"].notnull())]
# Subset for categories with at least 250 apps
large_categories = apps_with_size_and_rating_present.groupby('Category').filter(lambda x: len(x) >= 250).reset_index()
# Plot size vs. rating
plt1 = sns.jointplot(x = large_categories['Size'] , y = large_categories['Rating'] , kind = 'hex')
# Subset out apps whose type is 'Paid'
paid_apps = apps_with_size_and_rating_present[apps_with_size_and_rating_present['Type'] == 'Paid']
# Plot price vs. rating
plt2 = sns.jointplot(x = paid_apps['Price'] , y = paid_apps['Rating'] )
```
## 6. Relation between app category and app price
<p>So now comes the hard part. How are companies and developers supposed to make ends meet? What monetization strategies can companies use to maximize profit? The costs of apps are largely based on features, complexity, and platform.</p>
<p>There are many factors to consider when selecting the right pricing strategy for your mobile app. It is important to consider the willingness of your customer to pay for your app. A wrong price could break the deal before the download even happens. Potential customers could be turned off by what they perceive to be a shocking cost, or they might delete an app they’ve downloaded after receiving too many ads or simply not getting their money's worth.</p>
<p>Different categories demand different price ranges. Some apps that are simple and used daily, like the calculator app, should probably be kept free. However, it would make sense to charge for a highly-specialized medical app that diagnoses diabetic patients. Below, we see that <em>Medical and Family</em> apps are the most expensive. Some medical apps extend even up to \$80! All game apps are reasonably priced below \$20.</p>
```
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
# Select a few popular app categories
popular_app_cats = apps[apps.Category.isin(['GAME', 'FAMILY', 'PHOTOGRAPHY',
'MEDICAL', 'TOOLS', 'FINANCE',
'LIFESTYLE','BUSINESS'])]
# Examine the price trend by plotting Price vs Category
ax = sns.stripplot(x = popular_app_cats['Price'], y = popular_app_cats['Category'], jitter=True, linewidth=1)
ax.set_title('App pricing trend across categories')
# Apps whose Price is greater than 200
apps_above_200 = popular_app_cats[['Category', 'App', 'Price']][popular_app_cats['Price'] > 200]
apps_above_200
```
## 7. Filter out "junk" apps
<p>It looks like a bunch of the really expensive apps are "junk" apps. That is, apps that don't really have a purpose. Some app developer may create an app called <em>I Am Rich Premium</em> or <em>most expensive app (H)</em> just for a joke or to test their app development skills. Some developers even do this with malicious intent and try to make money by hoping people accidentally click purchase on their app in the store.</p>
<p>Let's filter out these junk apps and re-do our visualization. The distribution of apps under \$20 becomes clearer.</p>
```
# Select apps priced below $100
apps_under_100 = popular_app_cats[popular_app_cats['Price'] < 100]
fig, ax = plt.subplots()
fig.set_size_inches(15, 8)
# Examine price vs category with the authentic apps
ax = sns.stripplot(x=apps_under_100['Price'], y=apps_under_100['Category'], data=apps_under_100,
jitter=True, linewidth=1)
ax.set_title('App pricing trend across categories after filtering for junk apps')
```
## 8. Popularity of paid apps vs free apps
<p>For apps in the Play Store today, there are five types of pricing strategies: free, freemium, paid, paymium, and subscription. Let's focus on free and paid apps only. Some characteristics of free apps are:</p>
<ul>
<li>Free to download.</li>
<li>Main source of income often comes from advertisements.</li>
<li>Often created by companies that have other products and the app serves as an extension of those products.</li>
<li>Can serve as a tool for customer retention, communication, and customer service.</li>
</ul>
<p>Some characteristics of paid apps are:</p>
<ul>
<li>Users are asked to pay once for the app to download and use it.</li>
<li>The user can't really get a feel for the app before buying it.</li>
</ul>
<p>Are paid apps installed as much as free apps? It turns out that paid apps have a relatively lower number of installs than free apps, though the difference is not as stark as I would have expected!</p>
```
trace0 = go.Box(
# Data for paid apps
y=apps[apps['Type'] == 'Paid']['Installs'],
name = 'Paid'
)
trace1 = go.Box(
# Data for free apps
y=apps[apps['Type'] == 'Free']['Installs'],
name = 'Free'
)
layout = go.Layout(
title = "Number of downloads of paid apps vs. free apps",
yaxis = dict(
type = 'log',
autorange = True
)
)
# Add trace0 and trace1 to a list for plotting
data = [trace0 , trace1]
plotly.offline.iplot({'data': data, 'layout': layout})
```
## 9. Sentiment analysis of user reviews
<p>Mining user review data to determine how people feel about your product, brand, or service can be done using a technique called sentiment analysis. User reviews for apps can be analyzed to identify if the mood is positive, negative or neutral about that app. For example, positive words in an app review might include words such as 'amazing', 'friendly', 'good', 'great', and 'love'. Negative words might be words like 'malware', 'hate', 'problem', 'refund', and 'incompetent'.</p>
<p>By plotting sentiment polarity scores of user reviews for paid and free apps, we observe that free apps receive a lot of harsh comments, as indicated by the outliers on the negative y-axis. Reviews for paid apps appear never to be extremely negative. This may indicate something about app quality, i.e., paid apps being of higher quality than free apps on average. The median polarity score for paid apps is a little higher than free apps, thereby syncing with our previous observation.</p>
<p>In this notebook, we analyzed over ten thousand apps from the Google Play Store. We can use our findings to inform our decisions should we ever wish to create an app ourselves.</p>
```
# Load user_reviews.csv
reviews_df = pd.read_csv('datasets/user_reviews.csv')
# Join and merge the two dataframe
merged_df = pd.merge(apps, reviews_df, on = 'App', how = "inner")
# Drop NA values from Sentiment and Translated_Review columns
merged_df = merged_df.dropna(subset=['Sentiment', 'Translated_Review'])
sns.set_style('ticks')
fig, ax = plt.subplots()
fig.set_size_inches(11, 8)
# User review sentiment polarity for paid vs. free apps
ax = sns.boxplot(x = merged_df['Type'], y = merged_df['Sentiment_Polarity'], data = merged_df)
ax.set_title('Sentiment Polarity Distribution')
```
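<p>To put a number on the median comparison described above, a short sketch using the <code>merged_df</code> built in the previous cell:</p>
```
# Median sentiment polarity per app type (Free vs. Paid)
merged_df.groupby('Type')['Sentiment_Polarity'].median()
```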
|
github_jupyter
|
<a href="https://colab.research.google.com/github/ksetdekov/HSE_DS/blob/master/07%20NLP/kaggle%20hw/solution.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
# !pip3 install kaggle
from google.colab import files
files.upload()
!mkdir ~/.kaggle
!cp kaggle.json ~/.kaggle/
!chmod 600 ~/.kaggle/kaggle.json
!kaggle competitions download -c toxic-comments-classification-apdl-2021
!ls
import pandas as pd
import numpy as np
from sklearn.metrics import *
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
train = pd.read_csv('train_data.csv.zip', compression='zip')
test = pd.read_csv('test_data.csv.zip', compression='zip')
train.toxic.describe()
train.sample(5)
test.sample(5)
x_train, x_test, y_train, y_test = train_test_split(train.comment, train.toxic, random_state=0, stratify=train.toxic)
y_train.describe()
y_test.describe()
```
## Bag of words
```
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import CountVectorizer
from nltk import ngrams
vec = CountVectorizer(ngram_range=(1, 2)) # build a BoW over words
bow = vec.fit_transform(x_train)
vec2 = CountVectorizer(ngram_range=(1, 2)) # build a BoW over words, this time on all of the data
bow2 = vec2.fit_transform(train.comment)
list(vec2.vocabulary_.items())[:10]
bow.mean()
clf = LogisticRegression(random_state=0, max_iter=500, class_weight='balanced')
clf.fit(bow, y_train)
clf2 = LogisticRegression(random_state=0, max_iter=500, class_weight='balanced')
clf2.fit(bow2, train.toxic)
pred = clf.predict(vec.transform(x_test))
print(classification_report(pred, y_test))
test
bow_test_pred = test.copy()
bow_test_pred['toxic'] = clf.predict(vec.transform(test.comment))
bow_test_pred['toxic'] = bow_test_pred['toxic'].astype(int)
bow_test_pred.drop('comment', axis=1, inplace=True)
bow_test_pred
bow_test_pred2 = test.copy()
bow_test_pred2['toxic'] = clf2.predict(vec2.transform(test.comment))
bow_test_pred2['toxic'] = bow_test_pred2['toxic'].astype(int)
bow_test_pred2.drop('comment', axis=1, inplace=True)
bow_test_pred2
bow_test_pred.to_csv('bow_v1.csv', index=False)
bow_test_pred2.to_csv('bow_v2.csv', index=False)
confusion_matrix(bow_test_pred.toxic, bow_test_pred2.toxic)
# !kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f bow_v2.csv -m "kirill_setdekov first bow v2 submission all data"
```
## TF-IDF
```
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer(ngram_range=(1, 1))
bow = vec.fit_transform(x_train)
clf2 = LogisticRegression(random_state=1, max_iter = 500)
clf2.fit(bow, y_train)
pred = clf2.predict(vec.transform(x_test))
print(classification_report(pred, y_test))
tf_idf = test.copy()
tf_idf['toxic'] = clf2.predict(vec.transform(test.comment))
tf_idf['toxic'] = tf_idf['toxic'].astype(int)
tf_idf.drop('comment', axis=1, inplace=True)
tf_idf
tf_idf.to_csv('tf_idf_v1.csv', index=False)
# !kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f tf_idf_v1.csv -m "kirill_setdekov tfidf v1 submission"
```
## Symbol n-Grams
```
vec = CountVectorizer(analyzer='char', ngram_range=(1, 5))
bowsimb = vec.fit_transform(x_train)
from sklearn.preprocessing import MaxAbsScaler
scaler = MaxAbsScaler()
scaler.fit(bowsimb)
bowsimb = scaler.transform(bowsimb)
clf3 = LogisticRegression(random_state=0, max_iter=1000)
clf3.fit(bowsimb, y_train)
pred = clf3.predict(scaler.transform(vec.transform(x_test)))
print(classification_report(pred, y_test))
importances = list(zip(vec.get_feature_names(), clf3.coef_[0]))  # pair each char n-gram with the coefficient of the model trained on bowsimb
importances[0]
sorted_importances = sorted(importances, key = lambda x: -abs(x[1]))
sorted_importances[:20]
symbol_ngrams = test.copy()
symbol_ngrams['toxic'] = clf3.predict(scaler.
transform(vec.transform(test.comment)))
symbol_ngrams['toxic'] = symbol_ngrams['toxic'].astype(int)
symbol_ngrams.drop('comment', axis=1, inplace=True)
symbol_ngrams
symbol_ngrams.to_csv('symbol_ngrams_v1.csv', index=False)
from sklearn.metrics import confusion_matrix
confusion_matrix(symbol_ngrams.toxic, tf_idf.toxic)
# !kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f symbol_ngrams_v1.csv -m "kirill_setdekov symbol_ngrams_v1 v1 submission"
```
# FastText
```
!pip3 install fasttext
import fasttext
with open('ft_train_data.txt', 'w') as f:
for pair in list(zip(x_train, y_train)):
text, label = pair
f.write(f'__label__{int(label)} {text.lower()}\n')
with open('ft_test_data.txt', 'w') as f:
for pair in list(zip(x_test, y_test)):
text, label = pair
f.write(f'__label__{int(label)} {text.lower()}\n')
with open('ft_all.txt', 'w') as f:
for pair in list(zip(train.comment, train.toxic)):
text, label = pair
f.write(f'__label__{int(label)} {text.lower()}\n')
classifier = fasttext.train_supervised('ft_train_data.txt')#, 'model')
result = classifier.test('ft_test_data.txt')
print('P@1:', result[1])#.precision)
print('R@1:', result[2])#.recall)
print('Number of examples:', result[0])#.nexamples)
classifier2 = fasttext.train_supervised('ft_all.txt')#, 'model')
k = 0
for item in [i.lower() for i in test.comment]:
item = item.replace("\n"," ")
k +=1
k
prediction = []
for item in [i.lower() for i in test.comment]:
item = item.replace("\n"," ")
prediction.append(classifier.predict(item))
prediction2 = []
for item in [i.lower() for i in test.comment]:
item = item.replace("\n"," ")
prediction2.append(classifier2.predict(item))
pred = [int(label[0][0].split('__')[2][0]) for label in prediction]
pred2 = [int(label[0][0].split('__')[2][0]) for label in prediction2]
fasttext_pred = test.copy()
fasttext_pred['toxic'] = pred
fasttext_pred.drop('comment', axis=1, inplace=True)
fasttext_pred
fasttext_pred2 = test.copy()
fasttext_pred2['toxic'] = pred2
fasttext_pred2.drop('comment', axis=1, inplace=True)
fasttext_pred2
confusion_matrix(symbol_ngrams.toxic, fasttext_pred.toxic)
confusion_matrix(fasttext_pred2.toxic, fasttext_pred.toxic)
fasttext_pred.to_csv('fasttext_pred_v1.csv', index=False)
fasttext_pred2.to_csv('fasttext_pred_v2.csv', index=False)
!kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f fasttext_pred_v2.csv -m "kirill_setdekov fasttext_pred v2 submission"
```
## CNN
```
from torchtext.legacy import data
pd.read_csv('train_data.csv.zip', compression='zip')
!unzip train_data.csv.zip
!unzip test_data.csv.zip
# the Field and LabelField classes define how the data is stored and processed when it is read in
TEXT = data.Field(tokenize='spacy') # 'spacy' means the spacy module will do the tokenization
LABEL = data.LabelField()
ds = data.TabularDataset(
path='train_data.csv', format='csv',
skip_header=True,
fields=[('comment', TEXT),
('toxic', LABEL)]
)
pd.read_csv('test_data.csv')
test = data.TabularDataset(
path='test_data.csv', format='csv',
skip_header=True,
fields=[('id', TEXT), ('comment', TEXT)]
)
next(ds.comment)
next(ds.toxic)
TEXT.build_vocab(ds, max_size=25000, vectors="glove.6B.100d")
LABEL.build_vocab(ds)
TEXT.vocab.itos[:20]
len(TEXT.vocab.itos)
train, val = ds.split(split_ratio=0.9, stratified=True, strata_field='toxic') # дефолтное соотношение 0.7
print(len(train))
print(len(val))
print(len(test))
BATCH_SIZE = 64
train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
(train, val, test),
batch_size=BATCH_SIZE,
sort=True,
sort_key=lambda x: len(x.comment), # sort texts by length so that sentences of similar length end up together and less padding is needed
repeat=False)
for i, batch in enumerate(valid_iterator):
print(batch.batch_size)
# pass
batch.fields
batch.batch_size
batch.comment
batch.toxic
len(batch.toxic)
import torch.nn as nn
class CNN(nn.Module):
def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, dropout_proba):
super().__init__()
self.embedding = nn.Embedding(vocab_size, embedding_dim)
self.conv_0 = nn.Conv2d(in_channels=1, out_channels=n_filters, kernel_size=(filter_sizes[0], embedding_dim))
self.conv_1 = nn.Conv2d(in_channels=1, out_channels=n_filters, kernel_size=(filter_sizes[1], embedding_dim))
self.conv_2 = nn.Conv2d(in_channels=1, out_channels=n_filters, kernel_size=(filter_sizes[2], embedding_dim))
self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)
self.dropout = nn.Dropout(dropout_proba)
def forward(self, x):
#x = [sent len, batch size]
# print(x.shape)
x = x.permute(1, 0)
#x = [batch size, sent len]
embedded = self.embedding(x)
#print(embedded.shape)
#embedded = [batch size, sent len, emb dim]
embedded = embedded.unsqueeze(1)
#embedded = [batch size, 1, sent len, emb dim]
conv_0 = self.conv_0(embedded)
#print(conv_0.shape)
conv_0 = conv_0.squeeze(3)
#print(conv_0.shape)
conved_0 = F.relu(conv_0)
conved_1 = F.relu(self.conv_1(embedded).squeeze(3))
conved_2 = F.relu(self.conv_2(embedded).squeeze(3))
#conv_n = [batch size, n_filters, sent len - filter_sizes[n]]
# print(conved_0.shape)
pool_0 = F.max_pool1d(conved_0, conved_0.shape[2])
# print(pool_0.shape)
pooled_0 = pool_0.squeeze(2)
# print(pooled_0.shape)
pooled_1 = F.max_pool1d(conved_1, conved_1.shape[2]).squeeze(2)
pooled_2 = F.max_pool1d(conved_2, conved_2.shape[2]).squeeze(2)
#pooled_n = [batch size, n_filters]
cat = self.dropout(torch.cat((pooled_0, pooled_1, pooled_2), dim=1))
#cat = [batch size, n_filters * len(filter_sizes)]
return self.fc(cat)
import torch.nn.functional as F
def binary_accuracy(preds, y):
rounded_preds = torch.round(torch.sigmoid(preds))  # torch.sigmoid instead of the deprecated F.sigmoid
correct = (rounded_preds == y).float()
acc = correct.sum() / len(correct)
return acc
def train_func(model, iterator, optimizer, criterion):
epoch_loss = 0
epoch_acc = 0
model.train()
for batch in iterator:
optimizer.zero_grad()
predictions = model(batch.comment.cuda()).squeeze(1)
loss = criterion(predictions.float(), batch.toxic.float().cuda())
acc = binary_accuracy(predictions.float(), batch.toxic.float().cuda())
loss.backward()
optimizer.step()
epoch_loss += loss
epoch_acc += acc
return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate_func(model, iterator, criterion):
epoch_loss = 0
epoch_acc = 0
model.eval()
with torch.no_grad():
for batch in iterator:
predictions = model(batch.comment.cuda()).squeeze(1)
loss = criterion(predictions.float(), batch.toxic.float().cuda())
acc = binary_accuracy(predictions.float(), batch.toxic.float().cuda())
epoch_loss += loss
epoch_acc += acc
return epoch_loss / len(iterator), epoch_acc / len(iterator)
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100
N_FILTERS = 100
FILTER_SIZES = [2,3,4]
OUTPUT_DIM = 1
DROPOUT_PROBA = 0.5
model = CNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM, DROPOUT_PROBA)
INPUT_DIM
model
pretrained_embeddings = TEXT.vocab.vectors
model.embedding.weight.data.copy_(pretrained_embeddings)
import torch.optim as optim
optimizer = optim.Adam(model.parameters()) # we give the optimizer all parameters, so the embeddings will be fine-tuned too
criterion = nn.BCEWithLogitsLoss() # binary cross-entropy with logits
model = model.cuda() # we will train on the GPU! =)
model.embedding
from torchsummary import summary
# summary(model, (14))
import torch
N_EPOCHS = 8
for epoch in range(N_EPOCHS):
train_loss, train_acc = train_func(model, train_iterator, optimizer, criterion)
valid_loss, valid_acc = evaluate_func(model, valid_iterator, criterion)
print(f'Epoch: {epoch+1:02}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc*100:.2f}%, Val. Loss: {valid_loss:.3f}, Val. Acc: {valid_acc*100:.2f}%')
test.examples
model.eval()
cnn_res = []
with torch.no_grad():
for batch in test_iterator:
predictions = model(batch.comment.cuda())
cnn_res.append(predictions)
testout = pd.read_csv('test_data.csv.zip', compression='zip')
cnnpred = testout.copy()
cnnpred['toxic'] = [float(item) for sublist in cnn_res for item in sublist]
cnnpred.drop('comment', axis=1, inplace=True)
cnnpred
cnnpred['toxic'] = (cnnpred['toxic'] > 0).astype(int)
cnnpred
cnnpred.to_csv('cnnpred_v4.csv', index=False)
!kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f cnnpred_v4.csv -m "kirill_setdekov cnn v4 with threshold 0"
```
# word2vec
> not done, skip this model
```
! wget https://nlp.stanford.edu/data/glove.6B.zip
with open("alice.txt", 'r', encoding='utf-8') as f:
text = f.read()
text = re.sub('\n', ' ', text)
sents = sent_tokenize(text)
punct = '!"#$%&()*+,-./:;<=>?@[\]^_`{|}~„“«»†*—/\-‘’'
clean_sents = []
for sent in sents:
s = [w.lower().strip(punct) for w in sent.split()]
clean_sents.append(s)
print(clean_sents[:2])
model_path = "movie_reviews.model"
print("Saving model...")
model_en.save(model_path)
model = word2vec.Word2Vec.load(model_path)
model.build_vocab(clean_sents, update=True)
model.train(clean_sents, total_examples=model.corpus_count, epochs=5)
```
# bow on random forest

```
! pip install pymystem3
! pip install --force-reinstall pymorphy2
!pip install pymorphy2-dicts-ru
import pymorphy2
import re
morph = pymorphy2.MorphAnalyzer()
# remove all non-letter characters
regex = re.compile("[А-Яа-яA-Za-z]+")  # A-Za-z instead of A-z, which would also match some punctuation
def words_only(text, regex=regex):
try:
return regex.findall(text.lower())
except:
return []
for i in train.comment[10].split():
lemmas = morph.parse(i)
print(lemmas[0])
from functools import lru_cache
@lru_cache(maxsize=128)
def lemmatize_word(token, pymorphy=morph):
return pymorphy.parse(token)[0].normal_form
def lemmatize_text(text):
return [lemmatize_word(w) for w in text]
tokens = words_only(train.comment[10])
print(lemmatize_text(tokens))
from nltk.corpus import stopwords
import nltk
nltk.download('stopwords')
mystopwords = stopwords.words('russian')
def remove_stopwords(lemmas, stopwords = mystopwords):
return [w for w in lemmas if not w in stopwords]
lemmas = lemmatize_text(tokens)
print(*remove_stopwords(lemmas))
def remove_stopwords(lemmas, stopwords = mystopwords):
return [w for w in lemmas if not w in stopwords and len(w) > 3]
print(*remove_stopwords(lemmas))
def clean_text(text):
tokens = words_only(text)
lemmas = lemmatize_text(tokens)
return remove_stopwords(lemmas)
for i in range(20):
print(* clean_text(train.comment[i]))
from tqdm.auto import trange
new_comments = []
for i in trange(len(train.comment), desc='loop'):
new_comments.append(" ".join(clean_text(train.comment[i])))
new_comments[:10]
vec3 = CountVectorizer(ngram_range=(1, 2)) # build a BoW over the preprocessed words
bow3 = vec3.fit_transform(new_comments)
list(vec3.vocabulary_.items())[100:120]
bow3
clf3 = LogisticRegression(random_state=0, max_iter=500, class_weight='balanced')
clf3.fit(bow3, train.toxic)
pred = clf3.predict(bow3)
print(classification_report(pred, train.toxic))
test
new_commentstest = []
for i in trange(len(test.comment), desc='loop'):
new_commentstest.append(" ".join(clean_text(test.comment[i])))
bow_test_pred3 = test.copy()
bow_test_pred3['newcomment'] = new_commentstest
bow_test_pred3.tail()
bow_test_pred3['toxic'] = clf3.predict(vec3.transform(bow_test_pred3.newcomment))
bow_test_pred3['toxic'] = bow_test_pred3['toxic'].astype(int)
bow_test_pred3.drop('comment', axis=1, inplace=True)
bow_test_pred3.drop('newcomment', axis=1, inplace=True)
bow_test_pred3
confusion_matrix(bow_test_pred2.toxic, bow_test_pred3.toxic)
bow_test_pred3.to_csv('bow_v3.csv', index=False)
# !kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f bow_v3.csv -m "kirill_setdekov bow3 with preprocessing"
!pip install scikit-learn==0.24
from sklearn.ensemble import RandomForestClassifier
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import HalvingGridSearchCV
```
Not run - too slow.
```
# rnd_reg = RandomForestClassifier( )
# # hyper-parameter space
# param_grid_RF = {
# 'n_estimators' : [10,20,50,100,200,500,1000],
# 'max_features' : [0.6,0.8,"auto","sqrt"],
# }
# search_two = HalvingGridSearchCV(rnd_reg, param_grid_RF, factor=5, scoring='accuracy',
# n_jobs=-1, random_state=0, verbose=2).fit(bow3, train.toxic)
# search_two.best_params_
rnd_reg_2 = RandomForestClassifier(n_estimators=1000, verbose=5, n_jobs=-1)
search_no = rnd_reg_2.fit(bow3, train.toxic)
bow_test_pred4 = test.copy()
bow_test_pred4['newcomment'] = new_commentstest
bow_test_pred4.tail()
bow_test_pred4['toxic'] = search_no.predict(vec3.transform(bow_test_pred4.newcomment))
bow_test_pred4['toxic'] = bow_test_pred4['toxic'].astype(int)
bow_test_pred4.drop('comment', axis=1, inplace=True)
bow_test_pred4.drop('newcomment', axis=1, inplace=True)
bow_test_pred4
confusion_matrix(bow_test_pred4.toxic, bow_test_pred3.toxic)
bow_test_pred4.to_csv('bow_v4.csv', index=False)
!kaggle competitions submit -c toxic-comments-classification-apdl-2021 -f bow_v4.csv -m "kirill_setdekov bow4 with preprocessing and RF"
```
|
github_jupyter
|
# Minimal end-to-end causal analysis with ```cause2e```
This notebook shows a minimal example of how ```cause2e``` can be used as a standalone package for end-to-end causal analysis. It illustrates how we can proceed in stringing together many causal techniques that have previously required fitting together various algorithms from separate sources with unclear interfaces. Additionally, the numerous techniques have been packed into only two easy-to-use functions for causal discovery and causal estimation. Hopefully, you will find this notebook helpful in guiding you through the process of setting up your own causal analyses for custom problems. The overall structure should always be the same regardless of the application domain. For more advanced features, check out the other notebooks.
### Imports
By the end of this notebook, you will probably be pleasantly surprised by the fact that we did not have to import lots of different packages to perform a full causal analysis consisting of different subtasks.
```
import os
from cause2e import path_mgr, knowledge, discovery
```
## Set up paths to data and output directories
This step is conveniently handled by the ```PathManager``` class, which avoids having to wrestle with paths throughout the multistep causal analysis. If we want to perform the analysis in a directory ```'dirname'``` that contains ```'dirname/data'``` and ```'dirname/output'``` as subdirectories, we can also use ```PathManagerQuick``` for an even easier setup. The ```experiment_name``` argument is used for generating output files with meaningful names, in case we want to study multiple scenarios (e.g. with varying model parameters).
For this analysis, we use the sprinkler dataset. Unfortunately, there are still some problems to be sorted out with categorical data in the estimation step, but continuous and discrete data work fine. Therefore, we use a version of the dataset where only the seasons ```'Spring'``` and ```'Summer'``` are present, such that we can replace these values by 0 and 1.
```
cwd = os.getcwd()
wd = os.path.dirname(cwd)
paths = path_mgr.PathManagerQuick(experiment_name='sprinkler',
data_name='sprinkler.csv',
directory=wd
)
```
## Learn the causal graph from data and domain knowledge
Model-based causal inference leverages qualitative knowledge about pairwise causal connections to obtain unbiased estimates of quantitative causal effects. The qualitative knowledge is encoded in the causal graph, so we must recover this graph before we can start actually estimating the desired effects. For learning the graph from data and domain knowledge, we use the ```StructureLearner``` class.
```
learner = discovery.StructureLearner(paths)
```
### Read the data
The ```StructureLearner``` has reading methods for csv and parquet files.
```
learner.read_csv(index_col=0)
```
The first step in the analysis should be an assessment of which variables we are dealing with. In the sprinkler dataset, each sample tells us
- the current season
- whether it is raining
- whether our lawn sprinkler is activated
- whether our lawn is slippery
- whether our lawn is wet.
```
learner.variables
```
### Preprocess the data
As mentioned above, currently there are problems in the estimation step with categorical data, so we use this occasion to showcase ```cause2e```'s built-in preprocessing functionalities. We define a function that replaces instances of ```'Summer'``` by 1, and instances of ```'Spring'``` by 0. Afterwards we apply it to our data and throw out the categorical ```'Season'``` column. For more preprocessing options, check out the pertaining notebook.
```
def is_summer(data, col_name):
return (data[col_name] == 'Summer').apply(int)
learner.combine_variables(name='Season_binary', func=is_summer, input_cols=['Season'], keep_old=False)
```
It is necessary to communicate to the ```StructureLearner``` whether the variables are discrete, continuous, or a mix of both. We check how many unique values each variable takes on in our sample and deduce that all variables are discrete.
```
learner.data.nunique()
```
This information is passed to the ```StructureLearner``` by indicating the exact sets of discrete and continuous variables.
```
learner.discrete = learner.variables
learner.continuous = set()
```
### Provide domain knowledge
Humans can often infer parts of the causal graph from domain knowledge. The nodes are always just the variables in the data, so the problem of finding the right graph comes down to selecting the right edges between them.
As a reminder: The correct causal graph has an edge from variable A to variable B if and only if variable A directly influences variable B (changing the value of variable A changes the value of variable B if we keep all other variables fixed).
There are three ways of passing domain knowledge for the graph search:
- Indicate which edges must be present in the causal graph.
- Indicate which edges must not be present in the causal graph.
- Indicate a temporal order in which the variables have been created. This is then used to generate forbidden edges, since the future can never influence the past.
In this example, we use the ```knowledge.EdgeCreator``` to prescribe that
- no variables are direct causes of the season,
- the lawn being slippery is not a direct cause of any other variable
- turning the sprinkler on or off directly affects the wetness of the lawn,
- turning the sprinkler on or off does not directly affect the weather.
```
edge_creator = knowledge.EdgeCreator()
edge_creator.forbid_edges_from_groups({'Season_binary'}, incoming=learner.variables)
edge_creator.forbid_edges_from_groups({'Slippery'}, outgoing=learner.variables)
edge_creator.require_edge('Sprinkler', 'Wet')
edge_creator.forbid_edge('Sprinkler', 'Rain')
```
There is a fourth way of passing knowledge which is not used in learning the graph, but in validating the quantitative estimates resulting from our end-to-end causal analysis. We often know beforehand what some of the quantitative effects should look like, e.g.
- turning the sprinkler on should have a positive overall effect (-> average treatment effect; read below if you are not familiar with types of causal effects) on the lawn being wet and
- making the lawn wet should have a positive overall effect on the lawn being slippery.
Instead of checking manually at the end if our expectations have been met, we can automate this validation by using the ```knowledge.ValidationCreator```. For illustration purposes, we also add two more validations that should fail:
- the sprinkler has a negative natural direct effect on the weather and
- the natural indirect effect of the lawn being slippery on the season is between 0.2 and 0.4 (remember to normalize your data before such a validation if they are not measured on the same scale).
```
validation_creator = knowledge.ValidationCreator()
validation_creator.add_expected_effect(('Sprinkler', 'Wet', 'nonparametric-ate'), ('greater', 0))
validation_creator.add_expected_effect(('Wet', 'Slippery', 'nonparametric-ate'), ('greater', 0))
validation_creator.add_expected_effect(('Sprinkler', 'Rain', 'nonparametric-nde'), ('less', 0))
validation_creator.add_expected_effect(('Slippery', 'Season_binary', 'nonparametric-nie'), ('between', 0.2, 0.4))
```
We pass the knowledge to the ```StructureLearner``` and check if it has been correctly received.
```
learner.set_knowledge(edge_creator=edge_creator, validation_creator=validation_creator)
```
### Apply a structure learning algorithm
Now that the ```StructureLearner``` has received the data and the domain knowledge, we can try to recover the original graph using causal discovery methods provided by the internally called ```py-causal``` package. There are many parameters that can be tuned (choice of algorithm, search score, independence test, hyperparameters, ...) and we can get an overview by calling some informative methods of the learner. Reasonable default arguments are provided (FGES with CG-BIC score for possibly mixed datatypes and respecting domain knowledge), so we use these for our minimal example.
```
learner.run_quick_search()
```
The output of the search is a proposed causal graph. We can ignore the warning about stopping the Java Virtual Machine (needed by ```py-causal``` which is a wrapper around the ```TETRAD``` software that is written in Java) if we do not run into any problems. If the algorithm cannot orient all edges, we need to do this manually. Therefore, the output includes a list of all undirected edges, so we do not miss them in complicated graphs with many variables and edges. In our case, all the edges are already oriented.
The result seems reasonable:
- The weather depends on the season.
- The sprinkler use also depends on the season.
- The lawn will be wet if it rains or if the sprinkler is activated.
- The lawn will be slippery if it is wet.
### Saving the graph
```Cause2e``` allows us to save the result of our search to different file formats with the ```StructureLearner.save_graphs``` method. The name of the file is determined by the ```experiment_name``` parameter from the ```PathManager```. If the result of the graph search is already a directed acyclic graph that respects our domain knowledge, the graph is automatically saved, as we can see from the above output. Check out the graph postprocessing notebook for information on how to proceed when the result of the search needs further adjustments.
## Estimate causal effects from the graph and the data
After we have successfully recovered the causal graph from data and domain knowledge, we can use it to estimate quantitative causal effects between the variables in the graph. It is convenient that we can use the same graph and data to estimate multiple causal effects, e.g. the one that the Sprinkler has on the lawn being slippery, as well as the one that the season has on the rain probability, without having to repeat the previous steps. Once we have managed to qualitatively model the data generating process, we are already in a very good position. The remaining challenges can be tackled with the core functionality from the ```DoWhy``` package, which we have wrapped into a single easy-to-use convenience method. Usually, all estimation topics are handled by the ```estimator.Estimator```, but the ```StructureLearner``` can also run a quick analysis of all causal effects with preset parameters. For more detailed analyses, check out the other notebooks that describe the causal identification and estimation process step by step.
```
learner.run_all_quick_analyses()
```
The output consists of a detailed analysis of the causal effects in our system.
### Heatmaps
The first three images are heatmaps, where the (i, j)-entry shows the causal effect of variable i on variable j. The three heatmaps differ in the type of causal effect that they are describing:
- **Average Treatment Effect (ATE)**: Shows how the outcome variable varies if we vary the treatment variable. This comprises direct and indirect effects. The sprinkler influences the lawn being slippery, even if this does not happen directly, but via its influence on the lawn being wet.
- **Natural Direct Effect (NDE)**: Shows how the outcome variable varies if we vary the treatment variable and keep all other variables fixed. This comprises only direct effects. The sprinkler does not directly influence the lawn being slippery, as we can read off from the heatmap.
- **Natural Indirect Effect (NIE)**: Shows the difference between ATE and NDE. By definition, this comprises only indirect effects. The sprinkler has a strong indirect influence on the lawn being slippery, as we can read off from the heatmap.
In our example, we can easily identify from the graph if an effect is direct or indirect, but in examples where a variable simultaneously has a direct and an indirect influence on another variable, it is very challenging to separate the effects without resorting to the algebraic methods that ```cause2e``` uses internally.
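To make the relationship between the three effect types concrete, here is a tiny worked example in plain Python. The numbers are made up purely for illustration (they are not taken from the output above); the only point is the decomposition ATE = NDE + NIE.
```
# Hypothetical numbers, chosen only to illustrate the decomposition ATE = NDE + NIE.
ate_sprinkler_slippery = 0.45  # total effect of 'Sprinkler' on 'Slippery'
nde_sprinkler_slippery = 0.0   # no direct edge from 'Sprinkler' to 'Slippery' in our graph
nie_sprinkler_slippery = ate_sprinkler_slippery - nde_sprinkler_slippery
print(nie_sprinkler_slippery)  # 0.45: the entire effect is mediated through 'Wet'
```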
### Validations
The next output shows if our model has passed each of the validations, based on the expected causal effects that we have communicated before running the causal end-to-end analysis. If we are interested in a specific effect, say, the effect of the sprinkler on the lawn being slippery, the estimate of this effect by our learnt causal model can be trusted more if the estimates for other effects match our expectations. We see that the results of the validations turned out exactly as described above (in practice we would not want any validations to fail; this was only for demonstration purposes).
### Numeric tables
The three numeric tables show the same information as the three previous heatmaps, only in quantitative instead of visual form.
### PDF report
```Cause2e``` automatically generates a pdf report that contains
- the causal graph indicating all qualitative relationships,
- the three heatmaps visualizing all quantitative causal effects,
- the results of the validations,
- the three numeric tables reporting all quantitative causal effects.
This is helpful if we want to communicate our findings to other people, or if we want to modify the analysis at a later time and compare the outcomes of both analyses.
## Discussion of the results
The heatmaps show the effects that we would expect given our causal graph:
- There is less rain in summer than in spring.
- Sprinklers are more often turned on in summer than in spring.
- Rain increases the wetness of the lawn.
- Turning the sprinkler on also increases the wetness of the lawn.
- Wetting the lawn causes it to be slippery.
It is interesting to see that the first two effects roughly cancel each other out, resulting in a small ATE of 0.1 that ```'Season_binary'``` has on ```'Slippery'``` and ```'Wet'```. In general, it is a good strategy to look at the heatmaps for discovering the qualitative nature of the different causal effects and then inspect the numeric tables for the exact numbers if needed.
Another noteworthy entry is the overall effect of ```'Sprinkler'``` on ```'Wet'```. The result is 0.638, so turning on the sprinkler makes it more likely for the lawn to be wet, as it should be. However, we might ask ourselves: "Why is the effect not 1? Whenever we turn on the sprinkler, the lawn will be wet!" This can be explained by looking at the definition of our chosen effect type, the nonparametric average treatment effect (ATE): The ATE tells us how much (on average) we change the outcome by changing the treatment. In our case, we can distinguish between two possible scenarios:
If it is raining, then the lawn is wet anyway, so turning the sprinkler on does not change the outcome at all. Only if it is not raining, the lawn state is changed to wet by turning on the sprinkler.
We can convince ourselves that this is the correct explanation by looking at the proportion of samples where it is not raining.
```
1 - sum(learner.data['Rain']) / len(learner.data)
```
We recover the same number of 0.638. Additionally, we can change our data to consist only of the instances where it is not raining. If we now repeat the causal analysis, the effect is indeed 1 (you can skip past the warnings that are caused by the now degenerate dataset). This procedure can be generalized to analyzing other conditional causal effects.
```
learner.data = learner.data[learner.data['Rain']==0]
learner.run_all_quick_analyses()
```
|
github_jupyter
|
# Training Keyword Spotting
This notebook builds on the Colab in which we used the pre-trained [micro_speech](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/lite/micro/examples/micro_speech) example as well as the HarvardX [3_5_18_TrainingKeywordSpotting.ipynb](https://github.com/tinyMLx/colabs) and [4_5_16_KWS_PretrainedModel](https://github.com/tinyMLx/colabs) from the [TinyML Specialization on edX](https://www.edx.org/professional-certificate/harvardx-tiny-machine-learning).
# Setup
<font color='red'>**This Notebook only works on Tensorflow 1.15 and was tested with Tensorflow 1.15.5**</font>
### Prerequisites
Clone the TensorFlow Github Repository with the relevant base code.
```
%%bash
rm -rf tensorflow log v2.4.1.zip logs models train dataset extract_loudest_section
apt-get update -qq && apt-get install -y wget unzip
wget https://github.com/tensorflow/tensorflow/archive/v2.4.1.zip
unzip v2.4.1.zip &> log
mv tensorflow-2.4.1/ tensorflow/
rm -rf v2.4.1.zip log
```
### Import Packages
Import standard packages as well as the additional packages from the cloned Github Repo.
```
import tensorflow as tf
import sys
# We add this path so we can import the speech processing modules.
sys.path.append("./tensorflow/tensorflow/examples/speech_commands/")
import input_data
import models
import numpy as np
import pickle
import shutil
import os
```
### Check GPU availability
The code will also work without GPU acceleration, but it will be significantly slower.
```
tf.test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=None)
```
### Configure Your Model!
Select your keywords and model settings with which to train!
**This is where you need to make choices and input data!**
```WANTED_WORDS``` = A comma-delimited string of the words you want to train for (e.g., "yes,no"). All the other words you do not select will be used to train an "unknown" label, so that the model learns to recognize your specific words rather than just any speech. Audio data with no spoken words will be used to train a "silence" label. We suggest picking 2-4 words for best results.
Options for target words are (PICK FROM THIS LIST FOR BEST RESULTS): "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "yes", "no", "up", "down", "left", "right", "on", "off", "stop", "go", "backward", "forward", "follow", "learn".
Additional words that will be used to help train the "unknown" label are: "bed", "bird", "cat", "dog", "happy", "house", "marvin", "sheila", "tree", "wow".
```
WANTED_WORDS = "stop,go"
```
The number of training steps and learning rates can be specified as comma-separated strings to define the amount/rate at each stage. For example, ```TRAINING_STEPS="12000,3000"``` and ```LEARNING_RATE="0.001,0.0001"``` will run 12,000 training steps with a rate of 0.001 followed by 3,000 final steps with a learning rate of 0.0001. These are good default values to start from, as the course staff has found them to work well in the past!
```
TRAINING_STEPS = "12000,3000"
LEARNING_RATE = "0.001,0.0001"
```
We suggest you leave the ```MODEL_ARCHITECTURE``` as tiny_conv the first time but if you would like to do this again and explore additional models some options are: ```single_fc, conv, low_latency_conv, low_latency_svdf, tiny_embedding_conv```
```
MODEL_ARCHITECTURE = 'tiny_conv'
# Calculate the total number of steps, which is used to identify the checkpoint
# file name.
TOTAL_STEPS = str(sum(map(lambda string: int(string), TRAINING_STEPS.split(","))))
# Print the configuration to confirm it
print("Training these words: %s" % WANTED_WORDS)
print("Training steps in each stage: %s" % TRAINING_STEPS)
print("Learning rate in each stage: %s" % LEARNING_RATE)
print("Total number of training steps: %s" % TOTAL_STEPS)
```
**DO NOT MODIFY** the following constants as they include filepaths used in this notebook and data that is shared during training and inference.
```
# Calculate the percentage of 'silence' and 'unknown' training samples required
# to ensure that we have equal number of samples for each label.
number_of_labels = WANTED_WORDS.count(',') + 1
number_of_total_labels = number_of_labels + 2 # for 'silence' and 'unknown' label
equal_percentage_of_training_samples = int(100.0/(number_of_total_labels))
SILENT_PERCENTAGE = equal_percentage_of_training_samples
UNKNOWN_PERCENTAGE = equal_percentage_of_training_samples
# Constants which are shared during training and inference
PREPROCESS = 'micro'
WINDOW_STRIDE = 20
# Constants used during training only
VERBOSITY = 'DEBUG'
EVAL_STEP_INTERVAL = '1000'
SAVE_STEP_INTERVAL = '1000'
# Constants for training directories and filepaths
DATASET_DIR = 'dataset/'
LOGS_DIR = 'logs/'
TRAIN_DIR = 'train/' # for training checkpoints and other files.
# Constants for inference directories and filepaths
import os
MODELS_DIR = 'models'
if not os.path.exists(MODELS_DIR):
os.mkdir(MODELS_DIR)
MODEL_TF = os.path.join(MODELS_DIR, 'model.pb')
MODEL_TFLITE = os.path.join(MODELS_DIR, 'model.tflite')
FLOAT_MODEL_TFLITE = os.path.join(MODELS_DIR, 'float_model.tflite')
MODEL_TFLITE_MICRO = os.path.join(MODELS_DIR, 'model.cc')
SAVED_MODEL = os.path.join(MODELS_DIR, 'saved_model')
# Constants for Quantization
QUANT_INPUT_MIN = 0.0
QUANT_INPUT_MAX = 26.0
QUANT_INPUT_RANGE = QUANT_INPUT_MAX - QUANT_INPUT_MIN
# Constants for audio process during Quantization and Evaluation
SAMPLE_RATE = 16000
CLIP_DURATION_MS = 1000
WINDOW_SIZE_MS = 30.0
FEATURE_BIN_COUNT = 40
BACKGROUND_FREQUENCY = 0.8
BACKGROUND_VOLUME_RANGE = 0.1
TIME_SHIFT_MS = 100.0
# URL for the dataset and train/val/test split
DATA_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz'
VALIDATION_PERCENTAGE = 10
TESTING_PERCENTAGE = 10
# Calculate the correct flattened input data shape for later use in model conversion
# since the model takes a flattened version of the spectrogram. The shape is number of
# overlapping windows times the number of frequency bins. For the default settings we have
# 40 bins (as set above) times 49 windows (as calculated below) so the shape is (1,1960)
def window_counter(total_samples, window_size, stride):
    '''helper function to count the number of full-length overlapping windows'''
    window_count = 0
    sample_index = 0
    while True:
        # a full-length window starting at sample_index must end at or before total_samples
        window = range(sample_index, sample_index + window_size)
        if window.stop <= total_samples:
            window_count += 1
        else:
            break
        sample_index += stride
    return window_count
OVERLAPPING_WINDOWS = window_counter(CLIP_DURATION_MS, int(WINDOW_SIZE_MS), WINDOW_STRIDE)
FLATTENED_SPECTROGRAM_SHAPE = (1, OVERLAPPING_WINDOWS * FEATURE_BIN_COUNT)
```
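As a quick sanity check, the computed shape should match the (1, 1960) mentioned in the comment above. Note that this check is only valid for the default audio settings (40 feature bins, 1000 ms clips, 30 ms windows every 20 ms), so adjust it if you change them.
```
# Sanity check: only valid for the default audio settings above.
print("Overlapping windows:", OVERLAPPING_WINDOWS)       # expected: 49
print("Flattened shape:", FLATTENED_SPECTROGRAM_SHAPE)   # expected: (1, 1960)
assert FLATTENED_SPECTROGRAM_SHAPE == (1, 1960)
```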
# Train the model
### Load in TensorBoard to visualize the training process.
As training progresses, you should see the training status show up in the TensorBoard area. When it works, it is very helpful for analyzing your training progress. Unfortunately, the staff has found that it sometimes doesn't start showing data for a while (~15 minutes), and sometimes doesn't show data until training completes (displaying ```No dashboards are active for the current data set``` instead). If it is working and then stops updating, look to the top of the cell and click reconnect.
```
%load_ext tensorboard
logs_base_dir='./logs/'
os.makedirs(logs_base_dir, exist_ok=True)
%tensorboard --logdir {logs_base_dir} --host 0.0.0.0 --port 6006
```
### Launch Training
If you would like to get more information on the training script you can find the source code for the script [here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/speech_commands/train.py). In short it sets up the optimizer and preprocessor based on all of the flags we pass in!
Finally, since ```VERBOSITY = 'DEBUG'``` is set above, be aware that the training cell will print A LOT of information. Specifically, you will get the accuracy and loss at each step as well as a confusion matrix every 1000 steps. We hope that is helpful in case TensorBoard fails to work. If you would like fewer printouts, you can change the setting to ```WARN``` or ```FATAL```; you will find it in the "Configure Your Model!" section.
```
!python tensorflow/tensorflow/examples/speech_commands/train.py \
--data_dir={DATASET_DIR} \
--wanted_words={WANTED_WORDS} \
--silence_percentage={SILENT_PERCENTAGE} \
--unknown_percentage={UNKNOWN_PERCENTAGE} \
--preprocess={PREPROCESS} \
--window_stride={WINDOW_STRIDE} \
--model_architecture={MODEL_ARCHITECTURE} \
--how_many_training_steps={TRAINING_STEPS} \
--learning_rate={LEARNING_RATE} \
--train_dir={TRAIN_DIR} \
--summaries_dir={LOGS_DIR} \
--verbosity={VERBOSITY} \
--eval_step_interval={EVAL_STEP_INTERVAL} \
--save_step_interval={SAVE_STEP_INTERVAL}
```
# Generating your Model
Just like with the pre-trained model we will now take the final checkpoint and convert it into a quantized TensorFlow Lite model.
### Generate a TensorFlow Model for Inference
Combine relevant training results (graph, weights, etc) into a single file for inference. This process is known as freezing a model and the resulting model is known as a frozen model/graph, as it cannot be further re-trained after this process.
```
!rm -rf {SAVED_MODEL}
!python tensorflow/tensorflow/examples/speech_commands/freeze.py \
--wanted_words=$WANTED_WORDS \
--window_stride_ms=$WINDOW_STRIDE \
--preprocess=$PREPROCESS \
--model_architecture=$MODEL_ARCHITECTURE \
--start_checkpoint=$TRAIN_DIR$MODEL_ARCHITECTURE'.ckpt-'{TOTAL_STEPS} \
--save_format=saved_model \
--output_file={SAVED_MODEL}
```
### Generate a TensorFlow Lite Model
Convert the frozen graph into a TensorFlow Lite model, which is fully quantized for use with embedded devices.
The following cell will also print the model size, which will be under 20 kilobytes.
We download the dataset to use as a representative dataset for better-informed post-training quantization.
```
model_settings = models.prepare_model_settings(
len(input_data.prepare_words_list(WANTED_WORDS.split(','))),
SAMPLE_RATE, CLIP_DURATION_MS, WINDOW_SIZE_MS,
WINDOW_STRIDE, FEATURE_BIN_COUNT, PREPROCESS)
audio_processor = input_data.AudioProcessor(
DATA_URL, DATASET_DIR,
SILENT_PERCENTAGE, UNKNOWN_PERCENTAGE,
WANTED_WORDS.split(','), VALIDATION_PERCENTAGE,
TESTING_PERCENTAGE, model_settings, LOGS_DIR)
with tf.Session() as sess:
float_converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)
float_tflite_model = float_converter.convert()
float_tflite_model_size = open(FLOAT_MODEL_TFLITE, "wb").write(float_tflite_model)
print("Float model is %d bytes" % float_tflite_model_size)
converter = tf.lite.TFLiteConverter.from_saved_model(SAVED_MODEL)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.inference_input_type = tf.lite.constants.INT8
converter.inference_output_type = tf.lite.constants.INT8
def representative_dataset_gen():
for i in range(100):
data, _ = audio_processor.get_data(1, i*1, model_settings,
BACKGROUND_FREQUENCY,
BACKGROUND_VOLUME_RANGE,
TIME_SHIFT_MS,
'testing',
sess)
flattened_data = np.array(data.flatten(), dtype=np.float32).reshape(FLATTENED_SPECTROGRAM_SHAPE)
yield [flattened_data]
converter.representative_dataset = representative_dataset_gen
tflite_model = converter.convert()
tflite_model_size = open(MODEL_TFLITE, "wb").write(tflite_model)
print("Quantized model is %d bytes" % tflite_model_size)
```
### Testing the accuracy after Quantization
Verify that the model we've exported is still accurate, using the TF Lite Python API and our test set.
```
# Helper function to run inference
def run_tflite_inference_testSet(tflite_model_path, model_type="Float"):
#
# Load test data
#
np.random.seed(0) # set random seed for reproducible test results.
with tf.Session() as sess:
test_data, test_labels = audio_processor.get_data(
-1, 0, model_settings, BACKGROUND_FREQUENCY, BACKGROUND_VOLUME_RANGE,
TIME_SHIFT_MS, 'testing', sess)
test_data = np.expand_dims(test_data, axis=1).astype(np.float32)
#
# Initialize the interpreter
#
interpreter = tf.lite.Interpreter(tflite_model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
#
# For quantized models, manually quantize the input data from float to integer
#
if model_type == "Quantized":
input_scale, input_zero_point = input_details["quantization"]
test_data = test_data / input_scale + input_zero_point
test_data = test_data.astype(input_details["dtype"])
#
# Evaluate the predictions
#
correct_predictions = 0
for i in range(len(test_data)):
interpreter.set_tensor(input_details["index"], test_data[i])
interpreter.invoke()
output = interpreter.get_tensor(output_details["index"])[0]
top_prediction = output.argmax()
correct_predictions += (top_prediction == test_labels[i])
print('%s model accuracy is %f%% (Number of test samples=%d)' % (
model_type, (correct_predictions * 100) / len(test_data), len(test_data)))
# Compute float model accuracy
run_tflite_inference_testSet(FLOAT_MODEL_TFLITE)
# Compute quantized model accuracy
run_tflite_inference_testSet(MODEL_TFLITE, model_type='Quantized')
```
# Testing the model with your own data!
Now comes the fun part. It's time to test your model with your own real-world data. We'll proceed in the same way we tested the pre-trained model. Have fun!
### Importing packages
```
!apt-get update -qqq && apt-get -y -qqq install apt-utils gcc libpq-dev libsndfile-dev git
!python3 -m pip install --upgrade --no-cache-dir --quiet pip ffmpeg-python scipy librosa google-colab
from IPython.display import HTML, Audio
from google.colab.output import eval_js
from base64 import b64decode
import numpy as np
from scipy.io.wavfile import read as wav_read
import io
import ffmpeg
#!pip install librosa
import librosa
import scipy.io.wavfile
!git clone https://github.com/petewarden/extract_loudest_section.git
!make -C extract_loudest_section/
print("Packages Imported, Extract_Loudest_Section Built")
```
### Define the helper function to run inference
```
# Helper function to run inference (on a single input this time)
# Note: this also includes additional manual pre-processing
TF_SESS = tf.compat.v1.InteractiveSession()
def run_tflite_inference_singleFile(tflite_model_path, custom_audio, sr_custom_audio, model_type="Float"):
#
# Preprocess the sample to get the features we pass to the model
#
# First re-sample to the needed rate (and convert to mono if needed)
custom_audio_resampled = librosa.resample(librosa.to_mono(np.float64(custom_audio)), sr_custom_audio, SAMPLE_RATE)
# Then extract the loudest one second
scipy.io.wavfile.write('custom_audio.wav', SAMPLE_RATE, np.int16(custom_audio_resampled))
!/tmp/extract_loudest_section/gen/bin/extract_loudest_section custom_audio.wav ./trimmed
# Finally pass it through the TFLiteMicro preprocessor to produce the
# spectrogram/MFCC input that the model expects
custom_model_settings = models.prepare_model_settings(
0, SAMPLE_RATE, CLIP_DURATION_MS, WINDOW_SIZE_MS,
WINDOW_STRIDE, FEATURE_BIN_COUNT, PREPROCESS)
custom_audio_processor = input_data.AudioProcessor(None, None, 0, 0, '', 0, 0,
model_settings, None)
custom_audio_preprocessed = custom_audio_processor.get_features_for_wav(
'trimmed/custom_audio.wav', model_settings, TF_SESS)
# Reshape the output into a 1,1960 matrix as that is what the model expects
custom_audio_input = custom_audio_preprocessed[0].flatten()
test_data = np.reshape(custom_audio_input,(1,len(custom_audio_input)))
#
# Initialize the interpreter
#
interpreter = tf.lite.Interpreter(tflite_model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()[0]
output_details = interpreter.get_output_details()[0]
#
# For quantized models, manually quantize the input data from float to integer
#
if model_type == "Quantized":
input_scale, input_zero_point = input_details["quantization"]
test_data = test_data / input_scale + input_zero_point
test_data = test_data.astype(input_details["dtype"])
#
# Run the interpreter
#
interpreter.set_tensor(input_details["index"], test_data)
interpreter.invoke()
output = interpreter.get_tensor(output_details["index"])[0]
top_prediction = output.argmax()
#
# Translate the output
#
top_prediction_str = ''
if top_prediction >= 2:
top_prediction_str = WANTED_WORDS.split(',')[top_prediction-2]
elif top_prediction == 0:
top_prediction_str = 'silence'
else:
top_prediction_str = 'unknown'
print('%s model guessed the value to be %s' % (model_type, top_prediction_str))
```
### Define the audio importing function
Adapted from: https://ricardodeazambuja.com/deep_learning/2019/03/09/audio_and_video_google_colab/ and https://colab.research.google.com/drive/1Z6VIRZ_sX314hyev3Gm5gBqvm1wQVo-a#scrollTo=RtMcXr3o6gxN
```
def get_audio():
"""Records audio from your local microphone inside a colab notebook
Returns
-------
tuple
audio (numpy.ndarray), sample rate (int)
Obs:
To write this piece of code I took inspiration/code from a lot of places.
It was late night, so I'm not sure how much I created or just copied o.O
Here are some of the possible references:
https://blog.addpipe.com/recording-audio-in-the-browser-using-pure-html5-and-minimal-javascript/
https://stackoverflow.com/a/18650249
https://hacks.mozilla.org/2014/06/easy-audio-capture-with-the-mediarecorder-api/
https://air.ghost.io/recording-to-an-audio-file-using-html5-and-js/
https://stackoverflow.com/a/49019356
"""
AUDIO_HTML = """
<script>
var my_div = document.createElement("DIV");
var my_p = document.createElement("P");
var my_btn = document.createElement("BUTTON");
var t = document.createTextNode("Press to start recording");
my_btn.appendChild(t);
//my_p.appendChild(my_btn);
my_div.appendChild(my_btn);
document.body.appendChild(my_div);
var base64data = 0;
var reader;
var recorder, gumStream;
var recordButton = my_btn;
var handleSuccess = function(stream) {
gumStream = stream;
var options = {
//bitsPerSecond: 8000, //chrome seems to ignore, always 48k
mimeType : 'audio/webm;codecs=opus'
//mimeType : 'audio/webm;codecs=pcm'
};
//recorder = new MediaRecorder(stream, options);
recorder = new MediaRecorder(stream);
recorder.ondataavailable = function(e) {
var url = URL.createObjectURL(e.data);
var preview = document.createElement('audio');
preview.controls = true;
preview.src = url;
document.body.appendChild(preview);
reader = new FileReader();
reader.readAsDataURL(e.data);
reader.onloadend = function() {
base64data = reader.result;
//console.log("Inside FileReader:" + base64data);
}
};
recorder.start();
};
recordButton.innerText = "Recording... press to stop";
navigator.mediaDevices.getUserMedia({audio: true}).then(handleSuccess);
function toggleRecording() {
if (recorder && recorder.state == "recording") {
recorder.stop();
gumStream.getAudioTracks()[0].stop();
recordButton.innerText = "Saving the recording... pls wait!"
}
}
// https://stackoverflow.com/a/951057
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
var data = new Promise(resolve=>{
//recordButton.addEventListener("click", toggleRecording);
recordButton.onclick = ()=>{
toggleRecording()
sleep(2000).then(() => {
// wait 2000ms for the data to be available...
// ideally this should use something like await...
//console.log("Inside data:" + base64data)
resolve(base64data.toString())
});
}
});
</script>
"""
display(HTML(AUDIO_HTML))
data = eval_js("data")
binary = b64decode(data.split(',')[1])
process = (ffmpeg
.input('pipe:0')
.output('pipe:1', format='wav')
.run_async(pipe_stdin=True, pipe_stdout=True, pipe_stderr=True, quiet=True, overwrite_output=True)
)
output, err = process.communicate(input=binary)
riff_chunk_size = len(output) - 8
# Break up the chunk size into four bytes, held in b.
q = riff_chunk_size
b = []
for i in range(4):
q, r = divmod(q, 256)
b.append(r)
# Replace bytes 4:8 in proc.stdout with the actual size of the RIFF chunk.
riff = output[:4] + bytes(b) + output[8:]
sr, audio = wav_read(io.BytesIO(riff))
return audio, sr
```
### Record your own audio and test the model!
After you run the record cell, wait for the stop button to appear, then start recording, and press the button to stop the recording once you have said the word!
```
custom_audio, sr_custom_audio = get_audio()
print("DONE")
# Then test the model
run_tflite_inference_singleFile(MODEL_TFLITE, custom_audio, sr_custom_audio, model_type="Quantized")
```
### Generate a TensorFlow Lite for Microcontrollers Model
To convert the TensorFlow Lite quantized model into a C source file that can be loaded by TensorFlow Lite for Microcontrollers on Arduino we simply need to use the ```xxd``` tool to convert the ```.tflite``` file into a ```.cc``` file.
```
!apt-get update -qqq && apt-get -qqq install xxd
MODEL_TFLITE = './models/model.tflite'
MODEL_TFLITE_MICRO = './models/model.cc'
!xxd -i {MODEL_TFLITE} > {MODEL_TFLITE_MICRO}
REPLACE_TEXT = MODEL_TFLITE.replace('/', '_').replace('.', '_')
!sed -i 's/'{REPLACE_TEXT}'/g_model/g' {MODEL_TFLITE_MICRO}
```
The generated Tensorflow Lite for Microcontroller model can now be used in the Arduino IDE. There are **two options** to do this:
1. Copy the screen output directly from the Jupyter Notebook into the **micro_features_model.cpp** file (in the Arduino IDE)
2. Download the **model.cc** file for later use to copy its content into the **micro_features_model.cpp** file (in the Arduino IDE)
### Option 1: Copy Output directly
```
!cat {MODEL_TFLITE_MICRO}
```
### Option 2: Download Model File
```
from IPython.display import FileLink
local_file = FileLink('./models/model.cc', result_html_prefix="Click here to download: ")
display(local_file)
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import seaborn as sns
%matplotlib inline
cc = pd.read_csv('./posts_ccompare_raw.csv', index_col=0, encoding='utf-8')
cc['Timestamp'] = pd.to_datetime(cc['Timestamp'])
```
# Reaction features
```
features_reactions = pd.DataFrame(index=cc.index)
features_reactions['n_up'] = cc['Actions.Agree.Total']
features_reactions['n_down'] = cc['Actions.Disagree.Total']
features_reactions['n_reply'] = cc['Actions.Comment.Total']
sns.pairplot(features_reactions)
```
# Post date features
```
features_date = pd.DataFrame(index=cc.index)
features_date['t_week'] = cc.Timestamp.dt.week
features_date['t_dow'] = cc.Timestamp.dt.dayofweek
features_date['t_hour'] = cc.Timestamp.dt.hour
features_date['t_day'] = cc.Timestamp.dt.day
sns.pairplot(features_date)
```
# Spacy NLP ...
```
import spacy # See "Installing spaCy"
nlp = spacy.load('en') # You are here.
spacy_docs = pd.DataFrame(index=cc.index)
docs = cc.Body.apply(nlp)
vec = docs.apply(lambda x: x.vector)
feature_word_vec = pd.DataFrame(vec.tolist(), columns=['spacy_%s'%i for i in range(300)])
feature_word_vec['spacy_sent'] = docs.apply(lambda x: x.sentiment)
# tfidf
'''
Author: Giovanni Kastanja
Python: 3.6.0
Date: 24/6/2017
'''
import pandas as pd
import numpy as np
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import cross_val_score
from scipy.sparse import csr_matrix
text = cc['Body']
# create a stopset (words that occur too many times)
stopset = set(stopwords.words('english'))
vectorizer = TfidfVectorizer(use_idf=True, lowercase=True, strip_accents='ascii', stop_words=stopset)
features_tfidf = pd.DataFrame(vectorizer.fit_transform(text).toarray())
```
# Target
```
targets = pd.read_csv('./btc-ind.csv')
targets['date'] = pd.to_datetime(targets['Date'])
targets = targets.set_index('date')
del targets['Date']
targets.tail()
join_by_date = pd.DataFrame(index=cc.index)
join_by_date['date'] = cc.Timestamp.dt.round(freq="d")
Y_all = join_by_date.join(targets, on='date').dropna()
groups = Y_all['date']
del Y_all['date']
cols = Y_all.columns
index = Y_all.index
#Y_all = pd.DataFrame(normalize(Y_all, axis=1, norm='l2'), columns=cols, index=index)
Y_all = Y_all - Y_all.mean()
Y_all = Y_all/Y_all.std()
#Y_all.plot()
```
# Combine features
```
#features = pd.concat([features_date, features_tfidf, features_reactions, feature_word_vec], axis=1)
features = pd.concat([features_date, features_reactions, feature_word_vec], axis=1)
X_all = features.loc[Y_all.index]
X_all.shape
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import normalize
from xgboost.sklearn import XGBRegressor
from sklearn.linear_model import LinearRegression, Lasso
rf = RandomForestRegressor(n_estimators=10, max_depth=3, criterion='mse')
xgb = XGBRegressor(n_estimators=10)
regressors = [rf, Lasso()]
target_scores = {}
for indicator in targets.columns:
Y =Y_all[indicator]
for reg in regressors:
tag = indicator+':'+str(reg)[:15]
scores = cross_val_score(reg, X_all, Y, cv=4, groups=groups, scoring='neg_mean_squared_error')
print(np.mean(scores), tag)
target_scores[tag] = scores
cv_score = pd.DataFrame(target_scores)
ms = cv_score.mean(axis=0)
ms.sort_values(ascending=False)
indicator = 'BTC_cbrt_dv_T1:Lasso(alpha=1.0'
indicator = indicator.split(":")[0]
Y = Y_all[indicator]
reg = XGBRegressor(n_estimators=100)
reg.fit(X_all, Y)
Y_t = reg.predict(X_all)
error = abs(Y - Y_t)
error.hist()
# DROP THE BULL$HIT
itruth = error < error.quantile(0.3)
X = X_all[itruth]
Y = Y_all[indicator][itruth]
G = groups[itruth]
reg = XGBRegressor(n_estimators=100, max_depth=8)
scores = cross_val_score(reg, X, Y, cv=4, groups=G, scoring='neg_mean_squared_error')
print(sorted(scores))
ax = groups.hist(figsize=(12,5))
G.hist(ax=ax)
reg = XGBRegressor(n_estimators=100, max_depth=8)
reg.fit(X,Y)
Y_ = reg.predict(X)
truth_df = pd.DataFrame({'date': G, 'Y': Y_})
def get_stats(group):
return {'min': group.min(), 'max': group.max(), 'count': group.count(), 'mean': group.mean()}
ax = targets.BTC_cbrt_dv_T1.plot()
# plot the per-post predictions on the same axis as the target series
truth_df.set_index('date')['Y'].plot(ax=ax)
truth_df
def drop_bs(indicator, q=0.3):
Y = Y_all[indicator]
reg = XGBRegressor(n_estimators=100)
reg.fit(X_all, Y)
Y_t = reg.predict(X_all)
error = abs(Y - Y_t)
error.hist()
itruth = error < error.quantile(q)
X = X_all[itruth]
Y = Y_all[indicator][itruth]
G = groups[itruth]
reg = XGBRegressor(n_estimators=30, max_depth=5)
scores = cross_val_score(reg, X, Y, cv=4, groups=G, scoring='neg_mean_squared_error')
print(sorted(scores))
print("MEAN CV SCORE: ", np.mean(scores))
reg = XGBRegressor(n_estimators=100, max_depth=8)
reg.fit(X,Y)
Y_ = reg.predict(X)
agg = pd.Series(Y_).groupby(G)
truthscore = agg.mean()
impact_count = agg.count()
truth_max = agg.max()
return pd.DataFrame(dict(truthscore=truthscore, impact_count=impact_count, truth_max=truth_max, date=truthscore.index))
dv = drop_bs('BTC_cbrt_dv_T1', 0.4)
import json
def to_json(df, path):
a = []
for i,d in list(df.iterrows()):
d = d.to_dict()
d['date'] = str(d['date'])
a.append(d)
with open(path, 'w') as f:
json.dump(a, f)
to_json(dv, '../bitcoin-daily-bars/out-truth-volume.json')
impactfull = cc.loc[itruth.index][itruth]
impactfull.head()
f = 'Cryptopian.Name'
a = impactfull.groupby(f).size()
b = cc.groupby(f).size()
c = pd.DataFrame(dict(a=a,b=b))
c = c[c.a>1]
c['impact'] = c.a/c.b
c.sort_values('impact', ascending=False)
dv.truthscore.plot()
target_scores
```
|
github_jupyter
|
```
import os
import json
import pandas as pd
from tqdm import tqdm_notebook
df_larval = pd.read_csv(os.path.join('..', 'data', 'breeding-sites', 'larval-survey-en.csv'))
df_larval.head()
```
## Shapefile
```
with open(os.path.join('..', 'data','shapefiles','Nakhon-Si-Thammarat.geojson')) as f:
data = json.load(f)
for i, feature in enumerate(data['features']):
prop = feature['properties']
province = prop['PV_TN']
district = prop['AP_TN']
subdist = prop['TB_TN']
df_tmp = df_larval.loc[(df_larval['province'] == province) &
(df_larval['district'] == district)]
province_en, district_en = df_tmp[['province_en','district_en']].values[0]
prop['PV_EN'] = province_en
prop['AP_EN'] = district_en
data['features'][2]['properties']
with open(os.path.join('..', 'data', 'shapefiles', 'Nakhon-Si-Thammarat-en.geojson'), 'w') as FILE:
json.dump(data, FILE, indent=4, ensure_ascii=False, sort_keys=True)
```
## Dictionary file
```
province_entry = []
for feature in data['features']:
prop = feature['properties']
province_entry.append([
prop['PV_TN'],
prop['AP_TN'],
prop['TB_TN'],
prop['PV_EN'],
prop['AP_EN'],
prop['TB_EN'],
])
province_entry = pd.DataFrame.from_records(province_entry, columns=['province_th', 'district_th', 'subdist_th',
'province_en', 'district_en', 'subdist_en'])
province_entry.to_csv(os.path.join('..', 'data', 'shapefiles', 'Nakhon-Si-Thammarat-dictionary.csv'))
province_entry.head()
```
## Detection file
```
with open(os.path.join('..', 'data','breeding-sites','detection.geojson')) as f:
detection = json.load(f)
for feature in tqdm_notebook(detection['features']):
prop = feature['properties']
province = prop['province']
district = prop['district']
subdist = prop['subdist']
df_tmp = province_entry.loc[
(province_entry['province_th'] == province) &
(province_entry['district_th'] == district) &
(province_entry['subdist_th'] == subdist)
]
province_en, district_en, subdist_en = df_tmp[['province_en','district_en', 'subdist_en']].values[0]
prop['province_en'] = province_en
prop['district_en'] = district_en
prop['subdist_en'] = subdist_en
with open(os.path.join('..', 'data','breeding-sites','detection-en.geojson'), 'w') as FILE:
json.dump(detection, FILE, indent=4, ensure_ascii=False, sort_keys=True)
```
|
github_jupyter
|
```
# Les imports pour l'exercice
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import string
import random
from collections import deque
```
## Part 1: Caesar Cipher
### Implementation
The following code contains two main functions: `encryptMessage` and `decryptMessage`.
These functions encrypt and decrypt a given string.
The encryption is done in the classic way by shifting the alphabet. The given key is the number of positions to shift.
The function `convertAlphabets` converts a text between two alphabets.
The function `shiftAlphabet` rotates the elements of a list. This is the equivalent of turning the inner wheel of a Caesar cipher disk.
```
alphabet_string = string.printable
alphabetListOrdered = list(alphabet_string)
numberListOrdered = list(map(ord, alphabetListOrdered))
def convertAlphabets(messageCharNumList, numList, numListToConvert, encrypt = True) :
index = 0
newList = []
for val in messageCharNumList:
indexOfLetter = numListToConvert.index(val)
newList.append(numList[indexOfLetter])
index += 1
if encrypt :
return ' '.join(map(chr,newList))
else :
return ''.join(map(chr,newList))
def shiftAlphabet(listToShift, keyShift):
keyShift = keyShift % len(listToShift)
return listToShift[keyShift:] + listToShift[:keyShift]
```
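As a quick sanity check of `shiftAlphabet`, here is what a rotation by one position does to a toy four-letter alphabet (the real functions use the full `string.printable` alphabet):
```
# Toy example: rotating a four-letter alphabet by one position.
shiftAlphabet(['a', 'b', 'c', 'd'], 1)  # -> ['b', 'c', 'd', 'a']
```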
For the encryption function, we use the alphabet of all printable ASCII characters given by `string.printable`.
```
string.printable
def encryptMessage(m, shiftKey):
alphabet_string = string.printable
alphabetListOrdered = list(alphabet_string)
numberListOrdered = list(map(ord, alphabetListOrdered))
alphabetListShuffled = shiftAlphabet(list(alphabetListOrdered), shiftKey)
numberListShuffled = list(map(ord, alphabetListShuffled))
return convertAlphabets(list(map(ord, list(m))), numberListShuffled, numberListOrdered)
def decryptMessage(m, shiftKey):
m = m.replace(' ', '')
alphabet_string = string.printable
alphabetListOrdered = list(alphabet_string)
numberListOrdered = list(map(ord, alphabetListOrdered))
alphabetListShuffled = list(alphabetListOrdered)
alphabetListShuffled = shiftAlphabet(list(alphabetListOrdered), shiftKey)
numberListShuffled = list(map(ord, alphabetListShuffled))
return convertAlphabets(list(map(ord, list(m))), numberListOrdered, numberListShuffled, False)
```
Below is an example of encryption and decryption.
```
m = "Vous savez, moi je ne crois pas qu'il y ait de bonne ou de mauvaise situation. Moi, si je devais resumer ma vie aujourd'hui avec vous, je dirais que c'est d'abord des rencontres. Des gens qui m'ont tendu la main, peut-etre a un moment ou je ne pouvais pas, ou j'etais seul chez moi"
m
```
We encrypt the text above using a key of value `4501`.
```
e = encryptMessage(m, 4501)
e
```
We check that decryption works correctly when we supply the same key.
```
d = decryptMessage(e, 4501)
d
```
### Exercise: Cracking a shift cipher with frequency analysis
Here is a cipher generated with the `encryptMessage` encryption algorithm.
The goal is to decrypt the message by analyzing the frequency of the characters in the message.
The encoded message is in English, so a piece of English text is provided so that you can compare the frequencies.
```
# This is the encoded message to crack.
# Spaces are left in on purpose so that the individual characters are easy to recognize
# Beware of special ASCII characters. E.g. `\r` does count as one character of the alphabet.
cypher = "Z C D M P C i W M S i B G Q A S Q Q i R F C i P C Q M J S R G M L \r i J C R i K C i N J y A C i z C D M P C i W M S i M L C i M P i R U M i R F G L E Q \r i ' i U y L R i W M S i R M i S L B C P Q R y L B i R U M i R F G L E Q i T C P W i A J C y P J W i y L B i R M i A M L Q G B C P i R F C K i D P M K i R F C i Q y K C i N M G L R i M D i T G C U i D P M K i U F G A F i ' i y K i N J y A G L E i R F C K i z C D M P C i W M S \x0c i ' i y Q I i W M S i R M i A M L Q G B C P i G R i D P M K i K W i N M G L R i M D i T G C U \r i z C A y S Q C i G D i W M S i y N N P M T C i M D i G R \r i W M S i U G J J i z C i C L H M G L C B i R M i A y P P W i M S R i y J J i ' i Q y W \x0c i ' R i U G J J i z C i y i E P C y R i P C Q N M L Q G z G J G R W \x0c i < F C P C i y P C i N C M N J C i U F M i y Q I i K C i U F C R F C P i ' i y K i R F C i Q y K C i K y L i R F y R i ' i U y Q i G L i p x q o \r i M P i U F C R F C P i R F C P C i F y Q i z C C L i y L W i A F y L E C i G L i K C \x0c i [ M S i y P C i P G E F R i G L i y Q I G L E i R F y R i O S C Q R G M L \x0c i * C R i K C \r i F M U C T C P \r i F y Q R C L i R M i y Q Q S P C i R F y R i ' i y K i R F C i Q y K C i % y L B F G i y Q i ' i U y Q i G L i p x q o \x0c i ' i F y T C i L M R i A F y L E C B i G L i y L W i D S L B y K C L R y J i P C Q N C A R \x0c i ' i y R R y A F i R F C i Q y K C i G K N M P R y L A C i R M i L M L T G M J C L A C i R F y R i ' i B G B i R F C L \x0c i ' D i y R i y J J \r i K W i C K N F y Q G Q i M L i G R i F y Q i E P M U L i Q R P M L E C P \x0c i < F C P C i G Q i L M i P C y J i A M L R P y B G A R G M L i z C R U C C L i R F C i N P C Q C L R i P C Q M J S R G M L i y L B i K W i N P C T G M S Q i U P G R G L E Q i y L B i S R R C P y L A C Q \x0c i - A A y Q G M L Q i J G I C i R F C i N P C Q C L R i B M i L M R i M A A S P i G L i C T C P W z M B W } Q i y L B i z S R i P y P C J W i G L i y L W z M B W } Q i J G D C \x0c i ' i U y L R i W M S i R M i I L M U i y L B i D C C J i R F y R i R F C P C i G Q i L M R F G L E i z S R i N S P C Q R i Y F G K Q y i G L i y J J i R F y R i ' i y K i Q y W G L E i y L B i B M G L E i R M B y W \x0c i < F C i B P y D R i P C Q M J S R G M L i M D i R F C i ? M P I G L E i ! M K K G R R C C i G Q i z y Q C B i M L i Y F G K Q y \r i R F C i A M L R C K N J y R C B i Q R P S E E J C i Q G K G J y P J W i F y Q i G R Q i P M M R Q i G L i Y F G K Q y \x0c i ' D \r i R F C P C D M P C \r i R F C P C i G Q i y L W i y K M L E i W M S i U F M i F y Q i J M Q R i D y G R F i G L i Y F G K Q y i M P i G Q i U C y P G C B i M D i G R \r i J C R i F G K i L M R i T M R C i D M P i R F G Q i P C Q M J S R G M L \x0c`"
# This code turns the cipher into a list, removing the spaces between the characters along the way
cypherList = list(cypher)[0::2]
cypherList
```
To help you, here is an excerpt of an English speech on which you can run a first character-frequency analysis.
The text is long enough to be representative of a typical distribution of the English language.
Your histogram should look like the one on the following web page:
https://www3.nd.edu/~busiforc/handouts/cryptography/letterfrequencies.html
```
englishText = "I am the First Accused.I hold a Bachelor's Degree in Arts and practised as an attorney in Johannesburg for a number of years in partnership with Oliver Tambo. I am a convicted prisoner serving five years for leaving the country without a permit and for inciting people to go on strike at the end of May 1961. At the outset, I want to say that the suggestion made by the State in its opening that the struggle in South Africa is under the influence of foreigners or communists is wholly incorrect. I have done whatever I did, both as an individual and as a leader of my people, because of my experience in South Africa and my own proudly felt African background, and not because of what any outsider might have said. In my youth in the Transkei I listened to the elders of my tribe telling stories of the old days. Amongst the tales they related to me were those of wars fought by our ancestors in defence of the fatherland. The names of Dingane and Bambata, Hintsa and Makana, Squngthi and Dalasile, Moshoeshoe and Sekhukhuni, were praised as the glory of the entire African nation. I hoped then that life might offer me the opportunity to serve my people and make my own humble contribution to their freedom struggle. This is what has motivated me in all that I have done in relation to the charges made against me in this case. Having said this, I must deal immediately and at some length with the question of violence. Some of the things so far told to the Court are true and some are untrue. I do not, however, deny that I planned sabotage. I did not plan it in a spirit of recklessness, nor because I have any love of violence. I planned it as a result of a calm and sober assessment of the political situation that had arisen after many years of tyranny, exploitation, and oppression of my people by the Whites."
```
**Instructions**:
Compare the character frequencies of the cipher and of the speech.
For simplicity, sort the arrays in alphabetical order.
Then shift the cipher alphabet until the two distributions line up.
You can use the `shiftAlphabet` function given above.
This shift is then the encryption and decryption key!
```
# Your code goes below
```
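As a starting point (not a full solution), here is a minimal sketch for counting and plotting character frequencies, using `collections.Counter` together with the `matplotlib` import from the top of the notebook; finding the right shift with `shiftAlphabet` is left to you:
```
from collections import Counter

# Character frequencies of the cipher and of the reference English text.
cypher_freq = Counter(cypherList)
english_freq = Counter(englishText)

# Sort alphabetically so the two distributions can be compared side by side.
english_sorted = sorted(english_freq.items())
plt.figure(figsize=(14, 4))
plt.bar([c for c, _ in english_sorted], [n for _, n in english_sorted])
plt.title('Character frequencies in the reference English text')
plt.show()
```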
## Part 2: Randomized Caesar Cipher
## Implementation
Here is a slight modification of the encryption functions from the beginning of the exercise.
The only difference is that instead of shuffling by shifting, the position of each letter is random.
```
def convertAlphabetsRand(messageCharNumList, numList, numListToConvert, encrypt = True) :
index = 0
newList = []
for val in messageCharNumList:
indexOfLetter = numListToConvert.index(val)
newList.append(numList[indexOfLetter])
index += 1
if encrypt :
return ' '.join(map(chr,newList))
else :
return ''.join(map(chr,newList))
def encryptMessageRand(m, seedKey):
alphabet_string = string.printable
alphabetListOrdered = list(alphabet_string)
numberListOrdered = list(map(ord, alphabetListOrdered))
alphabetListShuffled = list(alphabetListOrdered)
def seed():
return seedKey
random.shuffle(alphabetListShuffled, seed)
numberListShuffled = list(map(ord, alphabetListShuffled))
return convertAlphabetsRand(list(map(ord, list(m))), numberListShuffled, numberListOrdered)
def decryptMessageRand(m, seedKey):
m = m.replace(' ', '')
alphabet_string = string.printable
alphabetListOrdered = list(alphabet_string)
numberListOrdered = list(map(ord, alphabetListOrdered))
alphabetListShuffled = list(alphabetListOrdered)
def seed():
return seedKey
random.shuffle(alphabetListShuffled, seed)
numberListShuffled = list(map(ord, alphabetListShuffled))
return convertAlphabets(list(map(ord, list(m))), numberListOrdered, numberListShuffled, False)
m = "Vous savez, moi je ne crois pas qu'il y ait de bonne ou de mauvaise situation. Moi, si je devais resumer ma vie aujourd'hui avec vous, je dirais que c'est d'abord des rencontres. Des gens qui m'ont tendu la main, peut-etre a un moment ou je ne pouvais pas, ou j'etais seul chez moi"
m
e = encryptMessageRand(m, 0.42)
e
d = decryptMessageRand(e, 0.42)
d
```
## Cracking a Random Cypher
Here is a cipher generated with the `encryptMessageRand` encryption algorithm.
The goal is to decrypt the message by analyzing the frequency of the characters in the message.
The encoded message is in English, so a piece of English text is provided so that you can compare the frequencies.
```
random_cypher = 'J \t _ n \t i _ q q z \t u p \t k p j 2 \t x j u i \t z p v \t u p e _ z \t j 2 \t x i _ u \t x j m m \t h p \t e p x 2 \t j 2 \t i j t u p s z \t _ t \t u i f \t h s f _ u f t u \t e f n p 2 t u s _ u j p 2 \t 1 p s \t 1 s f f e p n \t j 2 \t u i f \t i j t u p s z \t p 1 \t p v s \t 2 _ u j p 2 / \t G j 3 f \t t d p s f \t z f _ s t \t _ h p - \t _ \t h s f _ u \t B n f s j d _ 2 - \t j 2 \t x i p t f \t t z n \n p m j d \t t i _ e p x \t x f \t t u _ 2 e \t u p e _ z - \t t j h 2 f e \t u i f \t F n _ 2 d j q _ u j p 2 \t Q s p d m _ n _ u j p 2 / \t 6 i j t \t n p n f 2 u p v t \t e f d s f f \t d _ n f \t _ t \t _ \t h s f _ u \t \n f _ d p 2 \t m j h i u \t p 1 \t i p q f \t u p \t n j m m j p 2 t \t p 1 \t O f h s p \t t m _ 3 f t \t x i p \t i _ e \t \n f f 2 \t t f _ s f e \t j 2 \t u i f \t 1 m _ n f t \t p 1 \t x j u i f s j 2 h \t j 2 k v t u j d f / \t J u \t d _ n f \t _ t \t _ \t k p z p v t \t e _ z \n s f _ l \t u p \t f 2 e \t u i f \t m p 2 h \t 2 j h i u \t p 1 \t u i f j s \t d _ q u j 3 j u z / \t C v u \t p 2 f \t i v 2 e s f e \t z f _ s t \t m _ u f s - \t u i f \t O f h s p \t t u j m m \t j t \t 2 p u \t 1 s f f / \t P 2 f \t i v 2 e s f e \t z f _ s t \t m _ u f s - \t u i f \t m j 1 f \t p 1 \t u i f \t O f h s p \t j t \t t u j m m \t t _ e m z \t d s j q q m f e \t \n z \t u i f \t n _ 2 _ d m f t \t p 1 \t t f h s f h _ u j p 2 \t _ 2 e \t u i f \t d i _ j 2 t \t p 1 \t e j t d s j n j 2 _ u j p 2 / \t P 2 f \t i v 2 e s f e \t z f _ s t \t m _ u f s - \t u i f \t O f h s p \t m j 3 f t \t p 2 \t _ \t m p 2 f m z \t j t m _ 2 e \t p 1 \t q p 3 f s u z \t j 2 \t u i f \t n j e t u \t p 1 \t _ \t 3 _ t u \t p d f _ 2 \t p 1 \t n _ u f s j _ m \t q s p t q f s j u z / \t P 2 f \t i v 2 e s f e \t z f _ s t \t m _ u f s - \t u i f \t O f h s p \t j t \t t u j m m \t m _ 2 h v j t i f e \t j 2 \t u i f \t d p s 2 f s t \t p 1 \t B n f s j d _ 2 \t t p d j f u z \t _ 2 e \t 1 j 2 e t \t i j n t f m 1 \t _ 2 \t f y j m f \t j 2 \t i j t \t p x 2 \t m _ 2 e / \t B 2 e \t t p \t x f ( 3 f \t d p n f \t i f s f \t u p e _ z \t u p \t e s _ n _ u j A f \t _ \t t i _ n f 1 v m \t d p 2 e j u j p 2 /'
random_cypherList = list(random_cypher)[0::2]
random_cypherList
englishText = "I am the First Accused.I hold a Bachelor's Degree in Arts and practised as an attorney in Johannesburg for a number of years in partnership with Oliver Tambo. I am a convicted prisoner serving five years for leaving the country without a permit and for inciting people to go on strike at the end of May 1961. At the outset, I want to say that the suggestion made by the State in its opening that the struggle in South Africa is under the influence of foreigners or communists is wholly incorrect. I have done whatever I did, both as an individual and as a leader of my people, because of my experience in South Africa and my own proudly felt African background, and not because of what any outsider might have said. In my youth in the Transkei I listened to the elders of my tribe telling stories of the old days. Amongst the tales they related to me were those of wars fought by our ancestors in defence of the fatherland. The names of Dingane and Bambata, Hintsa and Makana, Squngthi and Dalasile, Moshoeshoe and Sekhukhuni, were praised as the glory of the entire African nation. I hoped then that life might offer me the opportunity to serve my people and make my own humble contribution to their freedom struggle. This is what has motivated me in all that I have done in relation to the charges made against me in this case. Having said this, I must deal immediately and at some length with the question of violence. Some of the things so far told to the Court are true and some are untrue. I do not, however, deny that I planned sabotage. I did not plan it in a spirit of recklessness, nor because I have any love of violence. I planned it as a result of a calm and sober assessment of the political situation that had arisen after many years of tyranny, exploitation, and oppression of my people by the Whites."
```
|
github_jupyter
|
```
# Run in python console
import nltk; nltk.download('stopwords')
```
Import Packages
```
import re
import numpy as np
import pandas as pd
from pprint import pprint
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
%matplotlib inline
# Enable logging for gensim - optional
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
# NLTK Stop words
from nltk.corpus import stopwords
stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
```
Importing Lyrics data
```
# Import Dataset
df = pd.read_csv('')
df.head()
# df = df1.head(10)
print(df.genre.unique())
print(df.artist.unique())
print(df.year.unique())
```
Remove newline characters
```
# Convert to list
data = df.lyrics.values.tolist()
data = [re.sub('[^a-zA-Z ]', '', str(sent)) for sent in data]
pprint(data[:1])
def sent_to_words(sentences):
    for sentence in sentences:
        yield(gensim.utils.simple_preprocess(str(sentence), deacc=True))  # deacc=True removes punctuations
data_words = list(sent_to_words(data))
print(data_words[:1])
```
### Creating Bigram and Trigram Models
Bigrams are two words frequently occurring together in the document. Trigrams are three words frequently occurring together.
Some typical examples from other corpora are: ‘front_bumper’, ‘oil_leak’, ‘maryland_college_park’ etc.
Gensim’s Phrases model can build and implement the bigrams, trigrams, quadgrams and more. The two important arguments to Phrases are min_count and threshold. The higher the values of these parameters, the harder it is for words to be combined into bigrams.
```
# Build the bigram and trigram models
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# See trigram example
print(bigram_mod[data_words[0]])
```
### Remove Stopwords, Make Bigrams and Lemmatize
The bigrams model is ready. Let’s define the functions to remove the stopwords, make bigrams and lemmatization and call them sequentially.
```
# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts):
return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
def make_bigrams(texts):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
def lemmatization(texts, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
return texts_out
```
Let’s call the functions in order.
```
# Remove Stop Words
data_words_nostops = remove_stopwords(data_words)
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Initialize spacy 'en' model, keeping only tagger component (for efficiency)
# python3 -m spacy download en
nlp = spacy.load('en', disable=['parser', 'ner'])
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
print(data_lemmatized[:1])
```
### Create the Dictionary and Corpus needed for Topic Modeling
The two main inputs to the LDA topic model are the dictionary (`id2word`) and the corpus. Let’s create them.
```
# Create Dictionary
id2word = corpora.Dictionary(data_lemmatized)
# Create Corpus
texts = data_lemmatized
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
```
Gensim creates a unique id for each word in the document. The produced corpus shown above is a mapping of (word_id, word_frequency).
For example, (0, 1) above implies that word id 0 occurs once in the first document. Likewise, word id 1 occurs twice, and so on.
This is used as the input by the LDA model.
If you want to see what word a given id corresponds to, pass the id as a key to the dictionary.
```
id2word[10]
# Human readable format of corpus (term-frequency)
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
```
### Building the Topic Model
We have everything required to train the LDA model. In addition to the corpus and dictionary, you need to provide the number of topics as well.
Apart from that, alpha and eta are hyperparameters that affect the sparsity of the topics. According to the Gensim docs, both default to a 1.0/num_topics prior.
chunksize is the number of documents to be used in each training chunk, update_every determines how often the model parameters should be updated, and passes is the total number of training passes.
```
# Build LDA model
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=20,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
# Print the keywords for each of the 20 topics
pprint(lda_model.print_topics())
doc_lda = lda_model[corpus]
# Compute Perplexity
print('\nPerplexity: ', lda_model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
# Compute Coherence Score
coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# Visualize the topics
pyLDAvis.enable_notebook()
vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)
vis
mallet_path = '/Users/neha/Downloads/mallet-2.0.8/bin/mallet' # update this path
ldamallet = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=20, id2word=id2word)
# Show Topics
pprint(ldamallet.show_topics(formatted=False))
# Compute Coherence Score
coherence_model_ldamallet = CoherenceModel(model=ldamallet, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_ldamallet = coherence_model_ldamallet.get_coherence()
print('\nCoherence Score: ', coherence_ldamallet)
```
```
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
model = gensim.models.wrappers.LdaMallet(mallet_path, corpus=corpus, num_topics=num_topics, id2word=id2word)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
return model_list, coherence_values
# Can take a long time to run.
model_list, coherence_values = compute_coherence_values(dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=2, limit=40, step=6)
# Show graph
limit=40; start=2; step=6;
x = range(start, limit, step)
plt.plot(x, coherence_values)
plt.xlabel("Num Topics")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
# Print the coherence scores
for m, cv in zip(x, coherence_values):
print("Num Topics =", m, " has Coherence Value of", round(cv, 4))
# Select the model and print the topics
optimal_model = model_list[3]
model_topics = optimal_model.show_topics(formatted=False)
pprint(optimal_model.print_topics(num_words=10))
def format_topics_sentences(ldamodel=lda_model, corpus=corpus, texts=data):
# Init output
sent_topics_df = pd.DataFrame()
# Get main topic in each document
for i, row in enumerate(ldamodel[corpus]):
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => dominant topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
sent_topics_df.columns = ['Dominant_Topic', 'Perc_Contribution', 'Topic_Keywords']
# Add original text to the end of the output
contents = pd.Series(texts)
sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
return(sent_topics_df)
df_topic_sents_keywords = format_topics_sentences(ldamodel=optimal_model, corpus=corpus, texts=data)
# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
# Show
df_dominant_topic.head(10)
```
```
# Group top 5 sentences under each topic
sent_topics_sorteddf_mallet = pd.DataFrame()
sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant_Topic')
for i, grp in sent_topics_outdf_grpd:
sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet,
grp.sort_values(['Perc_Contribution'], ascending=[0]).head(1)],
axis=0)
# Reset Index
sent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)
# Format
sent_topics_sorteddf_mallet.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Text"]
# Show
sent_topics_sorteddf_mallet.head()
```
|
github_jupyter
|
# Evaluate AminoAcids Prediction
```
%matplotlib inline
import pylab
pylab.rcParams['figure.figsize'] = (15.0, 12.0)
import os
import sys
import numpy as np
import torch
from shutil import copyfile
from src.python.aa_predict import *
import src.python.aa_predict as AA
checkpoint_path = "../../data/trained/aapred_cnn_latest.tar"
emb_dim = 5
win_size = 10
model = GoodOldCNN(emb_dim, win_size)
if os.path.exists(checkpoint_path):
print("=> loading checkpoint '%s'" % checkpoint_path)
checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '%s' (epoch %s)" %
(checkpoint_path, checkpoint['epoch'] + 1))
else:
print("=> no checkpoint found at '%s'" % checkpoint_path)
```
### Define Evaluation Function(s)
```
import torch
import torch.nn as nn
from torch.autograd import Variable
from pymongo import MongoClient
```
### 1 2 3 Predict...
```
class_names = sorted(dictionary.keys(), key=lambda aa: dictionary[aa])
client = MongoClient("mongodb://127.0.0.1:27017")
db = client['prot2vec']
global collection_test, size_test, verbose
AA.collection_test = db['sprot']
AA.size_test = 100
AA.verbose = True
AA.use_cuda = False
batch_size = 32
loader = WindowBatchLoader(win_size, batch_size, False)
y_test, y_pred, _ = predict(model, loader)
# data = []
# for i, (x, y) in enumerate(loader):
# data.append((np.random.permutation(x), np.random.permutation(y)))
# y_test, y_pred, _ = predict(model, data)
```
### Evaluate
```
import itertools
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
# print(cm)
# print(cm.shape)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, y_pred, labels=list(range(25)))
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names,
title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
# plt.figure()
# plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
# title='Normalized confusion matrix')
plt.show()
```
### Plot Accuracy
```
###https://matplotlib.org/examples/api/barchart_demo.html
def plot_accuracy(title, scores):
N = len(scores)
acc = list(scores.values())
ind = np.arange(N) # the x locations for the groups
width = 0.2 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, acc, width, color='b')
# add some text for labels, title and axes ticks
ax.set_ylabel('Scores')
ax.set_title(title)
ax.set_xticks(ind)
ax.set_xticklabels(list(scores.keys()))
ax.legend((rects1,), ('acc',))
autolabel(rects1, ax)
def autolabel(rects, ax):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % height,
ha='center', va='bottom')
indx = [i for i, row in enumerate(cnf_matrix) if row[i] > 0]
acc_scores = {reverse_dictionary[i]:cnf_matrix[i, i]/np.sum(row)
for i, row in enumerate(cnf_matrix) if i in indx}
plot_accuracy("AA Prediction Accuracy", acc_scores)
plt.show()
import pandas as pd
aa_feat = pd.read_csv('Data/aa_feat.csv')
x = aa_feat["Occurrence.in.Bacteria.proteins....."][indx]
y = list(acc_scores.values())
labels = [reverse_dictionary[i] for i in indx]
def plot(x, y, labels, title):
xy = list(zip(x, y))
for i, label in enumerate(labels):
x, y = xy[i]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right', va='bottom')
plt.title(title)
plot(x, y, labels, "Prediction acc vs. % Occurrence in Data")
m, b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b, '-')
```
|
github_jupyter
|
# Nearest neighbors
This notebook illustrates the classification of the nodes of a graph by the [k-nearest neighbors algorithm](https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm), based on the labels of a few nodes.
```
from IPython.display import SVG
import numpy as np
from sknetwork.data import karate_club, painters, movie_actor
from sknetwork.classification import KNN
from sknetwork.embedding import GSVD
from sknetwork.visualization import svg_graph, svg_digraph, svg_bigraph
```
## Graphs
```
graph = karate_club(metadata=True)
adjacency = graph.adjacency
position = graph.position
labels_true = graph.labels
seeds = {i: labels_true[i] for i in [0, 33]}
knn = KNN(GSVD(3), n_neighbors=1)
labels_pred = knn.fit_transform(adjacency, seeds)
precision = np.round(np.mean(labels_pred == labels_true), 2)
precision
image = svg_graph(adjacency, position, labels=labels_pred, seeds=seeds)
SVG(image)
# soft classification (here probability of label 1)
knn = KNN(GSVD(3), n_neighbors=2)
knn.fit(adjacency, seeds)
membership = knn.membership_
scores = membership[:,1].toarray().ravel()
image = svg_graph(adjacency, position, scores=scores, seeds=seeds)
SVG(image)
```
## Directed graphs
```
graph = painters(metadata=True)
adjacency = graph.adjacency
position = graph.position
names = graph.names
rembrandt = 5
klimt = 6
cezanne = 11
seeds = {cezanne: 0, rembrandt: 1, klimt: 2}
knn = KNN(GSVD(3), n_neighbors=2)
labels = knn.fit_transform(adjacency, seeds)
image = svg_digraph(adjacency, position, names, labels=labels, seeds=seeds)
SVG(image)
# soft classification
membership = knn.membership_
scores = membership[:,0].toarray().ravel()
image = svg_digraph(adjacency, position, names, scores=scores, seeds=[cezanne])
SVG(image)
```
## Bipartite graphs
```
graph = movie_actor(metadata=True)
biadjacency = graph.biadjacency
names_row = graph.names_row
names_col = graph.names_col
inception = 0
drive = 3
budapest = 8
seeds_row = {inception: 0, drive: 1, budapest: 2}
knn = KNN(GSVD(3), n_neighbors=2)
labels_row = knn.fit_transform(biadjacency, seeds_row)
labels_col = knn.labels_col_
image = svg_bigraph(biadjacency, names_row, names_col, labels_row, labels_col, seeds_row=seeds_row)
SVG(image)
# soft classification
membership_row = knn.membership_row_
membership_col = knn.membership_col_
scores_row = membership_row[:,1].toarray().ravel()
scores_col = membership_col[:,1].toarray().ravel()
image = svg_bigraph(biadjacency, names_row, names_col, scores_row=scores_row, scores_col=scores_col,
seeds_row=seeds_row)
SVG(image)
```
|
github_jupyter
|
```
import pandas as pd
from matplotlib.ticker import FuncFormatter
from Cohort import CohortTable
import numpy as np
import altair as alt
import math
import seaborn as sns  # used for the line plots further below
from IPython.display import display, Markdown
# Pulled from class module; need to remove self references
def print_all_tables(self):
display(Markdown('## Productivity Table'))
display(Markdown('The following table contains the percentage of productivity for each cohort by year.'))
display(Markdown('The maximum percentage for each cell is 100% or 1. Any value less than 1 is used to discount the \
productivity of that cohort class for that particular year.\n'))
self.print_table(self.productivity_df, 'Productivity Table')
display(Markdown('## Employee Count before Attrition'))
    display(Markdown('This table shows the employee count for each year, by each cohort, if no attrition were to occur.\n'))
self.print_table(self.employee_count_df, 'Employee Count (Before Attrition) by Year', precision=0, create_sum=True, sum_title='Employees')
display(Markdown('## Attrition Mask Table'))
display(Markdown('This table represents the *percentage* of the cohort **population** that has left. The number for each cohort starts\
at 1 (or 100%) and decreases over time. If the argument *attrition_y0* is **TRUE**, the first year of the cohort\
is reduced by the annual attrition rate. Otherwise, attrition starts in the second year of each cohort.\n'))
self.print_table(pd.DataFrame(self.attrition_mask), 'Attrition Mask - 0% to 100% of Employee Count')
display(Markdown('## Retained Employees after Attrition'))
display(Markdown('This table contains the number of employees that remain with the company after accounting for attrition. This \
table contains only whole employees, not fractions, to illustrate when each person is expected to leave as opposed \
to the Full Time Equivalent (FTE) table below.\n'))
self.print_table(self.retained_employee_count_df, 'Employees, After Attrition, by Year', precision=0, create_sum=True, sum_title='Employees')
display(Markdown('## Full Time Equivalent Table'))
display(Markdown('This table takes the retained employees after attrition from the table above and calculates the \
number of FTE after applying mid-year hiring. We assume that hiring takes place throughout the year rather than have \
all employees hired on the first of the year. This results in a lower FTE figure for the first year of the cohort.\n'))
self.print_table(self.retained_fte_df, 'FTE Table', create_sum=True, sum_title='FTE')
display(Markdown('## Full Time Equivalent after Factoring Productivity Ramp Up'))
display(Markdown('This table takes the FTE figures from the table above and applies the ramp up in productivity.\n'))
self.print_table(self.retained_fte_factored_df, 'FTE After Applying Productivity Ramp', create_sum=True, sum_title='FTE')
display(Markdown('## Revenue Table'))
display(Markdown('This table takes the final FTE figures, after factoring for productivity ramp up periods, and calculates \
the total revenue per year and per cohort.\n'))
self.print_table(self.revenue_df, 'Total Revenue by Year', precision=0, create_sum=True, sum_title='Revenue')
def print_table(self, df, table_title, precision=2, create_sum=False, sum_title='Sum'):
df.index.name='Cohort'
if create_sum:
sum_title = 'Sum of '+sum_title
df.loc[sum_title] = df.sum()
format_string = '{:,.' + str(precision) + 'f}'
df_styled = df.style.format(format_string).set_caption(table_title)
display(df_styled)
myTable = CohortTable(forecast_period=10, n_years=3, hires_per_year=[1,2,2,3,4,6], \
revenue_goal=1000000, annual_attrition=.16, first_year_full_hire=True, attrition_y0=False)
myTable.print_all_tables()
ax = myTable.retained_fte_factored_df.loc['Sum of FTE'].plot(kind='bar', title='Revenue by Year')
ax.set_xlabel('Year')
ax.set_ylabel('Revenue')
ax.yaxis.set_major_formatter(FuncFormatter('{0:,.0f}'.format))
myTable.revenue_df.loc['Sum of Revenue'] = myTable.revenue_df.sum()
revenue_melt = myTable.revenue_df.loc[['Sum of Revenue']].melt(var_name='Year', value_name='Revenue')
chart = alt.Chart(revenue_melt).mark_area().encode(
x = alt.X('Year', sort=list(revenue_melt.index)),
y = alt.Y('Revenue'),
tooltip = ['Year', alt.Tooltip('Revenue', format=',.0f')]
).properties(title='Total Revenue by Year', width=600, height=400).interactive()
display(revenue_melt)
display(chart)
def size_list(l, length, pad=0):
if len(l) >= length:
del l[length:]
else:
l.extend([pad] * (length - len(l)))
return l
n_years = 5
forecast_period = 10
ramp_log = [math.log2(n) for n in np.delete(np.linspace(1,2,n_years+1),0)]
ramp_log_full = size_list(ramp_log, forecast_period, pad=1)
productivity_list = [np.roll(ramp_log_full, i) for i in range(forecast_period)]
productivity_list = np.triu(productivity_list)
pd.DataFrame(productivity_list)
ramp_exp = [math.exp(1-(1/n**2)) for n in np.delete(np.linspace(0,1,n_years+1),0)]
sns.lineplot(data=productivity_list[0])
def sigmoid(x, width, center):
return 1 / (1 + np.exp(width*(-x - center)))
sigmoid(-10, 0,0)
s_curve = [sigmoid(n, .1, 0) for n in np.linspace(-10,10,50)]
sns.lineplot(data=s_curve)
s_curve = [sigmoid(n, .3, -10) for n in np.linspace(-10,10,50)]
sns.lineplot(data=s_curve)
s_curve
```
|
github_jupyter
|
# WorkFlow
## Classes
## Load the data
## Test Modelling
## Modelling
**<hr>**
## Classes
```
NAME = "change the conv2d"
BATCH_SIZE = 32
import os
import cv2
import torch
import numpy as np
def load_data(img_size=112):
data = []
index = -1
labels = {}
for directory in os.listdir('./data/'):
index += 1
labels[f'./data/{directory}/'] = [index,-1]
print(len(labels))
for label in labels:
for file in os.listdir(label):
filepath = label + file
img = cv2.imread(filepath,cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img,(img_size,img_size))
img = img / 255.0
data.append([
np.array(img),
labels[label][0]
])
labels[label][1] += 1
for _ in range(12):
np.random.shuffle(data)
print(len(data))
np.save('./data.npy',data)
return data
import torch
def other_loading_data_proccess(data):
X = []
y = []
print('going through the data..')
for d in data:
X.append(d[0])
y.append(d[1])
print('splitting the data')
VAL_SPLIT = 0.25
VAL_SPLIT = len(X)*VAL_SPLIT
VAL_SPLIT = int(VAL_SPLIT)
X_train = X[:-VAL_SPLIT]
y_train = y[:-VAL_SPLIT]
X_test = X[-VAL_SPLIT:]
y_test = y[-VAL_SPLIT:]
print('turning data to tensors')
X_train = torch.from_numpy(np.array(X_train))
y_train = torch.from_numpy(np.array(y_train))
X_test = torch.from_numpy(np.array(X_test))
y_test = torch.from_numpy(np.array(y_test))
return [X_train,X_test,y_train,y_test]
```
**<hr>**
## Load the data
```
REBUILD_DATA = True
if REBUILD_DATA:
data = load_data()
np.random.shuffle(data)
X_train,X_test,y_train,y_test = other_loading_data_proccess(data)
```
## Test Modelling
```
import torch
import torch.nn as nn
import torch.nn.functional as F
# class Test_Model(nn.Module):
# def __init__(self):
# super().__init__()
# self.conv1 = nn.Conv2d(1, 6, 5)
# self.pool = nn.MaxPool2d(2, 2)
# self.conv2 = nn.Conv2d(6, 16, 5)
# self.fc1 = nn.Linear(16 * 25 * 25, 120)
# self.fc2 = nn.Linear(120, 84)
# self.fc3 = nn.Linear(84, 36)
# def forward(self, x):
# x = self.pool(F.relu(self.conv1(x)))
# x = self.pool(F.relu(self.conv2(x)))
# x = x.view(-1, 16 * 25 * 25)
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.fc3(x)
# return x
class Test_Model(nn.Module):
def __init__(self):
super().__init__()
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(1, 32, 5)
self.conv3 = nn.Conv2d(32,64,5)
self.conv2 = nn.Conv2d(64, 128, 5)
self.fc1 = nn.Linear(128 * 10 * 10, 512)
self.fc2 = nn.Linear(512, 256)
self.fc4 = nn.Linear(256,128)
self.fc3 = nn.Linear(128, 36)
def forward(self, x,shape=False):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv2(x)))
if shape:
print(x.shape)
x = x.view(-1, 128 * 10 * 10)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc4(x))
x = self.fc3(x)
return x
device = torch.device('cuda')
model = Test_Model().to(device)
preds = model(X_test.reshape(-1,1,112,112).float().to(device),True)
preds[0]
optimizer = torch.optim.SGD(model.parameters(),lr=0.1)
criterion = nn.CrossEntropyLoss()
EPOCHS = 5
loss_logs = []
from tqdm import tqdm
PROJECT_NAME = "Sign-Language-Recognition"
def test(net,X,y):
    correct = 0
    total = 0
    net.eval()
    with torch.no_grad():
        for i in range(len(X)):
            real_class = y[i].to(device)  # labels are integer class indices, not one-hot vectors
            net_out = net(X[i].view(-1,1,112,112).to(device).float())
            net_out = net_out[0]
            predicted_class = torch.argmax(net_out)
            if predicted_class == real_class:
                correct += 1
            total += 1
    net.train()
    return round(correct/total,3)
import wandb
len(os.listdir('./data/'))
import random
# index = random.randint(0,29)
# print(index)
# wandb.init(project=PROJECT_NAME,name=NAME)
# for _ in tqdm(range(EPOCHS)):
# for i in range(0,len(X_train),BATCH_SIZE):
# X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
# y_batch = y_train[i:i+BATCH_SIZE].to(device)
# model.to(device)
# preds = model(X_batch.float())
# loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':torch.argmax(y_batch[index])})
# wandb.finish()
import matplotlib.pyplot as plt
import pandas as pd
df = pd.Series(loss_logs)
df.plot.line(figsize=(12,6))
test(model,X_test,y_test)
test(model,X_train,y_train)
preds
X_testing = X_train
y_testing = y_train
correct = 0
total = 0
model.eval()
with torch.no_grad():
for i in range(len(X_testing)):
        real_class = y_testing[i].to(device)  # integer class index
        net_out = model(X_testing[i].view(-1,1,112,112).to(device).float())
        net_out = net_out[0]
        predicted_class = torch.argmax(net_out)
        # print(predicted_class)
        if predicted_class == real_class:
correct += 1
total += 1
print(round(correct/total,3))
# for real,pred in zip(y_batch,preds):
# print(real)
# print(torch.argmax(pred))
# print('\n')
```
## Modelling
```
# conv2d_output
# conv2d_1_ouput
# conv2d_2_ouput
# output_fc1
# output_fc2
# output_fc4
# max_pool2d_keranl
# max_pool2d
# num_of_linear
# activation
# best num of epochs
# best optimizer
# best loss
## best lr
class Test_Model(nn.Module):
def __init__(self,conv2d_output=128,conv2d_1_ouput=32,conv2d_2_ouput=64,output_fc1=512,output_fc2=256,output_fc4=128,output=36,activation=F.relu,max_pool2d_keranl=2):
super().__init__()
print(conv2d_output)
print(conv2d_1_ouput)
print(conv2d_2_ouput)
print(output_fc1)
print(output_fc2)
print(output_fc4)
print(activation)
self.conv2d_output = conv2d_output
self.pool = nn.MaxPool2d(max_pool2d_keranl)
self.conv1 = nn.Conv2d(1, conv2d_1_ouput, 5)
self.conv3 = nn.Conv2d(conv2d_1_ouput,conv2d_2_ouput,5)
self.conv2 = nn.Conv2d(conv2d_2_ouput, conv2d_output, 5)
self.fc1 = nn.Linear(conv2d_output * 10 * 10, output_fc1)
self.fc2 = nn.Linear(output_fc1, output_fc2)
self.fc4 = nn.Linear(output_fc2,output_fc4)
self.fc3 = nn.Linear(output_fc4, output)
self.activation = activation
def forward(self, x,shape=False):
x = self.pool(self.activation(self.conv1(x)))
x = self.pool(self.activation(self.conv3(x)))
x = self.pool(self.activation(self.conv2(x)))
if shape:
print(x.shape)
x = x.view(-1, self.conv2d_output * 10 * 10)
x = self.activation(self.fc1(x))
x = self.activation(self.fc2(x))
x = self.activation(self.fc4(x))
x = self.fc3(x)
return x
# conv2d_output
# conv2d_1_ouput
# conv2d_2_ouput
# output_fc1
# output_fc2
# output_fc4
# max_pool2d_keranl
# max_pool2d
# num_of_linear
# best num of epochs
# best loss
## best lr
# batch size
EPOCHS = 3
BATCH_SIZE = 32
# conv2d_output
# conv2d_1_ouput
# conv2d_2_ouput
# output_fc1
# output_fc2
# output_fc4
# max_pool2d_keranl
# max_pool2d
# num_of_linear
# activation =
# best num of epochs
# best optimizer =
# best loss
## best lr
def get_loss(criterion,y,model,X):
    # Validation loss only; no backward pass is run here,
    # so the training gradients are left untouched.
    model.eval()
    with torch.no_grad():
        preds = model(X.view(-1,1,112,112).to(device).float())
        loss = criterion(preds,y.to(device).long())
    model.train()
    return loss.item()
# Compare optimizers. Note: torch.optim.SparseAdam only supports sparse
# gradients, so it is left out of this sweep.
optimizers = [torch.optim.SGD,torch.optim.Adadelta,torch.optim.Adagrad,torch.optim.Adam,torch.optim.AdamW,torch.optim.Adamax]
criterion = nn.CrossEntropyLoss()
for optimizer_class in optimizers:
    model = Test_Model(activation=nn.ReLU()).to(device)
    optimizer = optimizer_class(model.parameters(),lr=0.1)
    index = random.randint(0,BATCH_SIZE-1)
    wandb.init(project=PROJECT_NAME,name=f'optimizer-{optimizer_class.__name__}')
    for _ in tqdm(range(EPOCHS)):
        for i in range(0,len(X_train),BATCH_SIZE):
            X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
            y_batch = y_train[i:i+BATCH_SIZE].to(device)
            preds = model(X_batch.float())
            loss = criterion(preds,y_batch.long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        wandb.log({'loss':loss.item(),
                   'accuracy':test(model,X_train,y_train)*100,
                   'val_accuracy':test(model,X_test,y_test)*100,
                   'pred':torch.argmax(preds[index % len(preds)]),
                   'real':y_batch[index % len(y_batch)],
                   'val_loss':get_loss(criterion,y_test,model,X_test)})
    for k in range(min(5,len(preds))):
        print(f'{torch.argmax(preds[k])} \n {y_batch[k]}')
    wandb.finish()
# activations = [nn.ELU(),nn.LeakyReLU(),nn.PReLU(),nn.ReLU(),nn.ReLU6(),nn.RReLU(),nn.SELU(),nn.CELU(),nn.GELU(),nn.SiLU(),nn.Tanh()]
# for activation in activations:
# model = Test_Model(activation=activation)
# optimizer = torch.optim.SGD(model.parameters(),lr=0.1)
# criterion = nn.CrossEntropyLoss()
# index = random.randint(0,29)
# print(index)
# wandb.init(project=PROJECT_NAME,name=f'activation-{activation}')
# for _ in tqdm(range(EPOCHS)):
# for i in range(0,len(X_train),BATCH_SIZE):
# X_batch = X_train[i:i+BATCH_SIZE].view(-1,1,112,112).to(device)
# y_batch = y_train[i:i+BATCH_SIZE].to(device)
# model.to(device)
# preds = model(X_batch.float())
# loss = criterion(preds,torch.tensor(y_batch,dtype=torch.long))
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# wandb.log({'loss':loss.item(),'accuracy':test(model,X_train,y_train)*100,'val_accuracy':test(model,X_test,y_test)*100,'pred':torch.argmax(preds[index]),'real':torch.argmax(y_batch[index]),'val_loss':get_loss(criterion,y_test,model,X_test)})
# print(f'{torch.argmax(preds[index])} \n {y_batch[index]}')
# print(f'{torch.argmax(preds[1])} \n {y_batch[1]}')
# print(f'{torch.argmax(preds[2])} \n {y_batch[2]}')
# print(f'{torch.argmax(preds[3])} \n {y_batch[3]}')
# print(f'{torch.argmax(preds[4])} \n {y_batch[4]}')
# wandb.finish()
for real,pred in zip(y_batch,preds):
print(real)
print(torch.argmax(pred))
print('\n')
```
|
github_jupyter
|
# Modes of a Vibrating Building
In this notebook we will find the vibrational modes of a simple model of a building. We will assume that the mass of the floors is much greater than the mass of the walls and that the lateral stiffness of the walls can be modeled by a simple linear spring. We will investigate how the building may vibrate under initial conditions that could be caused by a gust of wind, and during ground vibration.
```
from IPython.display import YouTubeVideo
YouTubeVideo('g0cz-oDfUg0', width=600)
YouTubeVideo('hSwjkG3nv1c', width=600)
YouTubeVideo('kzVvd4Dk6sw', width=600)
import numpy as np
import matplotlib.pyplot as plt
from resonance.linear_systems import FourStoryBuildingSystem
```
This gives a bit nicer printing of large NumPy arrays.
```
np.set_printoptions(precision=5, linewidth=100, suppress=True)
%matplotlib notebook
```
# Simulate the four story building
```
sys = FourStoryBuildingSystem()
sys.constants
sys.coordinates
sys.plot_configuration();
traj = sys.free_response(30, sample_rate=10)
traj[list(sys.coordinates.keys())].plot(subplots=True);
sys.animate_configuration(fps=10)
M, C, K = sys.canonical_coefficients()
M
C
K
```
# Exercise
The system can be normalized by the mass matrix and transformed into a symmetric eigenvalue problem by introducing the new coordinate vector:
$$\mathbf{q}=\mathbf{L}^T\mathbf{x}$$
$\mathbf{L}$ is the Cholesky decomposition of the symmetric mass matrix, i.e. $\mathbf{M}=\mathbf{L}\mathbf{L}^T$.
The equation of motion becomes:
$$\ddot{\mathbf{q}} + \tilde{\mathbf{K}} \mathbf{q} = 0$$
Compute $\tilde{\mathbf{K}}$.
```
L = np.linalg.cholesky(M)
L
M**0.5
import numpy.linalg as la
from numpy.linalg import inv
K_tilde = inv(L) @ K @ inv(L.T)
K_tilde
```
Notice that $\tilde{\mathbf{K}}$ is symmetric, so we are guaranteed to get real eigenvalues and orthogonal eigenvectors when solving this system.
# Exercise
Find the eigenvalues and eigenvectors. Create the spectral matrix $\mathbf{\Lambda}$ and the matrix $P$ which contains the orthonormal eigenvectors of $\tilde{\mathbf{K}}$.
$$
\mathbf{P} = \left[ \mathbf{v}_1, \ldots, \mathbf{v}_4 \right]
$$
```
evals, evecs = np.linalg.eig(K_tilde)
evals
evecs
Lambda = np.diag(evals)
Lambda
P = evecs
```
# Exercise
Prove that the eigenvectors in $\mathbf{P}$ are orthonormal.
```
np.dot(P[:, 0], P[:, 1])
np.linalg.norm(P[:, 0])
P[:, 0].T @ P[:, 1]
P[:, 0].T @ P[:, 0]
```
An orthonormal matrix has the property that its transpose multiplied by itself is the identity matrix.
```
P.T @ P
```
# Exercise
Find the natural frequencies of the system in both radians per second and Hertz, and store them in arrays, in the order of the eigenvalues, with names `ws` and `fs`.
```
ws = np.sqrt(evals)
ws
fs = ws / 2 / np.pi
fs
```
# Exercise
Transform the eigenvectors back into the coordinate system associated with $\mathbf{x}$.
$$
\mathbf{S} = \left[ \mathbf{u}_1, \ldots, \mathbf{u}_4 \right]
$$
```
S = np.linalg.inv(L.T) @ P
S
sys.coordinates
```
# Exercise: visualize the modeshapes
The eigenmodes (mode shapes) are contained in each column of $\mathbf{S}$. Create a plot for each mode shape with these specifications:
- The title of each plot should be the frequency of the corresponding modeshape in Hz.
- The y axis should be made up of the values [0, 3, 6, 9, 12] meters.
- The x axis should plot the five values. The first should be zero and the remaining values should be the components of the mode shape in order of the component associated with the lowest floor to the highest.
- Plot lines with small circles at each data point.
```
S[:, 0]
np.hstack((0, S[:, 0]))
u1 = S[:, 0]
u1
u1[::-1]
S[:, 2]
fig, axes = plt.subplots(1, 4)
for i in range(4):
axes[i].plot(np.hstack((0, S[:, i])), [0, 3, 6, 9, 12], marker='o')
axes[i].set_title('{:1.2f} Hz'.format(fs[i]))
plt.tight_layout()
fs[0]
S[:, 0]
sys.coordinates['x1'] = S[0, 2]
sys.coordinates['x2'] = S[1, 2]
sys.coordinates['x3'] = S[2, 2]
sys.coordinates['x4'] = S[3, 2]
traj = sys.free_response(30, sample_rate=10)
traj[list(sys.coordinates.keys())].plot(subplots=True)
sys.animate_configuration(fps=10)
```
# Simulating the trajectory
The trajectory of building's coordinates can be found with:
$$
\mathbf{x}(t) = \sum_{i=1}^n c_i \sin(\omega_i t + \phi_i) \mathbf{u}_i
$$
where
$$
\phi_i = \arctan \frac{\omega_i \mathbf{v}_i^T \mathbf{q}_0}{\mathbf{v}_i^T \dot{\mathbf{q}}_0}
$$
and
$$
c_i = \frac{\mathbf{v}^T_i \mathbf{q}_0}{\sin\phi_i}
$$
$c_i$ are the modal participation factors and reflect what proportion of each mode is excited given specific initial conditions. If the initial conditions equal the eigenmode $\mathbf{u}_i$, then all but the $i$th $c_i$ will be zero.
# Exercise
Show that if $\mathbf{q}_0 = \mathbf{v}_i$ then $c_i = 1$ and all other modal participation factors are 0. Also, report all of the phase angles, $\phi_i$, in degrees.
```
for i in range(4):
x0 = S[:, i]
xd0 = np.zeros(4)
print(x0)
q0 = L.T @ x0
qd0 = L.T @ xd0
    phis = np.arctan2(ws * P.T @ q0, P.T @ qd0)
print(np.rad2deg(phis))
cs = P.T @ q0 / np.sin(phis)
print(cs)
print('=' * 40)
```
# Exercise
Create a function called `simulate()` that returns the trajectories of the coordinates given an array of monotonically increasing time values and the initial conditions of the system.
It should look like:
```python
def simulate(t, x0, xd0):
"""Returns the state trajectory.
Parameters
==========
t : ndarray, shape(m,)
Monotonic values of time.
x0 : ndarray, shape(n,)
The initial conditions of each coordinate.
xd0 : ndarray, shape(n,)
The initial conditions of each speed.
Returns
=======
x : ndarray, shape(m, n)
The trajectories of each state.
"""
# your code here
return x
```
```
def simulate(t, x0, xd0):
q0 = L.T @ x0
qd0 = L.T @ xd0
    phis = np.arctan2(ws * P.T @ q0, P.T @ qd0)
cs = P.T @ q0 / np.sin(phis)
x = np.zeros((len(x0), len(t)))
for ci, wi, phii, ui in zip(cs, ws, phis, S.T):
x += ci * np.sin(wi * t + phii) * np.tile(ui, (len(t), 1)).T
return x
```
# Exercise
Using the plotting function below, show that the results found here are the same as the simulations from the `FourStoryBuildingSystem` given the same initial conditions.
```
def plot_trajectories(t, x):
fig, axes = plt.subplots(4, 1)
for i, ax in enumerate(axes.flatten()):
ax.plot(t, x[i])
ax.set_ylabel(r'$x_{}$ [m]'.format(i + 1))
ax.set_xlabel('Time [s]')
plt.tight_layout()
t = np.linspace(0, 50, num=50 * 60)
x0 = np.array([0.001, 0.010, 0.020, 0.025])
xd0 = np.zeros(4)
x = simulate(t, x0, xd0)
plot_trajectories(t, x)
```
This shows the plot of a single mode:
```
x = simulate(t, S[:, 0], np.zeros(4))
plot_trajectories(t, x)
```
|
github_jupyter
|
<!-- dom:TITLE: Week 2 January 11-15: Introduction to the course and start Variational Monte Carlo -->
# Week 2 January 11-15: Introduction to the course and start Variational Monte Carlo
<!-- dom:AUTHOR: Morten Hjorth-Jensen Email [email protected] at Department of Physics and Center for Computing in Science Education, University of Oslo, Oslo, Norway & Department of Physics and Astronomy and Facility for Rare Ion Beams, Michigan State University, East Lansing, Michigan, USA -->
<!-- Author: -->
**Morten Hjorth-Jensen Email [email protected]**, Department of Physics and Center for Computing in Science Education, University of Oslo, Oslo, Norway and Department of Physics and Astronomy and Facility for Rare Ion Beams, Michigan State University, East Lansing, Michigan, USA
Date: **Jan 14, 2021**
Copyright 1999-2021, Morten Hjorth-Jensen Email [email protected]. Released under CC Attribution-NonCommercial 4.0 license
## Overview of week 2
**Topics.**
* Introduction to the course and overview of topics to be covered
* Introduction to Variational Monte Carlo methods, Metropolis Algorithm, statistics and Markov Chain theory
**Teaching Material, videos and written material.**
* Asynchronous videos
* Lecture notes and reading assignments
* Additional (often recommended) background material
## Textbook
There are no unique textbooks which cover the material to be discussed. For each week however, we will, in addition to our own lecture notes, send links to additional literature. This can be articles or chapters from other textbooks.
A useful textbook is however
* [Bernd A. Berg, *Markov Chain Monte Carlo Simulations and their Statistical Analysis*, World Scientific, 2004](https://www.worldscientific.com/worldscibooks/10.1142/5602), chapters 1, 2
This book has its main focus on spin-models, but many of the concepts are general. Chapters 1 and 2 contain a good discussion of the statistical foundation.
## Aims
* Be able to apply central many-particle methods like the Variational Monte Carlo method to properties of many-fermion systems and many-boson systems.
* Understand how to simulate quantum mechanical systems with many interacting particles. The methods are relevant for atomic, molecular, solid state, materials science, nanotechnology, quantum chemistry and nuclear physics.
* Learn to manage and structure larger projects, with unit tests, object orientation and writing clean code
* Learn about a proper statistical analysis of large data sets
* Learn to optimize with convex optimization methods functions that depend on many variables.
* Parallelization and code optimizations
## Lectures and ComputerLab
* Lectures: Thursday (2.15pm-4pm). First time January 14. Last lecture May 6.
* Computerlab: Thursday (4.15pm-7pm), first time January 14, last lab session May 6.
* Weekly plans and all other information are on the webpage of the course
* **First project to be handed in March 26**.
* **Second and final project to be handed in May 31.**
* There is no final exam, only project work.
## Course Format
* Two compulsory projects. Electronic reports only. You are free to choose your format. We use devilry to hand in the projects.
* Evaluation and grading: The two projects count 1/2 each of the final mark. No exam.
* The computer lab (room 397 in the Physics building) has no PCs, so please bring your own laptops. C/C++ is the default programming language, but programming languages like Fortran2008, Rust, Julia, and/or Python can also be used. All source codes discussed during the lectures can be found at the webpage of the course.
## Topics covered in this course
* Parallelization (MPI and OpenMP), high-performance computing topics. Choose between Python, Fortran2008 and/or C++ as programming languages.
* Algorithms for Monte Carlo Simulations (multidimensional integrals), Metropolis-Hastings and importance sampling algorithms. Improved Monte Carlo methods.
* Statistical analysis of data from Monte Carlo calculations, bootstrapping, jackknife and blocking methods.
* Eigenvalue solvers
* For project 2 there will be at least three variants:
a. Variational Monte Carlo for fermions
b. Hartree-Fock theory for fermions
c. Coupled cluster theory for fermions (iterative methods)
d. Neural networks and Machine Learning to solve the same problems as in project 1
e. Eigenvalue problems with deep learning methods
f. Possible project on quantum computing
## Topics covered in this course
* Search for minima in multidimensional spaces (conjugate gradient method, steepest descent method, quasi-Newton-Raphson, Broyden-Jacobian). Convex optimization, gradient methods
* Iterative methods for solutions of non-linear equations.
* Object orientation
* Data analysis and resampling techniques
* Variational Monte Carlo (VMC) for 'ab initio' studies of quantum mechanical many-body systems.
* Simulation of two- and three-dimensional systems like quantum dots or atoms and molecules or systems from solid state physics
* **Simulation of trapped bosons using VMC (project 1, default)**
* **Machine learning and neural networks (project 2, default, same system as in project 1)**
* Extension of project 1 to fermionic systems (project 2)
* Coupled cluster theory (project 2, depends on interest)
* Other quantum-mechanical methods and systems can be tailored to one's interests (Hartree-Fock Theory, Many-body perturbation theory, time-dependent theories and more).
## Quantum Monte Carlo Motivation
Most quantum mechanical problems of interest in for example atomic, molecular, nuclear and solid state
physics consist of a large number of interacting electrons and ions or nucleons.
The total number of particles $N$ is usually sufficiently large
that an exact solution cannot be found.
Typically,
the expectation value for a chosen hamiltonian for a system of $N$ particles is
$$
\langle H \rangle =
\frac{\int d\boldsymbol{R}_1d\boldsymbol{R}_2\dots d\boldsymbol{R}_N
\Psi^{\ast}(\boldsymbol{R_1},\boldsymbol{R}_2,\dots,\boldsymbol{R}_N)
H(\boldsymbol{R_1},\boldsymbol{R}_2,\dots,\boldsymbol{R}_N)
\Psi(\boldsymbol{R_1},\boldsymbol{R}_2,\dots,\boldsymbol{R}_N)}
{\int d\boldsymbol{R}_1d\boldsymbol{R}_2\dots d\boldsymbol{R}_N
\Psi^{\ast}(\boldsymbol{R_1},\boldsymbol{R}_2,\dots,\boldsymbol{R}_N)
\Psi(\boldsymbol{R_1},\boldsymbol{R}_2,\dots,\boldsymbol{R}_N)},
$$
an in general intractable problem.
This integral is actually the starting point in a Variational Monte Carlo calculation. **Gaussian quadrature: Forget it**! Given 10 particles and 10 mesh points for each degree of freedom
and an
ideal 1 Tflops machine (all operations take the same time), how long will it take to compute the above integral? The lifetime of the universe is of the order of $10^{17}$ s.
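A quick back-of-the-envelope check (a sketch only, assuming three spatial dimensions per particle and, very optimistically, a single floating-point operation per integration point) makes the point:
```
# Cost of brute-force quadrature for the integral above
n_particles = 10
mesh_points_per_dof = 10
n_dof = 3 * n_particles                      # 30 degrees of freedom
n_points = mesh_points_per_dof ** n_dof      # 10**30 integration points
flops = 1e12                                 # ideal 1 Tflops machine
print(f"{n_points / flops:.0e} seconds")     # ~1e18 s, longer than the ~1e17 s age of the universe
```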
## Quantum Monte Carlo Motivation
As an example from the nuclear many-body problem, we have Schroedinger's equation as a differential equation
$$
\hat{H}\Psi(\boldsymbol{r}_1,..,\boldsymbol{r}_A,\alpha_1,..,\alpha_A)=E\Psi(\boldsymbol{r}_1,..,\boldsymbol{r}_A,\alpha_1,..,\alpha_A)
$$
where
$$
\boldsymbol{r}_1,..,\boldsymbol{r}_A,
$$
are the coordinates and
$$
\alpha_1,..,\alpha_A,
$$
are sets of relevant quantum numbers such as spin and isospin for a system of $A$ nucleons ($A=N+Z$, $N$ being the number of neutrons and $Z$ the number of protons).
## Quantum Monte Carlo Motivation
There are
$$
2^A\times \left(\begin{array}{c} A\\ Z\end{array}\right)
$$
coupled second-order differential equations in $3A$ dimensions.
For a nucleus like beryllium-10 this number is **215040**.
This is a truly challenging many-body problem.
Methods like partial differential equations can at most be used for 2-3 particles.
## Various many-body methods
* Monte-Carlo methods
* Renormalization group (RG) methods, in particular density matrix RG
* Large-scale diagonalization (iterative methods, the Lanczos method, dimensionalities of $10^{10}$ states)
* Coupled cluster theory, favoured method in quantum chemistry, molecular and atomic physics. Applications to ab initio calculations in nuclear physics as well for large nuclei.
* Perturbative many-body methods
* Green's function methods
* Density functional theory/Mean-field theory and Hartree-Fock theory
The physics of the system hints at which many-body methods to use.
## Quantum Monte Carlo Motivation
**Pros and Cons of Monte Carlo.**
* Is physically intuitive.
* Allows one to study systems with many degrees of freedom. Diffusion Monte Carlo (DMC) and Green's function Monte Carlo (GFMC) yield in principle the exact solution to Schroedinger's equation.
* Variational Monte Carlo (VMC) is easy to implement but needs a reliable trial wave function, which can be difficult to obtain. This is where we will use Hartree-Fock theory to construct an optimal basis.
* DMC/GFMC for fermions (spin with half-integer values: electrons, baryons, neutrinos, quarks) has a sign problem. Nature prefers an anti-symmetric wave function. The PDF is in this case given by the distribution of random walkers.
* The solution has a statistical error, which can be large.
* There is a limit for how large systems one can study, DMC needs a huge number of random walkers in order to achieve stable results.
* Obtain only the lowest-lying states with a given symmetry. Can get excited states with extra labor.
## Quantum Monte Carlo Motivation
**Where and why do we use Monte Carlo Methods in Quantum Physics.**
* Quantum systems with many particles at finite temperature: Path Integral Monte Carlo with applications to dense matter and quantum liquids (phase transitions from normal fluid to superfluid). Strong correlations.
* Bose-Einstein condensation of dilute gases, where the method transitions from a non-linear PDE description to Diffusion Monte Carlo as the density increases.
* Light atoms, molecules, solids and nuclei.
* Lattice Quantum-Chromo Dynamics. Impossible to solve without MC calculations.
* Simulations of systems in solid state physics, from semiconductors to spin systems. Many electrons active and possibly strong correlations.
## Quantum Monte Carlo Motivation
We start with the variational principle.
Given a hamiltonian $H$ and a trial wave function $\Psi_T$, the variational principle states that the expectation value of $\langle H \rangle$, defined through
$$
E[H]= \langle H \rangle =
\frac{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})H(\boldsymbol{R})\Psi_T(\boldsymbol{R})}
{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})\Psi_T(\boldsymbol{R})},
$$
is an upper bound to the ground state energy $E_0$ of the hamiltonian $H$, that is
$$
E_0 \le \langle H \rangle .
$$
In general, the integrals involved in the calculation of various expectation values are multi-dimensional ones. Traditional integration methods such as the Gauss-Legendre will not be adequate for say the computation of the energy of a many-body system.
## Quantum Monte Carlo Motivation
The trial wave function can be expanded in the eigenstates of the hamiltonian since they form a complete set, viz.,
$$
\Psi_T(\boldsymbol{R})=\sum_i a_i\Psi_i(\boldsymbol{R}),
$$
and assuming the set of eigenfunctions to be normalized one obtains
$$
\frac{\sum_{nm}a^*_ma_n \int d\boldsymbol{R}\Psi^{\ast}_m(\boldsymbol{R})H(\boldsymbol{R})\Psi_n(\boldsymbol{R})}
{\sum_{nm}a^*_ma_n \int d\boldsymbol{R}\Psi^{\ast}_m(\boldsymbol{R})\Psi_n(\boldsymbol{R})} =\frac{\sum_{n}a^2_n E_n}
{\sum_{n}a^2_n} \ge E_0,
$$
where we used that $H(\boldsymbol{R})\Psi_n(\boldsymbol{R})=E_n\Psi_n(\boldsymbol{R})$.
In general, the integrals involved in the calculation of various expectation
values are multi-dimensional ones.
The variational principle yields the lowest state of a given symmetry.
## Quantum Monte Carlo Motivation
In most cases, a wave function has only small values in large parts of
configuration space, and a straightforward procedure which uses
homogeneously distributed random points in configuration space
will most likely lead to poor results. This may suggest that some kind
of importance sampling combined with e.g., the Metropolis algorithm
may be a more efficient way of obtaining the ground state energy.
The hope is then that those regions of configurations space where
the wave function assumes appreciable values are sampled more
efficiently.
## Quantum Monte Carlo Motivation
The tedious part in a VMC calculation is the search for the variational
minimum. A good knowledge of the system is required in order to carry out
reasonable VMC calculations. This is not always the case,
and often VMC calculations
serve rather as the starting
point for so-called diffusion Monte Carlo calculations (DMC). DMC is a way of
solving exactly the many-body Schroedinger equation by means of
a stochastic procedure. A good guess on the binding energy
and its wave function is however necessary.
A carefully performed VMC calculation can aid in this context.
## Quantum Monte Carlo Motivation
* Construct first a trial wave function $\psi_T(\boldsymbol{R},\boldsymbol{\alpha})$, for a many-body system consisting of $N$ particles located at positions $\boldsymbol{R}=(\boldsymbol{R}_1,\dots ,\boldsymbol{R}_N)$. The trial wave function depends on $M$ variational parameters $\boldsymbol{\alpha}=(\alpha_1,\dots ,\alpha_M)$.
* Then we evaluate the expectation value of the hamiltonian $H$
$$
E[H]=\langle H \rangle =
\frac{\int d\boldsymbol{R}\Psi^{\ast}_{T}(\boldsymbol{R},\boldsymbol{\alpha})H(\boldsymbol{R})\Psi_{T}(\boldsymbol{R},\boldsymbol{\alpha})}
{\int d\boldsymbol{R}\Psi^{\ast}_{T}(\boldsymbol{R},\boldsymbol{\alpha})\Psi_{T}(\boldsymbol{R},\boldsymbol{\alpha})}.
$$
* Thereafter we vary $\alpha$ according to some minimization algorithm and return to the first step.
## Quantum Monte Carlo Motivation
**Basic steps.**
Choose a trial wave function
$\psi_T(\boldsymbol{R})$.
$$
P(\boldsymbol{R})= \frac{\left|\psi_T(\boldsymbol{R})\right|^2}{\int \left|\psi_T(\boldsymbol{R})\right|^2d\boldsymbol{R}}.
$$
This is our new probability distribution function (PDF).
The approximation to the expectation value of the Hamiltonian is now
$$
E[H(\boldsymbol{\alpha})] =
\frac{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R},\boldsymbol{\alpha})H(\boldsymbol{R})\Psi_T(\boldsymbol{R},\boldsymbol{\alpha})}
{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R},\boldsymbol{\alpha})\Psi_T(\boldsymbol{R},\boldsymbol{\alpha})}.
$$
## Quantum Monte Carlo Motivation
Define a new quantity
<!-- Equation labels as ordinary links -->
<div id="eq:locale1"></div>
$$
E_L(\boldsymbol{R},\boldsymbol{\alpha})=\frac{1}{\psi_T(\boldsymbol{R},\boldsymbol{\alpha})}H\psi_T(\boldsymbol{R},\boldsymbol{\alpha}),
\label{eq:locale1} \tag{1}
$$
called the local energy, which, together with our trial PDF yields
<!-- Equation labels as ordinary links -->
<div id="eq:vmc1"></div>
$$
E[H(\boldsymbol{\alpha})]=\int P(\boldsymbol{R})E_L(\boldsymbol{R}) d\boldsymbol{R}\approx \frac{1}{N}\sum_{i=1}^N E_L(\boldsymbol{R_i},\boldsymbol{\alpha})
\label{eq:vmc1} \tag{2}
$$
with $N$ being the number of Monte Carlo samples.
## Quantum Monte Carlo
The Algorithm for performing a variational Monte Carlo calculations runs thus as this
* Initialisation: Fix the number of Monte Carlo steps. Choose an initial $\boldsymbol{R}$ and variational parameters $\alpha$ and calculate $\left|\psi_T^{\alpha}(\boldsymbol{R})\right|^2$.
* Initialise the energy and the variance and start the Monte Carlo calculation.
* Calculate a trial position $\boldsymbol{R}_p=\boldsymbol{R}+r*step$ where $r$ is a random variable $r \in [0,1]$.
* Use the Metropolis algorithm to accept or reject this move, based on the ratio $w = P(\boldsymbol{R}_p)/P(\boldsymbol{R})$.
* If the step is accepted, then we set $\boldsymbol{R}=\boldsymbol{R}_p$.
* Update averages
* Finish and compute final averages.
Observe that the jumping in space is governed by the variable *step*. This is called brute-force sampling.
Need importance sampling to get more relevant sampling, see lectures below.
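A minimal Python sketch of the brute-force algorithm above could look as follows. The functions `pdf` and `local_energy` are placeholders for a user-supplied trial PDF and local energy (they are not defined in these notes), and a symmetric step $\mathbf{R}_p=\mathbf{R}+(r-1/2)\,step$ is used so that the proposal satisfies detailed balance.
```
import numpy as np

def vmc_brute_force(pdf, local_energy, alpha, n_particles, dim,
                    n_steps=100_000, step=1.0, seed=0):
    """Brute-force Metropolis sampling of a trial PDF; returns <E_L> and var(E_L)."""
    rng = np.random.default_rng(seed)
    R = step * (rng.random((n_particles, dim)) - 0.5)    # initial configuration
    p_old = pdf(R, alpha)
    energies = np.empty(n_steps)
    for i in range(n_steps):
        R_trial = R + step * (rng.random((n_particles, dim)) - 0.5)
        p_new = pdf(R_trial, alpha)
        if rng.random() <= p_new / p_old:                # Metropolis acceptance test
            R, p_old = R_trial, p_new
        energies[i] = local_energy(R, alpha)             # update averages
    return energies.mean(), energies.var()
```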
## Quantum Monte Carlo: hydrogen atom
The radial Schroedinger equation for the hydrogen atom can be
written as
$$
-\frac{\hbar^2}{2m}\frac{\partial^2 u(r)}{\partial r^2}-
\left(\frac{ke^2}{r}-\frac{\hbar^2l(l+1)}{2mr^2}\right)u(r)=Eu(r),
$$
or with dimensionless variables
<!-- Equation labels as ordinary links -->
<div id="eq:hydrodimless1"></div>
$$
-\frac{1}{2}\frac{\partial^2 u(\rho)}{\partial \rho^2}-
\frac{u(\rho)}{\rho}+\frac{l(l+1)}{2\rho^2}u(\rho)-\lambda u(\rho)=0,
\label{eq:hydrodimless1} \tag{3}
$$
with the hamiltonian
$$
H=-\frac{1}{2}\frac{\partial^2 }{\partial \rho^2}-
\frac{1}{\rho}+\frac{l(l+1)}{2\rho^2}.
$$
Use variational parameter $\alpha$ in the trial
wave function
<!-- Equation labels as ordinary links -->
<div id="eq:trialhydrogen"></div>
$$
u_T^{\alpha}(\rho)=\alpha\rho e^{-\alpha\rho}.
\label{eq:trialhydrogen} \tag{4}
$$
## Quantum Monte Carlo: hydrogen atom
Inserting this wave function into the expression for the
local energy $E_L$ gives
$$
E_L(\rho)=-\frac{1}{\rho}-
\frac{\alpha}{2}\left(\alpha-\frac{2}{\rho}\right).
$$
A simple variational Monte Carlo calculation results in
<table border="1">
<thead>
<tr><th align="center"> $\alpha$ </th> <th align="center">$\langle H \rangle $</th> <th align="center"> $\sigma^2$</th> <th align="center">$\sigma/\sqrt{N}$</th> </tr>
</thead>
<tbody>
<tr><td align="center"> 7.00000E-01 </td> <td align="center"> -4.57759E-01 </td> <td align="center"> 4.51201E-02 </td> <td align="center"> 6.71715E-04 </td> </tr>
<tr><td align="center"> 8.00000E-01 </td> <td align="center"> -4.81461E-01 </td> <td align="center"> 3.05736E-02 </td> <td align="center"> 5.52934E-04 </td> </tr>
<tr><td align="center"> 9.00000E-01 </td> <td align="center"> -4.95899E-01 </td> <td align="center"> 8.20497E-03 </td> <td align="center"> 2.86443E-04 </td> </tr>
<tr><td align="center"> 1.00000E-00 </td> <td align="center"> -5.00000E-01 </td> <td align="center"> 0.00000E+00 </td> <td align="center"> 0.00000E+00 </td> </tr>
<tr><td align="center"> 1.10000E+00 </td> <td align="center"> -4.93738E-01 </td> <td align="center"> 1.16989E-02 </td> <td align="center"> 3.42036E-04 </td> </tr>
<tr><td align="center"> 1.20000E+00 </td> <td align="center"> -4.75563E-01 </td> <td align="center"> 8.85899E-02 </td> <td align="center"> 9.41222E-04 </td> </tr>
<tr><td align="center"> 1.30000E+00 </td> <td align="center"> -4.54341E-01 </td> <td align="center"> 1.45171E-01 </td> <td align="center"> 1.20487E-03 </td> </tr>
</tbody>
</table>
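The numbers in the table can be reproduced, up to statistical noise, with a short brute-force Metropolis calculation. The sketch below is not the code used to produce the table; it simply samples $P(\rho)\propto |u_T^{\alpha}(\rho)|^2$ with a symmetric proposal and averages the local energy.
```
import numpy as np

rng = np.random.default_rng(42)

def trial_sq(rho, alpha):
    """|u_T(rho)|^2 (unnormalized) for the hydrogen trial wave function."""
    return (alpha * rho * np.exp(-alpha * rho)) ** 2

def local_energy(rho, alpha):
    return -1.0 / rho - 0.5 * alpha * (alpha - 2.0 / rho)

def vmc_hydrogen(alpha, n_steps=200_000, step=1.0):
    rho = 1.0
    energies = np.empty(n_steps)
    for i in range(n_steps):
        rho_trial = rho + step * (rng.random() - 0.5)
        if rho_trial > 0 and rng.random() <= trial_sq(rho_trial, alpha) / trial_sq(rho, alpha):
            rho = rho_trial
        energies[i] = local_energy(rho, alpha)
    return energies.mean(), energies.var()

for alpha in [0.8, 0.9, 1.0, 1.1, 1.2]:
    e, var = vmc_hydrogen(alpha)
    print(f"alpha = {alpha:.1f}  <H> = {e:8.5f}  sigma^2 = {var:.2e}")
```
At $\alpha=1$ the printed variance is exactly zero, anticipating the discussion in the next section.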
## Quantum Monte Carlo: hydrogen atom
We note that at $\alpha=1$ we obtain the exact
result, and the variance is zero, as it should. The reason is that
we then have the exact wave function, and the action of the hamiltionan
on the wave function
$$
H\psi = \mathrm{constant}\times \psi,
$$
yields just a constant. The integral which defines various
expectation values involving moments of the hamiltonian becomes then
$$
\langle H^n \rangle =
\frac{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})H^n(\boldsymbol{R})\Psi_T(\boldsymbol{R})}
{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})\Psi_T(\boldsymbol{R})}=
\mathrm{constant}\times\frac{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})\Psi_T(\boldsymbol{R})}
{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})\Psi_T(\boldsymbol{R})}=\mathrm{constant}.
$$
**This gives an important information: the exact wave function leads to zero variance!**
Variation is then performed by minimizing both the energy and the variance.
## [Quantum Monte Carlo for bosons](https://github.com/mortele/variational-monte-carlo-fys4411)
For bosons in a harmonic oscillator-like trap we will use a spherical (S)
or an elliptical (E) harmonic trap in one, two and finally three
dimensions, with the latter given by
<!-- Equation labels as ordinary links -->
<div id="trap_eqn"></div>
$$
\begin{equation}
V_{ext}(\mathbf{r}) = \Bigg\{
\begin{array}{ll}
\frac{1}{2}m\omega_{ho}^2r^2 & (S)\\
\strut
\frac{1}{2}m[\omega_{ho}^2(x^2+y^2) + \omega_z^2z^2] & (E)
\label{trap_eqn} \tag{5}
\end{array}
\end{equation}
$$
where (S) stands for spherical and
<!-- Equation labels as ordinary links -->
<div id="_auto1"></div>
$$
\begin{equation}
\hat{H} = \sum_i^N \left(
\frac{-\hbar^2}{2m}
{ \bigtriangledown }_{i}^2 +
V_{ext}({\bf{r}}_i)\right) +
\sum_{i<j}^{N} V_{int}({\bf{r}}_i,{\bf{r}}_j),
\label{_auto1} \tag{6}
\end{equation}
$$
as the two-body Hamiltonian of the system.
## [Quantum Monte Carlo for bosons](https://github.com/mortele/variational-monte-carlo-fys4411)
We will represent the inter-boson interaction by a pairwise, repulsive potential
<!-- Equation labels as ordinary links -->
<div id="_auto2"></div>
$$
\begin{equation}
V_{int}(|\mathbf{r}_i-\mathbf{r}_j|) = \Bigg\{
\begin{array}{ll}
\infty & {|\mathbf{r}_i-\mathbf{r}_j|} \leq {a}\\
0 & {|\mathbf{r}_i-\mathbf{r}_j|} > {a}
\end{array}
\label{_auto2} \tag{7}
\end{equation}
$$
where $a$ is the so-called hard-core diameter of the bosons.
Clearly, $V_{int}(|\mathbf{r}_i-\mathbf{r}_j|)$ is zero if the bosons are
separated by a distance $|\mathbf{r}_i-\mathbf{r}_j|$ greater than $a$ but
infinite if they attempt to come within a distance $|\mathbf{r}_i-\mathbf{r}_j| \leq a$.
## [Quantum Monte Carlo for bosons](https://github.com/mortele/variational-monte-carlo-fys4411)
Our trial wave function for the ground state with $N$ atoms is given by
<!-- Equation labels as ordinary links -->
<div id="eq:trialwf"></div>
$$
\begin{equation}
\Psi_T(\mathbf{R})=\Psi_T(\mathbf{r}_1, \mathbf{r}_2, \dots \mathbf{r}_N,\alpha,\beta)=\prod_i g(\alpha,\beta,\mathbf{r}_i)\prod_{i<j}f(a,|\mathbf{r}_i-\mathbf{r}_j|),
\label{eq:trialwf} \tag{8}
\end{equation}
$$
where $\alpha$ and $\beta$ are variational parameters. The
single-particle wave function is proportional to the harmonic
oscillator function for the ground state
<!-- Equation labels as ordinary links -->
<div id="_auto3"></div>
$$
\begin{equation}
g(\alpha,\beta,\mathbf{r}_i)= \exp{[-\alpha(x_i^2+y_i^2+\beta z_i^2)]}.
\label{_auto3} \tag{9}
\end{equation}
$$
## [Quantum Monte Carlo for bosons](https://github.com/mortele/variational-monte-carlo-fys4411)
For spherical traps we have $\beta = 1$ and for non-interacting
bosons ($a=0$) we have $\alpha = 1/(2a_{ho}^2)$. The correlation wave
function is
<!-- Equation labels as ordinary links -->
<div id="_auto4"></div>
$$
\begin{equation}
f(a,|\mathbf{r}_i-\mathbf{r}_j|)=\Bigg\{
\begin{array}{ll}
0 & {|\mathbf{r}_i-\mathbf{r}_j|} \leq {a}\\
(1-\frac{a}{|\mathbf{r}_i-\mathbf{r}_j|}) & {|\mathbf{r}_i-\mathbf{r}_j|} > {a}.
\end{array}
\label{_auto4} \tag{10}
\end{equation}
$$
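As an illustration of how this trial wave function can be evaluated in practice, here is a minimal sketch (our own, not part of the project code), assuming the positions are stored as an $(N,3)$ NumPy array and that `alpha`, `beta` and `a` are given:
```
import numpy as np

def trial_wavefunction(r, alpha, beta, a):
    """Evaluate the bosonic trial wave function of Eqs. (8)-(10) for
    positions r with shape (N, 3). Illustrative sketch only."""
    # One-body part: product of (possibly elliptical) Gaussians
    psi = np.prod(np.exp(-alpha*(r[:, 0]**2 + r[:, 1]**2 + beta*r[:, 2]**2)))
    # Two-body Jastrow factor: zero if any pair overlaps within the hard core a
    n = r.shape[0]
    for i in range(n):
        for j in range(i + 1, n):
            rij = np.linalg.norm(r[i] - r[j])
            if rij <= a:
                return 0.0
            psi *= 1.0 - a/rij
    return psi

# Example: three bosons in a spherical trap (beta = 1), small hard core
rng = np.random.default_rng(2021)
print(trial_wavefunction(rng.normal(size=(3, 3)), alpha=0.5, beta=1.0, a=0.0043))
```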
### Simple example, the hydrogen atom
The radial Schroedinger equation for the hydrogen atom can be
written as (when we have gotten rid of the first derivative term in the kinetic energy and used $rR(r)=u(r)$)
$$
-\frac{\hbar^2}{2m}\frac{d^2 u(r)}{d r^2}-
\left(\frac{ke^2}{r}-\frac{\hbar^2l(l+1)}{2mr^2}\right)u(r)=Eu(r).
$$
We will specialize to the case with $l=0$ and end up with
$$
-\frac{\hbar^2}{2m}\frac{d^2 u(r)}{d r^2}-
\left(\frac{ke^2}{r}\right)u(r)=Eu(r).
$$
Then we introduce a dimensionless variable $\rho=r/a$, where $a$ is a constant with the dimension of length.
Multiplying by $ma^2/\hbar^2$ we can rewrite our equations as
$$
-\frac{1}{2}\frac{d^2 u(\rho)}{d \rho^2}-
\frac{ke^2ma}{\hbar^2}\frac{u(\rho)}{\rho}-\lambda u(\rho)=0.
$$
Since $a$ is just a parameter we choose to set
$$
\frac{ke^2ma}{\hbar^2}=1,
$$
which leads to $a=\hbar^2/mke^2$, better known as the Bohr radius with value $0.053$ nm. Scaling the equations this way not only renders our numerical treatment simpler, since we avoid carrying along all the physical parameters, but also gives us a **natural** length scale. We will see this again and again. In our discussions below with a harmonic oscillator trap, the **natural** length scale will be determined by the oscillator frequency, the mass of the particle and $\hbar$. We have also defined a dimensionless 'energy' $\lambda = Ema^2/\hbar^2$.
With the rescaled quantities, the ground state energy of the hydrogen atom is $-1/2$.
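As a quick numerical sanity check of the quoted Bohr radius (a small sketch of ours using SciPy's physical constants, not part of the original code):
```
# Check a = hbar^2/(m k e^2) with k = 1/(4 pi epsilon_0)
from scipy.constants import hbar, m_e, e, epsilon_0, pi

a_bohr = hbar**2*4*pi*epsilon_0/(m_e*e**2)
print(f"Bohr radius: {a_bohr*1e9:.4f} nm")  # roughly 0.0529 nm
```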
The equation we want to solve is now defined by the Hamiltonian
$$
H=-\frac{1}{2}\frac{d^2 }{d \rho^2}-\frac{1}{\rho}.
$$
As trial wave function we now take a cue from the analytical solution for
the hydrogen atom and use (with $\alpha$ as a variational parameter)
$$
u_T^{\alpha}(\rho)=\alpha\rho \exp{-(\alpha\rho)}.
$$
Inserting this wave function into the expression for the
local energy $E_L$ gives
$$
E_L(\rho)=-\frac{1}{\rho}-
\frac{\alpha}{2}\left(\alpha-\frac{2}{\rho}\right).
$$
Having analytical local energies saves us from computing the second
derivative numerically, a feature which often increases our numerical
cost by a factor of three or more. Integrating up the local energy (remember to include the PDF in the integration) gives $\overline{E}[\boldsymbol{\alpha}]=\alpha(\alpha/2-1)$.
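A minimal sketch (not part of the original program) that scans this closed-form energy and confirms the minimum at $\alpha=1$ with $\overline{E}=-1/2$:
```
import numpy as np
import matplotlib.pyplot as plt

# Closed-form variational energy for the hydrogen trial function
alphas = np.linspace(0.5, 1.5, 101)
energies = alphas*(0.5*alphas - 1.0)
print(f"Minimum at alpha = {alphas[np.argmin(energies)]:.2f}, E = {energies.min():.3f}")

plt.plot(alphas, energies)
plt.xlabel(r'$\alpha$')
plt.ylabel(r'$\overline{E}[\alpha]$')
plt.show()
```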
### Second example, the harmonic oscillator in one dimension
We present here another well-known example, the harmonic oscillator in
one dimension for one particle. This will also serve the aim of
introducing our next model, namely that of interacting electrons in a
harmonic oscillator trap.
Here as well, we have analytical solutions, and the energy of the
ground state, with $\hbar=1$, is $\omega/2$, with $\omega$ being the
oscillator frequency. We use the following trial wave function
$$
\psi_T(x;\alpha) = \exp{-(\frac{1}{2}\alpha^2x^2)},
$$
which results in a local energy
$$
\frac{1}{2}\left(\alpha^2+x^2(1-\alpha^4)\right).
$$
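For completeness, this local energy follows directly from applying the Hamiltonian (here with $m=\hbar=\omega=1$) to the trial wave function, since $\psi_T''=(\alpha^4x^2-\alpha^2)\psi_T$:
$$
E_L(x)=\frac{H\psi_T}{\psi_T}=-\frac{1}{2}\left(\alpha^4x^2-\alpha^2\right)+\frac{1}{2}x^2=\frac{1}{2}\left(\alpha^2+x^2(1-\alpha^4)\right).
$$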
We can compare our numerically calculated energies with the exact energy as function of $\alpha$
$$
\overline{E}[\alpha] = \frac{1}{4}\left(\alpha^2+\frac{1}{\alpha^2}\right).
$$
Similarly, with the above ansatz, we can also compute the exact variance which reads
$$
\sigma^2[\alpha]=\frac{1}{4}\left(1+(1-\alpha^4)^2\frac{3}{4\alpha^4}\right)-\overline{E}^{\,2}[\alpha].
$$
Our code for computing the energy of the ground state of the harmonic oscillator follows here. We start by defining directories where we store various outputs.
```
# Common imports
import os
# Where to save the figures and data files
PROJECT_ROOT_DIR = "Results"
FIGURE_ID = "Results/FigureFiles"
DATA_ID = "Results/VMCHarmonic"
if not os.path.exists(PROJECT_ROOT_DIR):
os.mkdir(PROJECT_ROOT_DIR)
if not os.path.exists(FIGURE_ID):
os.makedirs(FIGURE_ID)
if not os.path.exists(DATA_ID):
os.makedirs(DATA_ID)
def image_path(fig_id):
return os.path.join(FIGURE_ID, fig_id)
def data_path(dat_id):
return os.path.join(DATA_ID, dat_id)
def save_fig(fig_id):
plt.savefig(image_path(fig_id) + ".png", format='png')
outfile = open(data_path("VMCHarmonic.dat"),'w')
```
We proceed with the implementation of the Monte Carlo algorithm but list first the ansatz for the wave function and the expression for the local energy
```
%matplotlib inline
# VMC for the one-dimensional harmonic oscillator
# Brute force Metropolis, no importance sampling and no energy minimization
from math import exp, sqrt
from random import random, seed
import numpy as np
import matplotlib.pyplot as plt
from numba import jit
from decimal import *
# Trial wave function for the Harmonic oscillator in one dimension
def WaveFunction(r,alpha):
return exp(-0.5*alpha*alpha*r*r)
# Local energy for the Harmonic oscillator in one dimension
def LocalEnergy(r,alpha):
return 0.5*r*r*(1-alpha**4) + 0.5*alpha*alpha
```
Note that in the Metropolis algorithm there is no strict need to compute the
trial wave function itself, since we only take the ratio of two
exponentials. From a computational point of view it is then more
convenient to compute the argument of the ratio and only then evaluate
the exponential. Here we have refrained from doing so purely for
pedagogical reasons.
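A small illustration of ours (separate from the program that follows) of how the acceptance test could be written in terms of the argument of the ratio for this trial wave function:
```
from math import exp
from random import random

def metropolis_accept(r_old, r_new, alpha):
    # Argument of |psi(new)/psi(old)|^2 for psi_T = exp(-0.5*alpha^2*x^2)
    log_ratio = -alpha*alpha*(r_new*r_new - r_old*r_old)
    if log_ratio >= 0:          # ratio >= 1: always accept
        return True
    return random() <= exp(log_ratio)

print(metropolis_accept(0.5, 0.3, alpha=1.0))
```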
```
# The Monte Carlo sampling with the Metropolis algo
# The jit decorator tells Numba to compile this function.
# The argument types will be inferred by Numba when the function is called.
def MonteCarloSampling():
NumberMCcycles= 100000
StepSize = 1.0
# positions
PositionOld = 0.0
PositionNew = 0.0
# seed for rng generator
seed()
# start variational parameter
alpha = 0.4
for ia in range(MaxVariations):
alpha += .05
AlphaValues[ia] = alpha
energy = energy2 = 0.0
#Initial position
PositionOld = StepSize * (random() - .5)
wfold = WaveFunction(PositionOld,alpha)
#Loop over MC MCcycles
for MCcycle in range(NumberMCcycles):
#Trial position
PositionNew = PositionOld + StepSize*(random() - .5)
wfnew = WaveFunction(PositionNew,alpha)
#Metropolis test to see whether we accept the move
if random() <= wfnew**2 / wfold**2:
PositionOld = PositionNew
wfold = wfnew
DeltaE = LocalEnergy(PositionOld,alpha)
energy += DeltaE
energy2 += DeltaE**2
#We calculate mean, variance and error
energy /= NumberMCcycles
energy2 /= NumberMCcycles
variance = energy2 - energy**2
error = sqrt(variance/NumberMCcycles)
Energies[ia] = energy
Variances[ia] = variance
outfile.write('%f %f %f %f \n' %(alpha,energy,variance,error))
return Energies, AlphaValues, Variances
```
Finally, the results are presented here with the exact energies and variances as well.
```
#Here starts the main program with variable declarations
MaxVariations = 20
Energies = np.zeros((MaxVariations))
ExactEnergies = np.zeros((MaxVariations))
ExactVariance = np.zeros((MaxVariations))
Variances = np.zeros((MaxVariations))
AlphaValues = np.zeros(MaxVariations)
(Energies, AlphaValues, Variances) = MonteCarloSampling()
outfile.close()
ExactEnergies = 0.25*(AlphaValues*AlphaValues+1.0/(AlphaValues*AlphaValues))
ExactVariance = 0.25*(1.0+((1.0-AlphaValues**4)**2)*3.0/(4*(AlphaValues**4)))-ExactEnergies*ExactEnergies
#simple subplot
plt.subplot(2, 1, 1)
plt.plot(AlphaValues, Energies, 'o-',AlphaValues, ExactEnergies,'r-')
plt.title('Energy and variance')
plt.ylabel('Dimensionless energy')
plt.subplot(2, 1, 2)
plt.plot(AlphaValues, Variances, '.-',AlphaValues, ExactVariance,'r-')
plt.xlabel(r'$\alpha$', fontsize=15)
plt.ylabel('Variance')
save_fig("VMCHarmonic")
plt.show()
#nice printout with Pandas
import pandas as pd
from pandas import DataFrame
data ={'Alpha':AlphaValues, 'Energy':Energies,'Exact Energy':ExactEnergies,'Variance':Variances,'Exact Variance':ExactVariance,}
frame = pd.DataFrame(data)
print(frame)
```
For $\alpha=1$ we have the exact eigenpairs, as can be deduced from the
table here. With $\omega=1$, the exact energy is $1/2$ a.u. with zero
variance, as it should be. We also see that our computed variance follows the exact variance rather well.
Increasing the number of Monte Carlo cycles will improve our statistics; try increasing it yourself.
The reason the variance is exactly equal to zero when $\alpha=1$ is that
we then have the exact wave function, and the action of the Hamiltonian
on the wave function
$$
H\psi = \mathrm{constant}\times \psi,
$$
yields just a constant. The integral which defines various
expectation values involving moments of the hamiltonian becomes then
$$
\langle H^n \rangle =
\frac{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})H^n(\boldsymbol{R})\Psi_T(\boldsymbol{R})}
{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})\Psi_T(\boldsymbol{R})}=
\mathrm{constant}\times\frac{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})\Psi_T(\boldsymbol{R})}
{\int d\boldsymbol{R}\Psi^{\ast}_T(\boldsymbol{R})\Psi_T(\boldsymbol{R})}=\mathrm{constant}.
$$
**This gives us an important insight: the exact wave function leads to zero variance!**
As we will see below, many practitioners therefore minimize both the energy and the variance.
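As a sketch of such a combined minimization (an illustration of ours, using the closed-form harmonic oscillator expressions derived above), one can minimize a cost of the form $\overline{E}[\alpha]+\lambda\,\sigma^2[\alpha]$:
```
import numpy as np

def energy(alpha):
    return 0.25*(alpha**2 + 1.0/alpha**2)

def variance(alpha):
    return 0.25*(1.0 + (1.0 - alpha**4)**2*3.0/(4.0*alpha**4)) - energy(alpha)**2

def cost(alpha, lam=1.0):
    return energy(alpha) + lam*variance(alpha)

alphas = np.linspace(0.5, 1.5, 1001)
print(f"Combined minimum at alpha = {alphas[np.argmin(cost(alphas))]:.3f}")  # alpha = 1
```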
## Step 1: Import Libraries
```
# All imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno
import seaborn as sns
from sklearn.feature_selection import SelectKBest, f_regression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.filterwarnings('ignore')
# List all the files
for dir_name, _, file_names in os.walk('data'):
for file_name in file_names:
print(os.path.join(dir_name, file_name))
```
## Step 2: Reading the Data
```
data_vw = pd.read_csv("data/vw.csv")
data_vw.shape
data_vw.head()
data_vw.describe()
missingno.matrix(data_vw)
data_vw.isnull().sum()
```
## Step 3: EDA
```
categorical_features = [feature for feature in data_vw.columns if data_vw[feature].dtype == 'O']
# Getting the count plot
for feature in categorical_features:
sns.countplot(y=data_vw[feature])
plt.show()
# Getting the barplot
plt.figure(figsize=(10,5), facecolor='w')
sns.barplot(x=data_vw['year'], y=data_vw['price'])
sns.barplot(x=data_vw['transmission'], y=data_vw['price'])
# Getting the relation between mileage and price
plt.figure(figsize=(10, 6))
sns.scatterplot(x=data_vw['mileage'], y=data_vw['price'], hue=data_vw['year'])
plt.figure(figsize=(5,5))
sns.scatterplot(x=data_vw['mileage'], y=data_vw['price'], hue=data_vw['transmission'])
plt.figure(figsize=(10,10))
sns.pairplot(data_vw)
```
## Step 4: Feature Engineering
```
data_vw.head()
```
We drop the year column and instead create a feature for how old the car is.
```
data_vw['age_of_car'] = 2020 - data_vw['year']
data_vw.drop(['year'], axis=1, inplace=True)
# Look at the frequency of the ages
sns.countplot(y=data_vw['age_of_car'])
# OHE the categorical variables
data_vw_extended = pd.get_dummies(data_vw)
data_vw_extended.shape
sc = StandardScaler()
data_vw_extended = pd.DataFrame(sc.fit_transform(data_vw_extended), columns=data_vw_extended.columns)
data_vw_extended.head()
X_train, X_test, y_train, y_test = train_test_split(data_vw_extended.drop(['price'], axis=1), data_vw_extended[['price']])
X_train.shape, X_test.shape, y_train.shape, y_test.shape
```
## Step 5: Feature Selection
```
# Select the k best features
no_of_features = []
r_2_train = []
r_2_test = []
for k in range(3, 40, 2):
selector = SelectKBest(f_regression, k=k)
X_train_selector = selector.fit_transform(X_train, y_train)
X_test_selector = selector.transform(X_test)
lin_reg = LinearRegression()
lin_reg.fit(X_train_selector, y_train)
no_of_features.append(k)
r_2_train.append(lin_reg.score(X_train_selector, y_train))
r_2_test.append(lin_reg.score(X_test_selector, y_test))
sns.lineplot(x=no_of_features, y=r_2_train)
sns.lineplot(x=no_of_features, y=r_2_test)
```
k=23 provides the best result, hence we train the models on 23 features.
```
selector = SelectKBest(f_regression, k=23)
X_train_selector = selector.fit_transform(X_train, y_train)
X_test_selector = selector.transform(X_test)
column_name = data_vw_extended.drop(['price'], axis=1).columns
column_name[selector.get_support()]
```
## Step 6: Model
```
def regressor_builder(model):
regressor = model
regressor.fit(X_train_selector, y_train)
score = regressor.score(X_test_selector, y_test)
return regressor, score
list_models = [LinearRegression(), Lasso(), Ridge(), SVR(), RandomForestRegressor(), MLPRegressor()]
model_performance = pd.DataFrame(columns=['Features', 'Model', 'Performance'])
for model in list_models:
regressor, score = regressor_builder(model)
    model_performance = model_performance.append({"Features": "Linear", "Model": regressor, "Performance": score}, ignore_index=True)
model_performance
```
RandomForestRegressor provides the best $R^2$ on the test set.
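Since the random forest comes out on top, a natural next step is a small hyperparameter search. The following is a sketch (assuming `X_train_selector`, `X_test_selector`, `y_train` and `y_test` from the cells above are still in scope):
```
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor

# Small grid over the most influential random forest parameters
param_grid = {"n_estimators": [100, 300], "max_depth": [None, 10, 20]}
grid = GridSearchCV(RandomForestRegressor(random_state=42), param_grid, cv=3, scoring="r2")
grid.fit(X_train_selector, y_train.values.ravel())
print(grid.best_params_, grid.best_score_)
print("Test R^2:", grid.best_estimator_.score(X_test_selector, y_test))
```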
```
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# E2E ML on GCP: MLOps stage 2 : experimentation: get started with Vertex Training for Scikit-Learn
<table align="left">
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage2/get_started_vertex_training_sklearn.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/main/notebooks/community/ml_ops/stage2/get_started_vertex_training_sklearn.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
<br/><br/><br/>
## Overview
This tutorial demonstrates how to use Vertex AI for E2E MLOps on Google Cloud in production. This tutorial covers stage 2 : experimentation: get started with Vertex Training for Scikit-Learn.
### Dataset
The dataset used for this tutorial is the [News Aggregation](https://archive.ics.uci.edu/ml/datasets/News+Aggregator) from [ICS Machine Learning Datasets](https://archive.ics.uci.edu/ml/datasets.php). The trained model predicts the news category of the news article.
### Objective
In this tutorial, you learn how to use `Vertex AI Training` for training a Scikit-Learn custom model.
This tutorial uses the following Google Cloud ML services:
- `Vertex AI Training`
- `Vertex AI Model` resource
The steps performed include:
- Training using a Python package.
- Report accuracy when hyperparameter tuning.
- Save the model artifacts to Cloud Storage using GCSFuse.
- Create a `Vertex AI Model` resource.
## Installations
Install the packages required for executing the MLOps notebooks (*one time only*).
```
ONCE_ONLY = False
if ONCE_ONLY:
! pip3 install -U tensorflow==2.5 $USER_FLAG
! pip3 install -U tensorflow-data-validation==1.2 $USER_FLAG
! pip3 install -U tensorflow-transform==1.2 $USER_FLAG
! pip3 install -U tensorflow-io==0.18 $USER_FLAG
! pip3 install --upgrade google-cloud-aiplatform[tensorboard] $USER_FLAG
! pip3 install --upgrade google-cloud-pipeline-components $USER_FLAG
! pip3 install --upgrade google-cloud-bigquery $USER_FLAG
! pip3 install --upgrade google-cloud-logging $USER_FLAG
! pip3 install --upgrade apache-beam[gcp] $USER_FLAG
! pip3 install --upgrade pyarrow $USER_FLAG
! pip3 install --upgrade cloudml-hypertune $USER_FLAG
! pip3 install --upgrade kfp $USER_FLAG
! pip3 install --upgrade torchvision $USER_FLAG
! pip3 install --upgrade rpy2 $USER_FLAG
```
### Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
```
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
```
#### Set your project ID
**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
```
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
```
#### Region
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
- Americas: `us-central1`
- Europe: `europe-west4`
- Asia Pacific: `asia-east1`
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations).
```
REGION = "us-central1" # @param {type: "string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
```
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
```
### Create a Cloud Storage bucket
**The following steps are required, regardless of your notebook environment.**
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
```
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
### Set up variables
Next, set up some variables used throughout the tutorial.
### Import libraries and define constants
```
import google.cloud.aiplatform as aip
```
### Initialize Vertex AI SDK for Python
Initialize the Vertex AI SDK for Python for your project and corresponding bucket.
```
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
```
#### Set hardware accelerators
You can set hardware accelerators for training and prediction.
Set the variables `TRAIN_GPU/TRAIN_NGPU` and `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:
(aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
Otherwise specify `(None, None)` to use a container image to run on a CPU.
Learn more about [hardware accelerator support for your region](https://cloud.google.com/vertex-ai/docs/general/locations#accelerators).
*Note*: TF releases before 2.3 for GPU support will fail to load the custom model in this tutorial. It is a known issue and fixed in TF 2.3. This is caused by static graph ops that are generated in the serving function. If you encounter this issue on your own custom models, use a container image for TF 2.3 with GPU support.
```
if os.getenv("IS_TESTING_TRAIN_GPU"):
TRAIN_GPU, TRAIN_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_TRAIN_GPU")),
)
else:
TRAIN_GPU, TRAIN_NGPU = (None, None)
if os.getenv("IS_TESTING_DEPLOY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.gapic.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPLOY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (None, None)
```
#### Set pre-built containers
Set the pre-built Docker container image for training and prediction.
For the latest list, see [Pre-built containers for training](https://cloud.google.com/ai-platform-unified/docs/training/pre-built-containers).
For the latest list, see [Pre-built containers for prediction](https://cloud.google.com/ai-platform-unified/docs/predictions/pre-built-containers).
```
TRAIN_VERSION = "scikit-learn-cpu.0-23"
DEPLOY_VERSION = "sklearn-cpu.0-23"
TRAIN_IMAGE = "{}-docker.pkg.dev/vertex-ai/training/{}:latest".format(
REGION.split("-")[0], TRAIN_VERSION
)
DEPLOY_IMAGE = "{}-docker.pkg.dev/vertex-ai/prediction/{}:latest".format(
REGION.split("-")[0], DEPLOY_VERSION
)
```
#### Set machine type
Next, set the machine type to use for training.
- Set the variable `TRAIN_COMPUTE` to configure the compute resources for the VMs you will use for training.
- `machine type`
- `n1-standard`: 3.75GB of memory per vCPU.
- `n1-highmem`: 6.5GB of memory per vCPU
- `n1-highcpu`: 0.9 GB of memory per vCPU
- `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
*Note: The following is not supported for training:*
- `standard`: 2 vCPUs
- `highcpu`: 2, 4 and 8 vCPUs
*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*.
```
if os.getenv("IS_TESTING_TRAIN_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_TRAIN_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
TRAIN_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Train machine type", TRAIN_COMPUTE)
```
## Introduction to Scikit-learn training
Once you have trained a Scikit-learn model, you will want to save it at a Cloud Storage location, so it can subsequently be uploaded to a `Vertex AI Model` resource. The Scikit-learn package does not have built-in support for saving a model directly to a Cloud Storage location. Instead, you do the following steps to save it to Cloud Storage (sketched in the example below); the training script later in this tutorial writes through a GCSFuse mount instead.
1. Save the in-memory model to the local filesystem in pickle format (e.g., model.pkl).
2. Create a Cloud Storage storage client.
3. Upload the pickle file as a blob to the specified Cloud Storage location using the Cloud Storage storage client.
*Note*: You can do hyperparameter tuning with a Scikit-learn model.
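The three steps above, sketched with the Cloud Storage Python client (a minimal illustration with placeholder bucket and blob names; the training script in this tutorial uses GCSFuse paths instead):
```
import pickle
from google.cloud import storage

def save_model_to_gcs(model, bucket_name, blob_path, local_path="model.pkl"):
    # 1. Save the in-memory model to the local filesystem in pickle format
    with open(local_path, "wb") as f:
        pickle.dump(model, f)
    # 2. Create a Cloud Storage client
    client = storage.Client()
    # 3. Upload the pickle file as a blob to the specified Cloud Storage location
    client.bucket(bucket_name).blob(blob_path).upload_from_filename(local_path)

# Example (placeholder names):
# save_model_to_gcs(model, "my-bucket", "training-job/model.pkl")
```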
### Examine the training package
#### Package layout
Before you start the training, you will look at how a Python package is assembled for a custom training job. When unarchived, the package contains the following directory/file layout.
- PKG-INFO
- README.md
- setup.cfg
- setup.py
- trainer
- \_\_init\_\_.py
- task.py
The files `setup.cfg` and `setup.py` are the instructions for installing the package into the operating environment of the Docker image.
The file `trainer/task.py` is the Python script for executing the custom training job. *Note*: when we refer to it in the worker pool specification, we replace the directory slash with a dot (`trainer.task`) and drop the file suffix (`.py`).
#### Package Assembly
In the following cells, you will assemble the training package.
```
# Make folder for Python training script
! rm -rf custom
! mkdir custom
# Add package information
! touch custom/README.md
setup_cfg = "[egg_info]\n\ntag_build =\n\ntag_date = 0"
! echo "$setup_cfg" > custom/setup.cfg
setup_py = "import setuptools\n\nsetuptools.setup(\n\n install_requires=[\n\n 'wget',\n\n 'cloudml-hypertune',\n\n ],\n\n packages=setuptools.find_packages())"
! echo "$setup_py" > custom/setup.py
pkg_info = "Metadata-Version: 1.0\n\nName: News Aggregation text classification\n\nVersion: 0.0.0\n\nSummary: Demonstration training script\n\nHome-page: www.google.com\n\nAuthor: Google\n\nAuthor-email: [email protected]\n\nLicense: Public\n\nDescription: Demo\n\nPlatform: Vertex"
! echo "$pkg_info" > custom/PKG-INFO
# Make the training subfolder
! mkdir custom/trainer
! touch custom/trainer/__init__.py
```
### Create the task script for the Python training package
Next, you create the `task.py` script for driving the training package. Some notable steps include:
- Command-line arguments:
- `model-dir`: The location to save the trained model. When using Vertex AI custom training, the location will be specified in the environment variable: `AIP_MODEL_DIR`,
- `dataset_url`: The location of the dataset to download.
- `alpha`: Hyperparameter
- Data preprocessing (`get_data()`):
- Download the dataset and split into training and test.
- Model architecture (`get_model()`):
- Builds the corresponding model architecture.
- Training (`train_model()`):
- Trains the model
- Evaluation (`evaluate_model()`):
- Evaluates the model.
- If hyperparameter tuning, reports the metric for accuracy.
- Model artifact saving
- Saves the model artifacts and evaluation metrics to the Cloud Storage location specified by `model-dir`.
- *Note*: GCSFuse (`/gcs`) is used to do filesystem operations on Cloud Storage buckets.
```
%%writefile custom/trainer/task.py
import argparse
import logging
import os
import pickle
import zipfile
from typing import List, Tuple
import pandas as pd
import wget
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
import hypertune
parser = argparse.ArgumentParser()
parser.add_argument('--model-dir', dest='model_dir',
default=os.getenv('AIP_MODEL_DIR'), type=str, help='Model dir.')
parser.add_argument("--dataset-url", dest="dataset_url",
type=str, help="Download url for the training data.")
parser.add_argument('--alpha', dest='alpha',
default=1.0, type=float,
help='Alpha parameters for MultinomialNB')
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
def get_data(url: str, test_size: float = 0.2) -> Tuple[List, List, List, List]:
logging.info("Downloading training data from: {}".format(args.dataset_url))
zip_filepath = wget.download(url, out=".")
with zipfile.ZipFile(zip_filepath, "r") as zf:
zf.extract(path=".", member="newsCorpora.csv")
COLUMN_NAMES = ["id", "title", "url", "publisher",
"category", "story", "hostname", "timestamp"]
    dataframe = pd.read_csv(
        "newsCorpora.csv", delimiter="\t", names=COLUMN_NAMES, index_col=0
    )
train, test = train_test_split(dataframe, test_size=test_size)
x_train, y_train = train["title"].values, train["category"].values
x_test, y_test = test["title"].values, test["category"].values
return x_train, y_train, x_test, y_test
def get_model():
logging.info("Build model ...")
model = Pipeline([
("vectorizer", CountVectorizer()),
("tfidf", TfidfTransformer()),
("naivebayes", MultinomialNB(alpha=args.alpha)),
])
return model
def train_model(model: Pipeline, X_train: List, y_train: List, X_test: List, y_test: List
) -> Pipeline:
logging.info("Training started ...")
model.fit(X_train, y_train)
logging.info("Training completed")
return model
def evaluate_model(model: Pipeline, X_train: List, y_train: List, X_test: List, y_test: List
) -> float:
score = model.score(X_test, y_test)
logging.info(f"Evaluation completed with model score: {score}")
# report metric for hyperparameter tuning
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=score
)
return score
def export_model_to_gcs(fitted_pipeline: Pipeline, gcs_uri: str) -> str:
"""Exports trained pipeline to GCS
Parameters:
fitted_pipeline (sklearn.pipelines.Pipeline): the Pipeline object
with data already fitted (trained pipeline object).
gcs_uri (str): GCS path to store the trained pipeline
i.e gs://example_bucket/training-job.
Returns:
export_path (str): Model GCS location
"""
# Upload model artifact to Cloud Storage
artifact_filename = 'model.pkl'
storage_path = os.path.join(gcs_uri, artifact_filename)
    # Write the model artifact directly to the Cloud Storage path (via the GCSFuse mount)
with open(storage_path, 'wb') as model_file:
pickle.dump(fitted_pipeline, model_file)
def export_evaluation_report_to_gcs(report: str, gcs_uri: str) -> None:
"""
Exports training job report to GCS
Parameters:
report (str): Full report in text to sent to GCS
gcs_uri (str): GCS path to store the report
i.e gs://example_bucket/training-job
"""
# Upload model artifact to Cloud Storage
artifact_filename = 'report.txt'
storage_path = os.path.join(gcs_uri, artifact_filename)
    # Write the report directly to the Cloud Storage path (via the GCSFuse mount)
with open(storage_path, 'w') as report_file:
report_file.write(report)
logging.info("Starting custom training job.")
data = get_data(args.dataset_url)
model = get_model()
model = train_model(model, *data)
score = evaluate_model(model, *data)
# export model to gcs using GCSFuse
logging.info("Exporting model artifacts ...")
gs_prefix = 'gs://'
gcsfuse_prefix = '/gcs/'
if args.model_dir.startswith(gs_prefix):
args.model_dir = args.model_dir.replace(gs_prefix, gcsfuse_prefix)
dirpath = os.path.split(args.model_dir)[0]
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
export_model_to_gcs(model, args.model_dir)
export_evaluation_report_to_gcs(str(score), args.model_dir)
logging.info(f"Exported model artifacts to GCS bucket: {args.model_dir}")
```
#### Store training script on your Cloud Storage bucket
Next, you package the training folder into a compressed tar ball, and then store it in your Cloud Storage bucket.
```
! rm -f custom.tar custom.tar.gz
! tar cvf custom.tar custom
! gzip custom.tar
! gsutil cp custom.tar.gz $BUCKET_NAME/trainer_newsaggr.tar.gz
```
### Create and run custom training job
To train a custom model, you perform two steps: 1) create a custom training job, and 2) run the job.
#### Create custom training job
A custom training job is created with the `CustomTrainingJob` class, with the following parameters:
- `display_name`: The human readable name for the custom training job.
- `container_uri`: The training container image.
- `python_package_gcs_uri`: The location of the Python training package as a tarball.
- `python_module_name`: The relative path to the training script in the Python package.
- `model_serving_container_uri`: The container image for deploying the model.
*Note:* There is no requirements parameter. You specify any requirements in the `setup.py` script in your Python package.
```
DISPLAY_NAME = "newsaggr_" + TIMESTAMP
job = aip.CustomPythonPackageTrainingJob(
display_name=DISPLAY_NAME,
python_package_gcs_uri=f"{BUCKET_NAME}/trainer_newsaggr.tar.gz",
python_module_name="trainer.task",
container_uri=TRAIN_IMAGE,
model_serving_container_image_uri=DEPLOY_IMAGE,
project=PROJECT_ID,
)
```
### Prepare your command-line arguments
Now define the command-line arguments for your custom training container:
- `args`: The command-line arguments to pass to the executable that is set as the entry point into the container.
- `--model-dir` : For our demonstrations, we use this command-line argument to specify where to store the model artifacts.
- direct: You pass the Cloud Storage location as a command line argument to your training script (set variable `DIRECT = True`), or
- indirect: The service passes the Cloud Storage location as the environment variable `AIP_MODEL_DIR` to your training script (set variable `DIRECT = False`). In this case, you tell the service the model artifact location in the job specification.
- `--dataset-url`: The location of the dataset to download.
- `--alpha`: Tunable hyperparameter
```
MODEL_DIR = "{}/{}".format(BUCKET_NAME, TIMESTAMP)
DATASET_URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/00359/NewsAggregatorDataset.zip"
DIRECT = False
if DIRECT:
    CMDARGS = [
        "--alpha=" + str(0.9),
        "--dataset-url=" + DATASET_URL,
        "--model-dir=" + MODEL_DIR,
    ]
else:
CMDARGS = ["--alpha=" + str(0.9), "--dataset-url=" + DATASET_URL]
```
#### Run the custom training job
Next, you run the custom job to start the training job by invoking the method `run`, with the following parameters:
- `model_display_name`: The human readable name for the `Model` resource.
- `args`: The command-line arguments to pass to the training script.
- `replica_count`: The number of compute instances for training (replica_count = 1 is single node training).
- `machine_type`: The machine type for the compute instances.
- `accelerator_type`: The hardware accelerator type.
- `accelerator_count`: The number of accelerators to attach to a worker replica.
- `base_output_dir`: The Cloud Storage location to write the model artifacts to.
- `sync`: Whether to block until completion of the job.
```
if TRAIN_GPU:
model = job.run(
model_display_name="newsaggr_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
accelerator_type=TRAIN_GPU.name,
accelerator_count=TRAIN_NGPU,
base_output_dir=MODEL_DIR,
sync=False,
)
else:
model = job.run(
model_display_name="newsaggr_" + TIMESTAMP,
args=CMDARGS,
replica_count=1,
machine_type=TRAIN_COMPUTE,
base_output_dir=MODEL_DIR,
sync=False,
)
model_path_to_deploy = MODEL_DIR
```
### List a custom training job
```
_job = job.list(filter=f"display_name={DISPLAY_NAME}")
print(_job)
```
### Wait for completion of custom training job
Next, wait for the custom training job to complete. Alternatively, one can set the parameter `sync` to `True` in the `run()` method to block until the custom training job is completed.
```
model.wait()
```
### Delete a custom training job
After a training job is completed, you can delete the training job with the method `delete()`. Prior to completion, a training job can be canceled with the method `cancel()`.
```
job.delete()
```
# Cleaning up
To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Dataset
- Pipeline
- Model
- Endpoint
- AutoML Training Job
- Batch Job
- Custom Job
- Hyperparameter Tuning Job
- Cloud Storage Bucket
```
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.undeploy_all()
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline training job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom training job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
```
```
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#default_exp data.transforms
#export
from fastai.torch_basics import *
from fastai.data.core import *
from fastai.data.load import *
from fastai.data.external import *
from sklearn.model_selection import train_test_split
#hide
from nbdev.showdoc import *
```
# Helper functions for processing data and basic transforms
> Functions for getting, splitting, and labeling data, as well as generic transforms
## Get, split, and label
For most data source creation we need functions to get a list of items, split them into train/valid sets, and label them. fastai provides functions to make each of these steps easy (especially when combined with `fastai.data.blocks`).
### Get
First we'll look at functions that *get* a list of items (generally file names).
We'll use *tiny MNIST* (a subset of MNIST with just two classes, `7`s and `3`s) for our examples/tests throughout this page.
```
path = untar_data(URLs.MNIST_TINY)
(path/'train').ls()
# export
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
# export
def get_files(path, extensions=None, recurse=True, folders=None, followlinks=True):
"Get all the files in `path` with optional `extensions`, optionally with `recurse`, only in `folders`, if specified."
path = Path(path)
folders=L(folders)
extensions = setify(extensions)
extensions = {e.lower() for e in extensions}
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path, followlinks=followlinks)): # returns (dirpath, dirnames, filenames)
if len(folders) !=0 and i==0: d[:] = [o for o in d if o in folders]
else: d[:] = [o for o in d if not o.startswith('.')]
if len(folders) !=0 and i==0 and '.' not in folders: continue
res += _get_files(p, f, extensions)
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
res = _get_files(path, f, extensions)
return L(res)
```
This is the most general way to grab a bunch of file names from disk. If you pass `extensions` (including the `.`) then returned file names are filtered by that list. Only those files directly in `path` are included, unless you pass `recurse`, in which case all child folders are also searched recursively. `folders` is an optional list of directories to limit the search to.
```
t3 = get_files(path/'train'/'3', extensions='.png', recurse=False)
t7 = get_files(path/'train'/'7', extensions='.png', recurse=False)
t = get_files(path/'train', extensions='.png', recurse=True)
test_eq(len(t), len(t3)+len(t7))
test_eq(len(get_files(path/'train'/'3', extensions='.jpg', recurse=False)),0)
test_eq(len(t), len(get_files(path, extensions='.png', recurse=True, folders='train')))
t
#hide
test_eq(len(get_files(path/'train'/'3', recurse=False)),346)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders=['train', 'test'])),729)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders='train')),709)
test_eq(len(get_files(path, extensions='.png', recurse=True, folders='training')),0)
```
It's often useful to be able to create functions with customized behavior. `fastai.data` generally uses functions named as CamelCase verbs ending in `er` to create these functions. `FileGetter` is a simple example of such a function creator.
```
#export
def FileGetter(suf='', extensions=None, recurse=True, folders=None):
"Create `get_files` partial function that searches path suffix `suf`, only in `folders`, if specified, and passes along args"
def _inner(o, extensions=extensions, recurse=recurse, folders=folders):
return get_files(o/suf, extensions, recurse, folders)
return _inner
fpng = FileGetter(extensions='.png', recurse=False)
test_eq(len(t7), len(fpng(path/'train'/'7')))
test_eq(len(t), len(fpng(path/'train', recurse=True)))
fpng_r = FileGetter(extensions='.png', recurse=True)
test_eq(len(t), len(fpng_r(path/'train')))
#export
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
#export
def get_image_files(path, recurse=True, folders=None):
"Get image files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=image_extensions, recurse=recurse, folders=folders)
```
This is simply `get_files` called with a list of standard image extensions.
```
test_eq(len(t), len(get_image_files(path, recurse=True, folders='train')))
#export
def ImageGetter(suf='', recurse=True, folders=None):
"Create `get_image_files` partial that searches suffix `suf` and passes along `kwargs`, only in `folders`, if specified"
def _inner(o, recurse=recurse, folders=folders): return get_image_files(o/suf, recurse, folders)
return _inner
```
Same as `FileGetter`, but for image extensions.
```
test_eq(len(get_files(path/'train', extensions='.png', recurse=True, folders='3')),
len(ImageGetter( 'train', recurse=True, folders='3')(path)))
#export
def get_text_files(path, recurse=True, folders=None):
"Get text files in `path` recursively, only in `folders`, if specified."
return get_files(path, extensions=['.txt'], recurse=recurse, folders=folders)
#export
class ItemGetter(ItemTransform):
"Creates a proper transform that applies `itemgetter(i)` (even on a tuple)"
_retain = False
def __init__(self, i): self.i = i
def encodes(self, x): return x[self.i]
test_eq(ItemGetter(1)((1,2,3)), 2)
test_eq(ItemGetter(1)(L(1,2,3)), 2)
test_eq(ItemGetter(1)([1,2,3]), 2)
test_eq(ItemGetter(1)(np.array([1,2,3])), 2)
#export
class AttrGetter(ItemTransform):
"Creates a proper transform that applies `attrgetter(nm)` (even on a tuple)"
_retain = False
def __init__(self, nm, default=None): store_attr()
def encodes(self, x): return getattr(x, self.nm, self.default)
test_eq(AttrGetter('shape')(torch.randn([4,5])), [4,5])
test_eq(AttrGetter('shape', [0])([4,5]), [0])
```
### Split
The next set of functions are used to *split* data into training and validation sets. The functions return two lists - a list of indices or masks for each of training and validation sets.
```
# export
def RandomSplitter(valid_pct=0.2, seed=None):
"Create function that splits `items` between train/val with `valid_pct` randomly."
def _inner(o):
if seed is not None: torch.manual_seed(seed)
rand_idx = L(list(torch.randperm(len(o)).numpy()))
cut = int(valid_pct * len(o))
return rand_idx[cut:],rand_idx[:cut]
return _inner
src = list(range(30))
f = RandomSplitter(seed=42)
trn,val = f(src)
assert 0<len(trn)<len(src)
assert all(o not in val for o in trn)
test_eq(len(trn), len(src)-len(val))
# test random seed consistency
test_eq(f(src)[0], trn)
```
`TrainTestSplitter` uses scikit-learn's `train_test_split`. This allows *splitting* items in a stratified fashion (uniformly according to the labels' distribution).
```
# export
def TrainTestSplitter(test_size=0.2, random_state=None, stratify=None, train_size=None, shuffle=True):
"Split `items` into random train and test subsets using sklearn train_test_split utility."
def _inner(o, **kwargs):
train,valid = train_test_split(range_of(o), test_size=test_size, random_state=random_state,
stratify=stratify, train_size=train_size, shuffle=shuffle)
return L(train), L(valid)
return _inner
src = list(range(30))
labels = [0] * 20 + [1] * 10
test_size = 0.2
f = TrainTestSplitter(test_size=test_size, random_state=42, stratify=labels)
trn,val = f(src)
assert 0<len(trn)<len(src)
assert all(o not in val for o in trn)
test_eq(len(trn), len(src)-len(val))
# test random seed consistency
test_eq(f(src)[0], trn)
# test labels distribution consistency
# there should be test_size % of zeroes and ones respectively in the validation set
test_eq(len([t for t in val if t < 20]) / 20, test_size)
test_eq(len([t for t in val if t >= 20]) / 10, test_size)
#export
def IndexSplitter(valid_idx):
"Split `items` so that `val_idx` are in the validation set and the others in the training set"
def _inner(o):
train_idx = np.setdiff1d(np.array(range_of(o)), np.array(valid_idx))
return L(train_idx, use_list=True), L(valid_idx, use_list=True)
return _inner
items = list(range(10))
splitter = IndexSplitter([3,7,9])
test_eq(splitter(items),[[0,1,2,4,5,6,8],[3,7,9]])
# export
def _grandparent_idxs(items, name):
def _inner(items, name): return mask2idxs(Path(o).parent.parent.name == name for o in items)
return [i for n in L(name) for i in _inner(items,n)]
# export
def GrandparentSplitter(train_name='train', valid_name='valid'):
"Split `items` from the grand parent folder names (`train_name` and `valid_name`)."
def _inner(o):
return _grandparent_idxs(o, train_name),_grandparent_idxs(o, valid_name)
return _inner
fnames = [path/'train/3/9932.png', path/'valid/7/7189.png',
path/'valid/7/7320.png', path/'train/7/9833.png',
path/'train/3/7666.png', path/'valid/3/925.png',
path/'train/7/724.png', path/'valid/3/93055.png']
splitter = GrandparentSplitter()
test_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])
fnames2 = fnames + [path/'test/3/4256.png', path/'test/7/2345.png', path/'valid/7/6467.png']
splitter = GrandparentSplitter(train_name=('train', 'valid'), valid_name='test')
test_eq(splitter(fnames2),[[0,3,4,6,1,2,5,7,10],[8,9]])
# export
def FuncSplitter(func):
"Split `items` by result of `func` (`True` for validation, `False` for training set)."
def _inner(o):
val_idx = mask2idxs(func(o_) for o_ in o)
return IndexSplitter(val_idx)(o)
return _inner
splitter = FuncSplitter(lambda o: Path(o).parent.parent.name == 'valid')
test_eq(splitter(fnames),[[0,3,4,6],[1,2,5,7]])
# export
def MaskSplitter(mask):
"Split `items` depending on the value of `mask`."
def _inner(o): return IndexSplitter(mask2idxs(mask))(o)
return _inner
items = list(range(6))
splitter = MaskSplitter([True,False,False,True,False,True])
test_eq(splitter(items),[[1,2,4],[0,3,5]])
# export
def FileSplitter(fname):
"Split `items` by providing file `fname` (contains names of valid items separated by newline)."
valid = Path(fname).read_text().split('\n')
def _func(x): return x.name in valid
def _inner(o): return FuncSplitter(_func)(o)
return _inner
with tempfile.TemporaryDirectory() as d:
fname = Path(d)/'valid.txt'
fname.write_text('\n'.join([Path(fnames[i]).name for i in [1,3,4]]))
splitter = FileSplitter(fname)
test_eq(splitter(fnames),[[0,2,5,6,7],[1,3,4]])
# export
def ColSplitter(col='is_valid'):
"Split `items` (supposed to be a dataframe) by value in `col`"
def _inner(o):
assert isinstance(o, pd.DataFrame), "ColSplitter only works when your items are a pandas DataFrame"
valid_idx = (o.iloc[:,col] if isinstance(col, int) else o[col]).values.astype('bool')
return IndexSplitter(mask2idxs(valid_idx))(o)
return _inner
df = pd.DataFrame({'a': [0,1,2,3,4], 'b': [True,False,True,True,False]})
splits = ColSplitter('b')(df)
test_eq(splits, [[1,4], [0,2,3]])
#Works with strings or index
splits = ColSplitter(1)(df)
test_eq(splits, [[1,4], [0,2,3]])
# does not get confused if the type of 'is_valid' is integer, but it is meant to be a yes/no
df = pd.DataFrame({'a': [0,1,2,3,4], 'is_valid': [1,0,1,1,0]})
splits_by_int = ColSplitter('is_valid')(df)
test_eq(splits_by_int, [[1,4], [0,2,3]])
# export
def RandomSubsetSplitter(train_sz, valid_sz, seed=None):
    "Take random subsets of `splits` with `train_sz` and `valid_sz`"
assert 0 < train_sz < 1
assert 0 < valid_sz < 1
assert train_sz + valid_sz <= 1.
def _inner(o):
if seed is not None: torch.manual_seed(seed)
train_len,valid_len = int(len(o)*train_sz),int(len(o)*valid_sz)
idxs = L(list(torch.randperm(len(o)).numpy()))
return idxs[:train_len],idxs[train_len:train_len+valid_len]
return _inner
items = list(range(100))
valid_idx = list(np.arange(70,100))
splits = RandomSubsetSplitter(0.3, 0.1)(items)
test_eq(len(splits[0]), 30)
test_eq(len(splits[1]), 10)
```
### Label
The final set of functions is used to *label* a single item of data.
```
# export
def parent_label(o):
"Label `item` with the parent folder name."
return Path(o).parent.name
```
Note that `parent_label` doesn't have anything to customize, so it doesn't return a function - you can just use it directly.
```
test_eq(parent_label(fnames[0]), '3')
test_eq(parent_label("fastai_dev/dev/data/mnist_tiny/train/3/9932.png"), '3')
[parent_label(o) for o in fnames]
#hide
#test for MS Windows when os.path.sep is '\\' instead of '/'
test_eq(parent_label(os.path.join("fastai_dev","dev","data","mnist_tiny","train", "3", "9932.png") ), '3')
# export
class RegexLabeller():
"Label `item` with regex `pat`."
def __init__(self, pat, match=False):
self.pat = re.compile(pat)
self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o):
res = self.matcher(str(o))
assert res,f'Failed to find "{self.pat}" in "{o}"'
return res.group(1)
```
`RegexLabeller` is a very flexible function since it handles any regex search of the stringified item. Pass `match=True` to use `re.match` (i.e. check only start of string), or `re.search` otherwise (default).
For instance, here's an example that replicates the previous `parent_label` results.
```
f = RegexLabeller(fr'{os.path.sep}(\d){os.path.sep}')
test_eq(f(fnames[0]), '3')
[f(o) for o in fnames]
f = RegexLabeller(r'(\d*)', match=True)
test_eq(f(fnames[0].name), '9932')
#export
class ColReader(DisplayedTransform):
"Read `cols` in `row` with potential `pref` and `suff`"
def __init__(self, cols, pref='', suff='', label_delim=None):
store_attr()
self.pref = str(pref) + os.path.sep if isinstance(pref, Path) else pref
self.cols = L(cols)
def _do_one(self, r, c):
o = r[c] if isinstance(c, int) else r[c] if c=='name' else getattr(r, c)
if len(self.pref)==0 and len(self.suff)==0 and self.label_delim is None: return o
if self.label_delim is None: return f'{self.pref}{o}{self.suff}'
else: return o.split(self.label_delim) if len(o)>0 else []
def __call__(self, o, **kwargs):
if len(self.cols) == 1: return self._do_one(o, self.cols[0])
return L(self._do_one(o, c) for c in self.cols)
```
`cols` can be a list of column names or a list of indices (or a mix of both). If `label_delim` is passed, the result is split using it.
```
df = pd.DataFrame({'a': 'a b c d'.split(), 'b': ['1 2', '0', '', '1 2 3']})
f = ColReader('a', pref='0', suff='1')
test_eq([f(o) for o in df.itertuples()], '0a1 0b1 0c1 0d1'.split())
f = ColReader('b', label_delim=' ')
test_eq([f(o) for o in df.itertuples()], [['1', '2'], ['0'], [], ['1', '2', '3']])
df['a1'] = df['a']
f = ColReader(['a', 'a1'], pref='0', suff='1')
test_eq([f(o) for o in df.itertuples()], [L('0a1', '0a1'), L('0b1', '0b1'), L('0c1', '0c1'), L('0d1', '0d1')])
df = pd.DataFrame({'a': [L(0,1), L(2,3,4), L(5,6,7)]})
f = ColReader('a')
test_eq([f(o) for o in df.itertuples()], [L(0,1), L(2,3,4), L(5,6,7)])
df['name'] = df['a']
f = ColReader('name')
test_eq([f(df.iloc[0,:])], [L(0,1)])
```
## Categorize -
```
#export
class CategoryMap(CollBase):
"Collection of categories with the reverse mapping in `o2i`"
def __init__(self, col, sort=True, add_na=False, strict=False):
if is_categorical_dtype(col):
items = L(col.cat.categories, use_list=True)
#Remove non-used categories while keeping order
if strict: items = L(o for o in items if o in col.unique())
else:
if not hasattr(col,'unique'): col = L(col, use_list=True)
# `o==o` is the generalized definition of non-NaN used by Pandas
items = L(o for o in col.unique() if o==o)
if sort: items = items.sorted()
self.items = '#na#' + items if add_na else items
self.o2i = defaultdict(int, self.items.val2idx()) if add_na else dict(self.items.val2idx())
def map_objs(self,objs):
"Map `objs` to IDs"
return L(self.o2i[o] for o in objs)
def map_ids(self,ids):
"Map `ids` to objects in vocab"
return L(self.items[o] for o in ids)
def __eq__(self,b): return all_equal(b,self)
t = CategoryMap([4,2,3,4])
test_eq(t, [2,3,4])
test_eq(t.o2i, {2:0,3:1,4:2})
test_eq(t.map_objs([2,3]), [0,1])
test_eq(t.map_ids([0,1]), [2,3])
test_fail(lambda: t.o2i['unseen label'])
t = CategoryMap([4,2,3,4], add_na=True)
test_eq(t, ['#na#',2,3,4])
test_eq(t.o2i, {'#na#':0,2:1,3:2,4:3})
t = CategoryMap(pd.Series([4,2,3,4]), sort=False)
test_eq(t, [4,2,3])
test_eq(t.o2i, {4:0,2:1,3:2})
col = pd.Series(pd.Categorical(['M','H','L','M'], categories=['H','M','L'], ordered=True))
t = CategoryMap(col)
test_eq(t, ['H','M','L'])
test_eq(t.o2i, {'H':0,'M':1,'L':2})
col = pd.Series(pd.Categorical(['M','H','M'], categories=['H','M','L'], ordered=True))
t = CategoryMap(col, strict=True)
test_eq(t, ['H','M'])
test_eq(t.o2i, {'H':0,'M':1})
# export
class Categorize(DisplayedTransform):
"Reversible transform of category string to `vocab` id"
loss_func,order=CrossEntropyLossFlat(),1
def __init__(self, vocab=None, sort=True, add_na=False):
if vocab is not None: vocab = CategoryMap(vocab, sort=sort, add_na=add_na)
store_attr()
def setups(self, dsets):
if self.vocab is None and dsets is not None: self.vocab = CategoryMap(dsets, sort=self.sort, add_na=self.add_na)
self.c = len(self.vocab)
def encodes(self, o):
try:
return TensorCategory(self.vocab.o2i[o])
except KeyError as e:
raise KeyError(f"Label '{o}' was not included in the training dataset") from e
def decodes(self, o): return Category (self.vocab [o])
#export
class Category(str, ShowTitle): _show_args = {'label': 'category'}
cat = Categorize()
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['cat', 'dog'])
test_eq(cat('cat'), 0)
test_eq(cat.decode(1), 'dog')
test_stdout(lambda: show_at(tds,2), 'cat')
test_fail(lambda: cat('bird'))
cat = Categorize(add_na=True)
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['#na#', 'cat', 'dog'])
test_eq(cat('cat'), 1)
test_eq(cat.decode(2), 'dog')
test_stdout(lambda: show_at(tds,2), 'cat')
cat = Categorize(vocab=['dog', 'cat'], sort=False, add_na=True)
tds = Datasets(['cat', 'dog', 'cat'], tfms=[cat])
test_eq(cat.vocab, ['#na#', 'dog', 'cat'])
test_eq(cat('dog'), 1)
test_eq(cat.decode(2), 'cat')
test_stdout(lambda: show_at(tds,2), 'cat')
```
## Multicategorize -
```
# export
class MultiCategorize(Categorize):
"Reversible transform of multi-category strings to `vocab` id"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab=None, add_na=False): super().__init__(vocab=vocab,add_na=add_na,sort=vocab==None)
def setups(self, dsets):
if not dsets: return
if self.vocab is None:
vals = set()
for b in dsets: vals = vals.union(set(b))
self.vocab = CategoryMap(list(vals), add_na=self.add_na)
def encodes(self, o):
if not all(elem in self.vocab.o2i.keys() for elem in o):
diff = [elem for elem in o if elem not in self.vocab.o2i.keys()]
diff_str = "', '".join(diff)
raise KeyError(f"Labels '{diff_str}' were not included in the training dataset")
return TensorMultiCategory([self.vocab.o2i[o_] for o_ in o])
def decodes(self, o): return MultiCategory ([self.vocab [o_] for o_ in o])
#export
class MultiCategory(L):
def show(self, ctx=None, sep=';', color='black', **kwargs):
return show_title(sep.join(self.map(str)), ctx=ctx, color=color, **kwargs)
cat = MultiCategorize()
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], tfms=[cat])
test_eq(tds[3][0], TensorMultiCategory([]))
test_eq(cat.vocab, ['a', 'b', 'c'])
test_eq(cat(['a', 'c']), tensor([0,2]))
test_eq(cat([]), tensor([]))
test_eq(cat.decode([1]), ['b'])
test_eq(cat.decode([0,2]), ['a', 'c'])
test_stdout(lambda: show_at(tds,2), 'a;c')
# if vocab supplied, ensure it maintains its order (i.e., it doesn't sort)
cat = MultiCategorize(vocab=['z', 'y', 'x'])
test_eq(cat.vocab, ['z','y','x'])
test_fail(lambda: cat('bird'))
# export
class OneHotEncode(DisplayedTransform):
"One-hot encodes targets"
order=2
def __init__(self, c=None): store_attr()
def setups(self, dsets):
if self.c is None: self.c = len(L(getattr(dsets, 'vocab', None)))
if not self.c: warn("Couldn't infer the number of classes, please pass a value for `c` at init")
def encodes(self, o): return TensorMultiCategory(one_hot(o, self.c).float())
def decodes(self, o): return one_hot_decode(o, None)
```
Works in conjunction with `MultiCategorize` or on its own if you have one-hot encoded targets (pass a `vocab` for decoding and `do_encode=False` in this case)
```
_tfm = OneHotEncode(c=3)
test_eq(_tfm([0,2]), tensor([1.,0,1]))
test_eq(_tfm.decode(tensor([0,1,1])), [1,2])
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(), OneHotEncode()]])
test_eq(tds[1], [tensor([1.,0,0])])
test_eq(tds[3], [tensor([0.,0,0])])
test_eq(tds.decode([tensor([False, True, True])]), [['b','c']])
test_eq(type(tds[1][0]), TensorMultiCategory)
test_stdout(lambda: show_at(tds,2), 'a;c')
#hide
#test with passing the vocab
tds = Datasets([['b', 'c'], ['a'], ['a', 'c'], []], [[MultiCategorize(vocab=['a', 'b', 'c']), OneHotEncode()]])
test_eq(tds[1], [tensor([1.,0,0])])
test_eq(tds[3], [tensor([0.,0,0])])
test_eq(tds.decode([tensor([False, True, True])]), [['b','c']])
test_eq(type(tds[1][0]), TensorMultiCategory)
test_stdout(lambda: show_at(tds,2), 'a;c')
# export
class EncodedMultiCategorize(Categorize):
"Transform of one-hot encoded multi-category that decodes with `vocab`"
loss_func,order=BCEWithLogitsLossFlat(),1
def __init__(self, vocab):
super().__init__(vocab, sort=vocab==None)
self.c = len(vocab)
def encodes(self, o): return TensorMultiCategory(tensor(o).float())
    def decodes(self, o): return MultiCategory(one_hot_decode(o, self.vocab))
_tfm = EncodedMultiCategorize(vocab=['a', 'b', 'c'])
test_eq(_tfm([1,0,1]), tensor([1., 0., 1.]))
test_eq(type(_tfm([1,0,1])), TensorMultiCategory)
test_eq(_tfm.decode(tensor([False, True, True])), ['b','c'])
_tfm2 = EncodedMultiCategorize(vocab=['c', 'b', 'a'])
test_eq(_tfm2.vocab, ['c', 'b', 'a'])
#export
class RegressionSetup(DisplayedTransform):
"Transform that floatifies targets"
loss_func=MSELossFlat()
def __init__(self, c=None): store_attr()
def encodes(self, o): return tensor(o).float()
def decodes(self, o): return TitledFloat(o) if o.ndim==0 else TitledTuple(o_.item() for o_ in o)
def setups(self, dsets):
if self.c is not None: return
try: self.c = len(dsets[0]) if hasattr(dsets[0], '__len__') else 1
except: self.c = 0
_tfm = RegressionSetup()
dsets = Datasets([0, 1, 2], RegressionSetup)
test_eq(dsets.c, 1)
test_eq_type(dsets[0], (tensor(0.),))
dsets = Datasets([[0, 1, 2], [3,4,5]], RegressionSetup)
test_eq(dsets.c, 3)
test_eq_type(dsets[0], (tensor([0.,1.,2.]),))
#export
def get_c(dls):
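    # Infer the number of outputs: check `dls.c`, then the item/batch transforms,
    # and finally fall back to the length of the vocab.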
if getattr(dls, 'c', False): return dls.c
if getattr(getattr(dls.train, 'after_item', None), 'c', False): return dls.train.after_item.c
if getattr(getattr(dls.train, 'after_batch', None), 'c', False): return dls.train.after_batch.c
vocab = getattr(dls, 'vocab', [])
if len(vocab) > 0 and is_listy(vocab[-1]): vocab = vocab[-1]
return len(vocab)
```
## End-to-end dataset example with MNIST
Let's show how to use those functions to grab the mnist dataset in a `Datasets`. First we grab all the images.
```
path = untar_data(URLs.MNIST_TINY)
items = get_image_files(path)
```
Then we split between train and validation depending on the folder.
```
splitter = GrandparentSplitter()
splits = splitter(items)
train,valid = (items[i] for i in splits)
train[:3],valid[:3]
```
Our inputs are images that we open and convert to tensors, our targets are labeled depending on the parent directory and are categories.
```
from PIL import Image
def open_img(fn:Path): return Image.open(fn).copy()
def img2tensor(im:Image.Image): return TensorImage(array(im)[None])
tfms = [[open_img, img2tensor],
[parent_label, Categorize()]]
train_ds = Datasets(train, tfms)
x,y = train_ds[3]
xd,yd = decode_at(train_ds,3)
test_eq(parent_label(train[3]),yd)
test_eq(array(Image.open(train[3])),xd[0].numpy())
ax = show_at(train_ds, 3, cmap="Greys", figsize=(1,1))
assert ax.title.get_text() in ('3','7')
test_fig_exists(ax)
```
## ToTensor -
```
#export
class ToTensor(Transform):
"Convert item to appropriate tensor class"
order = 5
```
## IntToFloatTensor -
```
# export
class IntToFloatTensor(DisplayedTransform):
"Transform image to float tensor, optionally dividing by 255 (e.g. for images)."
order = 10 #Need to run after PIL transforms on the GPU
def __init__(self, div=255., div_mask=1): store_attr()
def encodes(self, o:TensorImage): return o.float().div_(self.div)
def encodes(self, o:TensorMask ): return o.long() // self.div_mask
def decodes(self, o:TensorImage): return ((o.clamp(0., 1.) * self.div).long()) if self.div else o
t = (TensorImage(tensor(1)),tensor(2).long(),TensorMask(tensor(3)))
tfm = IntToFloatTensor()
ft = tfm(t)
test_eq(ft, [1./255, 2, 3])
test_eq(type(ft[0]), TensorImage)
test_eq(type(ft[2]), TensorMask)
test_eq(ft[0].type(),'torch.FloatTensor')
test_eq(ft[1].type(),'torch.LongTensor')
test_eq(ft[2].type(),'torch.LongTensor')
```
## Normalization -
```
# export
def broadcast_vec(dim, ndim, *t, cuda=True):
"Make a vector broadcastable over `dim` (out of `ndim` total) by prepending and appending unit axes"
v = [1]*ndim
v[dim] = -1
f = to_device if cuda else noop
return [f(tensor(o).view(*v)) for o in t]
# export
@docs
class Normalize(DisplayedTransform):
"Normalize/denorm batch of `TensorImage`"
parameters,order = L('mean', 'std'),99
def __init__(self, mean=None, std=None, axes=(0,2,3)): store_attr()
@classmethod
def from_stats(cls, mean, std, dim=1, ndim=4, cuda=True): return cls(*broadcast_vec(dim, ndim, mean, std, cuda=cuda))
def setups(self, dl:DataLoader):
if self.mean is None or self.std is None:
x,*_ = dl.one_batch()
self.mean,self.std = x.mean(self.axes, keepdim=True),x.std(self.axes, keepdim=True)+1e-7
def encodes(self, x:TensorImage): return (x-self.mean) / self.std
def decodes(self, x:TensorImage):
f = to_cpu if x.device.type=='cpu' else noop
return (x*f(self.std) + f(self.mean))
_docs=dict(encodes="Normalize batch", decodes="Denormalize batch")
mean,std = [0.5]*3,[0.5]*3
mean,std = broadcast_vec(1, 4, mean, std)
batch_tfms = [IntToFloatTensor(), Normalize.from_stats(mean,std)]
tdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4, device=default_device())
x,y = tdl.one_batch()
xd,yd = tdl.decode((x,y))
test_eq(x.type(), 'torch.cuda.FloatTensor' if default_device().type=='cuda' else 'torch.FloatTensor')
test_eq(xd.type(), 'torch.LongTensor')
test_eq(type(x), TensorImage)
test_eq(type(y), TensorCategory)
assert x.mean()<0.0
assert x.std()>0.5
assert 0<xd.float().mean()/255.<1
assert 0<xd.float().std()/255.<0.5
#hide
nrm = Normalize()
batch_tfms = [IntToFloatTensor(), nrm]
tdl = TfmdDL(train_ds, after_batch=batch_tfms, bs=4)
x,y = tdl.one_batch()
test_close(x.mean(), 0.0, 1e-4)
assert x.std()>0.9, x.std()
#Just for visuals
from fastai.vision.core import *
tdl.show_batch((x,y))
#hide
x,y = cast(x,Tensor),cast(y,Tensor) #Lose type of tensors (to emulate predictions)
test_ne(type(x), TensorImage)
tdl.show_batch((x,y), figsize=(1,1)) #Check that types are put back by dl.
```
## Export -
```
#hide
from nbdev.export import notebook2script
notebook2script()
```
|
github_jupyter
|

[](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/webinars_conferences_etc/multi_lingual_webinar/4_Unsupervise_Chinese_Keyword_Extraction_NER_and_Translation_from_Chinese_News.ipynb)

```
import os
! apt-get update -qq > /dev/null
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! pip install nlu pyspark==2.4.4 > /dev/null
import nlu
import pandas as pd
! wget http://ckl-it.de/wp-content/uploads/2021/02/chinese_news.csv
```
# Analyzing Chinese News Articles With NLU
## This notebook showcases how to extract Chinese keywords unsupervised with YAKE, extract named entities, and translate them to English
### In addition, we will leverage the Chinese WordSegmenter and Lemmatizer to preprocess our data further and get a better view of our data distribution
# [Chinese official daily news](https://www.kaggle.com/noxmoon/chinese-official-daily-news-since-2016)

### Xinwen Lianbo is a daily news programme produced by China Central Television. It is shown simultaneously by all local TV stations in mainland China, making it one of the world's most-watched programmes. It has been broadcast since 1 January 1978.
— Wikipedia
```
df = pd.read_csv('./chinese_news.csv')
df
```
# Depending on how we pre-process our text, we will get different keywords extracted with YAKE. In this tutorial we will see the effect of **Lemmatization** and **Word Segmentation** and how the distribution of keywords changes
- Lemmatization
- Word Segmentation
# Apply YAKE - Keyword Extractor to the raw text
First we do no pre-processing at all and just calculate keywords from the raw titles with YAKE
```
yake_df = nlu.load('yake').predict(df.headline)
yake_df
```
## The predicted Chinese keywords don't show up in the Pandas plot labels, and you probably do not speak Chinese!
### This is why we will translate each extracted keyword into English and then take a look at the distribution again
```
yake_df.explode('keywords_classes').keywords_classes.value_counts()[0:100].plot.bar(title='Top 100 in Chinese News Articles. No Chinese Keywords :( So lets translate!', figsize=(20,8))
```
### We get the top 100 keywords and store the counts together with the keywords in a new DF
```
top_100_zh = yake_df.explode('keywords_classes').keywords_classes.value_counts()[0:100]
top_100_zh = pd.DataFrame(top_100_zh)
# Create new DF from the counts
top_100_zh['zh'] = top_100_zh.index
top_100_zh.reset_index(inplace=True)
top_100_zh
```
### Now we can just translate each predicted keyword with `zh.translate_to.en` in 1 line of code and see what is actually going on in the dataset
```
top_100_en = nlu.load('zh.translate_to.en').predict(top_100_zh.zh)
top_100_en
```
#### Write the translations into the df with the Keyword counts so we can plot them together in the next step
```
# Write translation back to the keyword df with the counts
top_100_zh['en']= top_100_en.translation
top_100_zh
```
## Now we can simply look at every keyword as a bar chart with its actual translation and understand which keywords appeared in Chinese news!
```
top_100_zh.index = top_100_zh.en
top_100_zh.keywords_classes.plot.barh(figsize=(20,20), title='Distribution of top 100 translated Chinese news keywords generated by the YAKE algorithm applied to RAW data')
```
# Apply YAKE to Segmented/Tokenized data
We gave the YAKE algorithm full headlines which were not segmented. To better understand the Chinese text, we can segment it into tokens and analyze their occurrence instead
## YAKE + Word Segmentation
```
# Segment words into tokenz with the word segmenter
# This will output 1 row per token
seg_df = nlu.load('zh.segment_words').predict(df.headline)
seg_df
```
### Join the tokens back as whitespace-separated strings for the YAKE keyword extraction in the next step
```
# Join the tokens back as whitespace-separated strings
joined_segs = seg_df.token.groupby(seg_df.index).transform(lambda x : ' '.join(x)).drop_duplicates()
joined_segs
```
### Now we can extract keywords with YAKE on the whitespace-separated tokens
```
seg_yake_df = nlu.load('yake').predict(joined_segs)
seg_yake_df
# Get the top 100 occurring keywords from the joined segmented tokens
top_100_seg_zh = seg_yake_df.explode('keywords_classes').keywords_classes.value_counts()[0:100]#.plot.bar(title='Top 100 in Chinese News Articles Segmented', figsize=(20,8))
top_100_seg_zh = pd.DataFrame(top_100_seg_zh )
top_100_seg_zh
```
## Get the top 100 keywords and translate them like we did for the raw data, as preparation for visualizing the keyword distribution
```
# Create new DF from the counts
top_100_seg_zh['zh'] = top_100_seg_zh.index
top_100_seg_zh.reset_index(inplace=True)
# Write Translations back to df with keyword counts
top_100_seg_zh['en'] = nlu.load('zh.translate_to.en').predict(top_100_seg_zh.zh).translation
```
### Visualize the distribution of the keywords extracted from the segmented tokens
We can observe that we now have a very different distribution than before
```
top_100_seg_zh.index = top_100_seg_zh.en
top_100_seg_zh.keywords_classes.plot.barh(figsize=(20,20), title = 'Segmented Keywords YAKE Distribution')
```
# Apply YAKE to Segmented and Lemmatized data
```
# Automated Word Segmentation Included!
zh_lem_df = nlu.load('zh.lemma').predict(df.headline)
zh_lem_df
```
## Join tokens into whitespace-separated strings like we did previously for Word Segmentation
```
zh_lem_df['lem_str'] = zh_lem_df.lemma.str.join(' ')
zh_lem_df
```
## Extract keywords on Lemmatized + Word Segmented Chinese text
```
yake_lem_df = nlu.load('yake').predict(zh_lem_df.lem_str)
yake_lem_df
top_100_stem = yake_lem_df.explode('keywords_classes').keywords_classes.value_counts()[:100]
top_100_stem = pd.DataFrame(top_100_stem)
# Create new DF from the counts
top_100_stem['zh'] = top_100_stem.index
top_100_stem.reset_index(inplace=True)
# Write Translations back to df with keyword counts
top_100_stem['en'] = nlu.load('zh.translate_to.en').predict(top_100_stem.zh).translation
top_100_stem
```
# Plot the Segmented and Lemmatized Distribution of extracted keywords
```
top_100_stem.index = top_100_stem.en
top_100_stem.keywords_classes.plot.barh(figsize=(20,20), title='Distribution of top 100 translated Chinese news keywords generated by the YAKE algorithm applied to Lemmatized and Segmented Chinese Text')
```
# Extract Chinese Named entities
```
zh_ner_df = nlu.load('zh.ner').predict(df.iloc[:1000].headline, output_level='document')
zh_ner_df
# Translate Detected Chinese Entities to English
en_entities = nlu.load('zh.translate_to.en').predict(zh_ner_df.explode('entities').entities)
en_entities
en_entities.translation.value_counts()[0:100].plot.barh(figsize=(20,20), title = "Top 100 Translated detected Named entities")
```
# There are many more models!
## Checkout [the Modelshub](https://nlp.johnsnowlabs.com/models) and the [NLU Namespace](https://nlu.johnsnowlabs.com/docs/en/namespace) for more models
|
github_jupyter
|
# 📃 Solution for Exercise M1.04
The goal of this exercise is to evaluate the impact of using an arbitrary
integer encoding for categorical variables along with a linear
classification model such as Logistic Regression.
To do so, let's try to use `OrdinalEncoder` to preprocess the categorical
variables. This preprocessor is assembled in a pipeline with
`LogisticRegression`. The generalization performance of the pipeline can be
evaluated by cross-validation and then compared to the score obtained when
using `OneHotEncoder` or to some other baseline score.
First, we load the dataset.
```
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
```
In the previous notebook, we used `sklearn.compose.make_column_selector` to
automatically select columns with a specific data type (also called `dtype`).
Here, we will use this selector to get only the columns containing strings
(column with `object` dtype) that correspond to categorical features in our
dataset.
```
from sklearn.compose import make_column_selector as selector
categorical_columns_selector = selector(dtype_include=object)
categorical_columns = categorical_columns_selector(data)
data_categorical = data[categorical_columns]
```
We filter our dataset so that it contains only categorical features.
Define a scikit-learn pipeline composed of an `OrdinalEncoder` and a `LogisticRegression` classifier.
Because `OrdinalEncoder` can raise errors if it sees an unknown category at
prediction time, you can set the `handle_unknown="use_encoded_value"` and
`unknown_value` parameters. You can refer to the
[scikit-learn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.OrdinalEncoder.html)
for more details regarding these parameters.
```
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
model = make_pipeline(
OrdinalEncoder(handle_unknown="use_encoded_value", unknown_value=-1),
LogisticRegression(max_iter=500))
```
Your model is now defined. Evaluate it using a cross-validation using
`sklearn.model_selection.cross_validate`.
```
from sklearn.model_selection import cross_validate
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
```
Using an arbitrary mapping from string labels to integers as done here causes
the linear model to make bad assumptions on the relative ordering of
categories.
This prevents the model from learning anything predictive enough and the
cross-validated score is even lower than the baseline we obtained by ignoring
the input data and just constantly predicting the most frequent class:
```
from sklearn.dummy import DummyClassifier
cv_results = cross_validate(DummyClassifier(strategy="most_frequent"),
data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
```
Now, we would like to compare the generalization performance of our previous
model with a new model where instead of using an `OrdinalEncoder`, we will
use a `OneHotEncoder`. Repeat the model evaluation using cross-validation.
Compare the score of both models and conclude on the impact of choosing a
specific encoding strategy when using a linear model.
```
from sklearn.preprocessing import OneHotEncoder
model = make_pipeline(
OneHotEncoder(handle_unknown="ignore"),
LogisticRegression(max_iter=500))
cv_results = cross_validate(model, data_categorical, target)
scores = cv_results["test_score"]
print("The mean cross-validation accuracy is: "
f"{scores.mean():.3f} +/- {scores.std():.3f}")
```
With the linear classifier chosen, using an encoding that does not assume any ordering leads to a much better result.
The important message here is: a linear model and `OrdinalEncoder` should be used together only for ordinal categorical features, i.e. features with a specific ordering. Otherwise, your model will perform poorly.
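As a small, self-contained illustration of that ordinal case (the toy `size` feature below is ours, not part of the exercise): when a feature has a known order, passing that order explicitly to `OrdinalEncoder` makes the integer codes meaningful for a linear model.
```
import pandas as pd
from sklearn.preprocessing import OrdinalEncoder

# toy ordinal feature with an explicit, meaningful order
sizes = pd.DataFrame({"size": ["small", "large", "medium", "small"]})
encoder = OrdinalEncoder(categories=[["small", "medium", "large"]])
print(encoder.fit_transform(sizes))  # [[0.], [2.], [1.], [0.]]
```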
|
github_jupyter
|
## Face and Facial Keypoint detection
After you've trained a neural network to detect facial keypoints, you can then apply this network to *any* image that includes faces. The neural network expects a Tensor of a certain size as input and, so, to detect any face, you'll first have to do some pre-processing.
1. Detect all the faces in an image using a face detector (we'll be using a Haar Cascade detector in this notebook).
2. Pre-process those face images so that they are grayscale, and transformed to a Tensor of the input size that your net expects. This step will be similar to the `data_transform` you created and applied in Notebook 2, whose job was to rescale, normalize, and turn any image into a Tensor to be accepted as input to your CNN.
3. Use your trained model to detect facial keypoints on the image.
---
In the next python cell we load in required libraries for this section of the project.
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
%matplotlib inline
```
#### Select an image
Select an image to perform facial keypoint detection on; you can select any image of faces in the `images/` directory.
```
import cv2
# load in color image for face detection
image = cv2.imread('images/obamas.jpg')
# switch red and blue color channels
# --> by default OpenCV assumes BLUE comes first, not RED as in many images
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# plot the image
fig = plt.figure(figsize=(9,9))
plt.imshow(image)
```
## Detect all faces in an image
Next, you'll use one of OpenCV's pre-trained Haar Cascade classifiers, all of which can be found in the `detector_architectures/` directory, to find any faces in your selected image.
In the code below, we loop over each face in the original image and draw a red square on each face (in a copy of the original image, so as not to modify the original). You can even [add eye detections](https://docs.opencv.org/3.4.1/d7/d8b/tutorial_py_face_detection.html) as an *optional* exercise in using Haar detectors.
An example of face detection on a variety of images is shown below.
<img src='images/haar_cascade_ex.png' width=80% height=80%/>
```
# load in a haar cascade classifier for detecting frontal faces
face_cascade = cv2.CascadeClassifier('detector_architectures/haarcascade_frontalface_default.xml')
# run the detector
# the output here is an array of detections; the corners of each detection box
# if necessary, modify these parameters until you successfully identify every face in a given image
faces = face_cascade.detectMultiScale(image, 1.2, 2)
# make a copy of the original image to plot detections on
image_with_detections = image.copy()
# loop over the detected faces, mark the image where each face is found
for (x,y,w,h) in faces:
# draw a rectangle around each detected face
# you may also need to change the width of the rectangle drawn depending on image resolution
cv2.rectangle(image_with_detections,(x,y),(x+w,y+h),(255,0,0),3)
fig = plt.figure(figsize=(9,9))
plt.imshow(image_with_detections)
```
## Loading in a trained model
Once you have an image to work with (and, again, you can select any image of faces in the `images/` directory), the next step is to pre-process that image and feed it into your CNN facial keypoint detector.
First, load your best model by its filename.
```
import torch
from models import Net
net = Net()
## TODO: load the best saved model parameters (by your path name)
## You'll need to un-comment the line below and add the correct name for *your* saved model
# net.load_state_dict(torch.load('saved_models/keypoints_model_1.pt'))
## print out your net and prepare it for testing (uncomment the line below)
# net.eval()
```
## Keypoint detection
Now, we'll loop over each detected face in an image (again!), only this time you'll transform those faces into Tensors that your CNN can accept as input.
### TODO: Transform each detected face into an input Tensor
You'll need to perform the following steps for each detected face:
1. Convert the face from RGB to grayscale
2. Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
3. Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
4. Reshape the numpy image into a torch image.
**Hint**: The sizes of faces detected by a Haar detector and the faces your network has been trained on are of different sizes. If you find that your model is generating keypoints that are too small for a given face, try adding some padding to the detected `roi` before giving it as input to your model.
You may find it useful to consult the transformation code in `data_load.py` to help you perform these processing steps.
### TODO: Detect and display the predicted keypoints
After each face has been appropriately converted into an input Tensor for your network, you can apply your `net` to each face. The output should be the predicted facial keypoints. These keypoints will need to be "un-normalized" for display, and you may find it helpful to write a helper function like `show_keypoints`. You should end up with an image like the following, with facial keypoints that closely match the facial features on each individual face:
<img src='images/michelle_detected.png' width=30% height=30%/>
```
image_copy = np.copy(image)
# loop over the detected faces from your haar cascade
for (x,y,w,h) in faces:
# Select the region of interest that is the face in the image
roi = image_copy[y:y+h, x:x+w]
## TODO: Convert the face region from RGB to grayscale
## TODO: Normalize the grayscale image so that its color range falls in [0,1] instead of [0,255]
## TODO: Rescale the detected face to be the expected square size for your CNN (224x224, suggested)
## TODO: Reshape the numpy image shape (H x W x C) into a torch image shape (C x H x W)
## TODO: Make facial keypoint predictions using your loaded, trained network
## TODO: Display each detected face and the corresponding keypoints
```
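One possible way to fill in these TODOs is sketched below. It is only an illustration under assumptions: the `padding` margin, the 224x224 input size, and the un-normalization constants 50 and 100 are illustrative choices, and `net` must already be loaded and set to `eval()` as described above.
```
import cv2
import torch

padding = 40  # extra margin around each Haar detection (tune as needed)

for (x, y, w, h) in faces:
    # crop the face region of interest with some padding
    roi = image_copy[max(y - padding, 0):y + h + padding,
                     max(x - padding, 0):x + w + padding]
    # convert from RGB to grayscale and scale pixel values into [0, 1]
    roi_gray = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY) / 255.0
    # resize to the square input size the CNN expects (224x224 assumed)
    roi_resized = cv2.resize(roi_gray, (224, 224))
    # reshape (H x W) -> (1, 1, H, W): add batch and channel dimensions
    roi_tensor = torch.from_numpy(roi_resized).float().unsqueeze(0).unsqueeze(0)
    # forward pass through the trained network
    output = net(roi_tensor)
    # "un-normalize" the keypoints before display; the constants here are assumptions
    keypoints = output.view(-1, 2).detach().numpy() * 50.0 + 100.0
    # (displaying the face and keypoints, e.g. with matplotlib, is left as in the TODOs)
```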
|
github_jupyter
|
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import gzip
#loading the data from the given file
image_size = 28
num_images = 55000
f = gzip.open('train-images-idx3-ubyte.gz','r')
f.read(16)
buf = f.read(image_size * image_size * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = data.reshape(num_images, image_size, image_size, 1)
# printing one of the images
image = np.asarray(data[550]).squeeze()
plt.imshow(image)
plt.show()
#storing the data in the form of matrix
X=np.asarray(data[:])
X=X.squeeze()
X=X.reshape(X.shape[0],X.shape[2]*X.shape[1])
X=X.T/255
X.shape
# number of features and number of data points in the data matrix
m=X.shape[1]
n=X.shape[0]
print(m)
print(n)
#loading the labels
f = gzip.open('train-labels-idx1-ubyte.gz','r')
f.read(8)
Y = np.zeros((1,m))
for i in range(0,m):
buf = f.read(1)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
Y[0,i]=labels
print(Y[0,550])
print(Y.shape)
Y1= np.zeros((10,m))
for i in range (0,m):
for j in range(0,10):
if(j==int(Y[0,i])):
Y1[j,i]=1
else:
Y1[j,i]=0
Y=Y1
```
Alternatively, the same images and labels can be loaded from a CSV file:
```
df = pd.read_csv('Downloads/mnist_train.csv',header = None)
data = np.array(df)
X = (data[:,1:].transpose())/255
m = X.shape[1]
n = X.shape[0]
Y_orig = data[:,0:1].transpose()
Y = np.zeros((10,m))
for i in range(m):
    Y[int(Y_orig[0,i]),i] = 1
```
Next, we define the activation functions, parameter initialization, forward and backward propagation, and the training loop:
```
def relu(Z):
result = (Z + np.abs(Z))/2
return result
def relu_backward(Z):
    # derivative of ReLU: 1 where Z > 0, else 0 (avoids dividing by zero at Z == 0)
    result = (Z > 0).astype(float)
    return result
def softmax(Z):
temp = np.exp(Z)
result = temp/np.sum(temp,axis = 0,keepdims = True)
return result
def initialize_parameters(layer_dims):
parameters = {}
L = len(layer_dims) - 1
for l in range(1,L + 1):
parameters["W" + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1])*0.01
parameters["b" + str(l)] = np.zeros((layer_dims[l],1))
#print(parameters)
return parameters
def forward_prop(X,parameters):
cache = {}
L = len(layer_dims) - 1
A_prev = X
for l in range(1,L):
Z = parameters["W" + str(l)].dot(A_prev) + parameters["b" + str(l)]
A = relu(Z)
cache["Z" + str(l)] = Z
A_prev = A
Z = parameters["W" + str(L)].dot(A_prev) + parameters["b" + str(L)]
AL = softmax(Z)
cache["Z" + str(L)] = Z
return AL,cache
def compute_cost(AL,Y):
m = AL.shape[1]
cost = (np.sum(-(Y * np.log(AL))))/(m)
return cost
def backward_prop(X,Y,cache,parameters,AL,layer_dims):
m = X.shape[1]
dparameters = {}
L = len(layer_dims) - 1
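    # With a softmax output layer and a cross-entropy cost, the gradient of the
    # cost with respect to the last layer's pre-activation simplifies to dZ = AL - Y.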
dZ = AL - Y
dparameters["dW" + str(L)] = dZ.dot(relu(cache["Z" + str(L-1)]).transpose())/m
#dparameters["dW" + str(L)] = dZ.dot(X.transpose())/m
dparameters["db" + str(L)] = np.sum(dZ,axis = 1,keepdims = True)/m
for l in range(1,L):
dZ = ((parameters["W" + str(L-l+1)].transpose()).dot(dZ)) * (relu_backward(cache["Z" + str(L-l)]))
if L-l-1 != 0:
dparameters["dW" + str(L-l)] = dZ.dot(relu(cache["Z" + str(L-1-l)]).transpose())/m
else:
dparameters["dW" + str(L-l)] = dZ.dot(X.transpose())/m
dparameters["db" + str(L-l)] = np.sum(dZ,axis = 1,keepdims = True)/m
return dparameters
def update_parameters(parameters,dparameters,layer_dims,learning_rate):
L = len(layer_dims) - 1
for l in range(1,L+1):
parameters["W" + str(l)] = parameters["W" + str(l)] - learning_rate*dparameters["dW" + str(l)]
parameters["b" + str(l)] = parameters["b" + str(l)] - learning_rate*dparameters["db" + str(l)]
return parameters
def model(X,Y,layer_dims,learning_rate,num_iters):
costs = []
parameters = initialize_parameters(layer_dims)
for i in range(num_iters):
AL,cache = forward_prop(X,parameters)
cost = compute_cost(AL,Y)
costs.append(cost)
dparameters = backward_prop(X,Y,cache,parameters,AL,layer_dims)
parameters = update_parameters(parameters,dparameters,layer_dims,learning_rate)
print(i,"\t",cost)
return parameters,costs
# training
layer_dims = [784,120,10]
parameters,costs = model(X,Y,layer_dims,0.5,2000)
plt.plot(costs)
# testing
df = pd.read_csv('mnist_test.csv',header = None)
data = np.array(df)
X_test = (data[:,1:].transpose())/255
Y_test = data[:,0:1].transpose()
accuracy = 0
m_test = X_test.shape[1]
predict = np.zeros((1,m_test))
A_test,cache = forward_prop(X_test,parameters)
for i in range(m_test):
max = 0
for j in range(10):
if A_test[j,i] > max:
max = A_test[j,i]
max_index = j
predict[0,i] = max_index
if predict[0,i] == Y_test[0,i]:
accuracy = accuracy + 1
accuracy = (accuracy/m_test)*100
print(accuracy,"%")
index = 0
# change index to view different examples
index = 897
print("Its a",int(predict[0,index]))
plt.imshow(X_test[:,index].reshape(28,28))
```
|
github_jupyter
|
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Partial Differential Equations
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/pdes.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/community/en/pdes.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
TensorFlow isn't just for machine learning. Here you will use TensorFlow to simulate the behavior of a [partial differential equation](https://en.wikipedia.org/wiki/Partial_differential_equation). You'll simulate the surface of a square pond as a few raindrops land on it.
## Basic setup
A few imports you'll need.
```
#Import libraries for simulation
import tensorflow as tf
assert tf.__version__.startswith('2')
import numpy as np
#Imports for visualization
import PIL.Image
from io import BytesIO
from IPython.display import clear_output, Image, display
```
A function for displaying the state of the pond's surface as an image.
```
def DisplayArray(a, fmt='jpeg', rng=[0,1]):
"""Display an array as a picture."""
a = (a - rng[0])/float(rng[1] - rng[0])*255
a = np.uint8(np.clip(a, 0, 255))
f = BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
clear_output(wait = True)
display(Image(data=f.getvalue()))
```
## Computational convenience functions
```
@tf.function
def make_kernel(a):
"""Transform a 2D array into a convolution kernel"""
a = np.asarray(a)
a = a.reshape(list(a.shape) + [1,1])
return tf.constant(a, dtype=1)
@tf.function
def simple_conv(x, k):
"""A simplified 2D convolution operation"""
x = tf.expand_dims(tf.expand_dims(x, 0), -1)
y = tf.nn.depthwise_conv2d(input=x, filter=k, strides=[1, 1, 1, 1], padding='SAME')
return y[0, :, :, 0]
@tf.function
def laplace(x):
"""Compute the 2D laplacian of an array"""
laplace_k = make_kernel([[0.5, 1.0, 0.5],
[1.0, -6., 1.0],
[0.5, 1.0, 0.5]])
return simple_conv(x, laplace_k)
```
## Define the PDE
Your pond is a perfect 500 x 500 square, as is the case for most ponds found in nature.
```
N = 500
```
Here you create your pond and hit it with some rain drops.
```
# Initial Conditions -- some rain drops hit a pond
# Set everything to zero
u_init = np.zeros([N, N], dtype=np.float32)
ut_init = np.zeros([N, N], dtype=np.float32)
# Some rain drops hit a pond at random points
for n in range(40):
a,b = np.random.randint(0, N, 2)
u_init[a,b] = np.random.uniform()
DisplayArray(u_init, rng=[-0.1, 0.1])
```
Now let's specify the details of the differential equation.
```
# Parameters:
# eps -- time resolution
# damping -- wave damping
eps = 0.03
damping = 0.04
# Create variables for simulation state
U = tf.Variable(u_init)
Ut = tf.Variable(ut_init)
```
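As a side note, the update rule used in the loop below can be read as a forward-Euler discretization of a damped wave equation:
\begin{equation}
\frac{\partial^2 u}{\partial t^2} = \nabla^2 u - d\,\frac{\partial u}{\partial t}
\end{equation}
which, with time step $\epsilon$ (`eps`) and damping coefficient $d$ (`damping`), is stepped forward as
\begin{equation}
U \leftarrow U + \epsilon\, U_t, \qquad U_t \leftarrow U_t + \epsilon\,\big(\nabla^2 U - d\, U_t\big).
\end{equation}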
## Run the simulation
This is where it gets fun -- running time forward with a simple for loop.
```
# Run 1000 steps of PDE
for i in range(1000):
# Step simulation
# Discretized PDE update rules
U = U + eps * Ut
Ut = Ut + eps * (laplace(U) - damping * Ut)
# Show final image
DisplayArray(U.numpy(), rng=[-0.1, 0.1])
```
Look! Ripples!
|
github_jupyter
|
# NumPy
Numpy is the core library for scientific computing in Python. <br/>
It provides a high-performance multidimensional array object, and tools for working with these arrays. <br/>
Official NumPy Documentation: https://numpy.org/doc/stable/reference/
```
# Install NumPy
# ! pip install numpy
```
Since NumPy is not loaded by default in Python, we import the library. Importing it under an alias (here `np`) lets us access all of its functions and types through that prefix.
```
# Import NumPy
import numpy as np
```
# NumPy Arrays
A grid of values, all of the same type. <br/>
**Rank:** number of dimensions of the array <br/>
**Shape:** a tuple of integers giving the size of the array along each dimension.
```
# Rank 1 array
a = np.array([1, 2, 3])
print(type(a)) # Prints data type
print(a.shape)
print(a[0], a[1], a[2]) # Indexing
a[0] = 5 # Assigning
print(a)
# Rank 2 array
b = np.array([ [1,2,3],
[4,5,6]
])
'''
# of elements in first 3rd bracket => 2
# of elements in second 3rd bracket => 3
'''
print(b.shape)
print(b[0, 0], b[0, 1], b[1, 0], b[1,2])
```
## Special Arrays
```
a = np.zeros((6,4)) # Create an array of all zeros
a
np.zeros_like(b,dtype=float)
b = np.ones((3,2)) # Create an array of all ones
b
c = np.full((6,4), 7) # Create a constant array
c
d = np.eye(5) # Create a 5x5 identity matrix
d
e = np.random.random((4,3)) # Create an array filled with random values
e
```
## Indexing
```
a = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
a
a[:2,:3]
b = a[:2, 1:3]
b
print(a[0, 1]) # Prints "2"
b[0, 0] = 77 # b[0, 0] is the same piece of data as a[0, 1]
print(a[0, 1]) # Prints "77"
a[1, :]
a[1:2, :]
a[:, 1]
a[:, 1:2]
np.arange(2,10,2)
```
## Boolean array indexing
```
a
bool_idx = (a>10)
bool_idx
a[bool_idx]
a [ a>10 ]
```
# Data Types
```
x = np.array([1, 2])
print(x.dtype)
x = np.array([1.0, 2.0])
print(x.dtype)
x = np.array([1, 2], dtype=np.float64) # Forcing a particular datatype
print(x,x.dtype)
x.dtype
```
# Operations
```
x = np.array([[1,2],[3,4]], dtype=np.float64)
y = np.array([[5,6],[7,8]], dtype=np.float64)
x,y
# Adding two arrays element-wise
print(x + y)
print(np.add(x, y))
# Subtracting two arrays element-wise
print(x - y)
print(np.subtract(x, y))
# Multiplication element-wise
print(x * y)
print(np.multiply(x, y))
# Elementwise division
print(x / y)
print(np.divide(x, y))
# Elementwise square root
print(np.sqrt(x))
# Matrix Multiplication
print(x.dot(y))
print(np.dot(x, y))
x
# Sum of all elements in the array
np.sum(x)
print(np.sum(x, axis=0)) # Compute sum of each column
print(np.sum(x, axis=1)) # Compute sum of each row
a
# Transpose
a.T
```
# Broadcasting
```
x = np.array([[1,2,3], [4,5,6], [7,8,9], [10, 11, 12]])
v = np.array([1, 0, 1])
y = x + v # Add v to each row of x using broadcasting
print(y)
x = np.array([[1,2,3], [4,5,6]])
y = np.array([4,5])
(x.T+y).T
x, x.shape
x.T, x.T.shape
y, y.shape
x.T+y
(x.T+y).T
x*2
x+2
```
|
github_jupyter
|
# Backtest Orbit Model
In this section, we will cover:
- How to create a TimeSeriesSplitter
- How to create a BackTester and retrieve the backtesting results
- How to leverage backtesting to tune the hyper-parameters of Orbit models
```
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import orbit
from orbit.models import LGT, DLT
from orbit.diagnostics.backtest import BackTester, TimeSeriesSplitter
from orbit.diagnostics.plot import plot_bt_predictions
from orbit.diagnostics.metrics import smape, wmape
from orbit.utils.dataset import load_iclaims
import warnings
warnings.filterwarnings('ignore')
print(orbit.__version__)
# load log-transformed data
data = load_iclaims()
data.shape
```
The way to gauge the performance of a time-series model is to re-train the model with different historic periods and check its forecasts over a certain number of steps. This is similar to time-based cross-validation; in time-series modeling it is more often called a `backtest`.
The purpose of this notebook is to illustrate how to `backtest` a single model using `BackTester`.
`BackTester` composes a `TimeSeriesSplitter` within it, but `TimeSeriesSplitter` is useful as a standalone, in case there are other tasks to perform that require splitting but not backtesting. `TimeSeriesSplitter` implements each 'slice' as a generator, i.e. it can be used in a for loop. You can also retrieve the composed `TimeSeriesSplitter` object from `BackTester` to utilize its additional methods.
Currently, there are two schemes supported for the back-testing engine: expanding window and rolling window.
* **expanding window**: for each back-testing model training, the train start date is fixed, while the train end date is extended forward.
* **rolling window**: for each back-testing model training, the training window length is fixed but the window is moving forward.
## Create a TimeSeriesSplitter
There are two main ways of splitting a time series: expanding and rolling. An expanding window has a fixed starting point, and the window length grows as we move forward in the time series. It is useful when we want to incorporate all historical information. On the other hand, a rolling window has a fixed window length, and the starting point of the window moves forward as we move forward in the time series. Now, we will illustrate how to use `TimeSeriesSplitter` to split the claims time series.
### Expanding window
```
# configs
min_train_len = 380 # minimal training window length
forecast_len = 20 # length of the forecast window
incremental_len = 20 # step length for moving forward
ex_splitter = TimeSeriesSplitter(df=data,
min_train_len=min_train_len,
incremental_len=incremental_len,
forecast_len=forecast_len,
window_type='expanding',
date_col='week')
print(ex_splitter)
```
We can visualize the splits: green is the training window and yellow is the forecasting window. The starting point is always 0 for the three splits, but the window length increases from 380 to 420.
```
_ = ex_splitter.plot()
```
### Rolling window
```
# configs
min_train_len = 380 # in case of a rolling window, this specifies the training window length
forecast_len = 20 # length of the forecast window
incremental_len = 20 # step length for moving forward
roll_splitter = TimeSeriesSplitter(data,
min_train_len=min_train_len,
incremental_len=incremental_len,
forecast_len=forecast_len,
window_type='rolling', date_col='week')
```
We can visualize the splits: green is the training window and yellow is the forecasting window. The window length is always 380, while the starting point moves forward 20 weeks each step.
```
_ = roll_splitter.plot()
```
### Specifying number of splits
Users can also define the number of splits using `n_splits` instead of specifying the minimum training length. That way, the minimum training length will be calculated automatically.
```
ex_splitter2 = TimeSeriesSplitter(data,
min_train_len=min_train_len,
incremental_len=incremental_len,
forecast_len=forecast_len,
n_splits=5,
window_type='expanding', date_col='week')
_ = ex_splitter2.plot()
```
### TimeSeriesSplitter as generator
`TimeSeriesSplitter` is implemented as a generator, therefore we can call `split()` to loop through it. It comes in handy even for tasks other than backtesting.
```
for train_df, test_df, scheme, key in roll_splitter.split():
print('Initial Claim slice {} rolling mean:{:.3f}'.format(key, train_df['claims'].mean()))
```
## Create a BackTester
Now we are ready to backtest. First, let's initialize a `DLT` model and a `BackTester`. You pass the `TimeSeriesSplitter` parameters into `BackTester`.
```
# instantiate a model
dlt = DLT(
date_col='week',
response_col='claims',
regressor_col=['trend.unemploy', 'trend.filling', 'trend.job'],
seasonality=52,
estimator='stan-map',
)
# configs
min_train_len = 100
forecast_len = 20
incremental_len = 100
window_type = 'expanding'
bt = BackTester(
model=dlt,
df=data,
min_train_len=min_train_len,
incremental_len=incremental_len,
forecast_len=forecast_len,
window_type=window_type,
)
```
## Backtest fit and predict
The most expensive portion of backtesting is fitting the model iteratively. Thus, we separate the API calls for `fit_predict` and `score` to avoid redundant computation across multiple metrics or scoring methods.
```
bt.fit_predict()
```
Once `fit_predict()` is called, the fitted models and predictions can be easily retrieved from `BackTester`. Here the data is grouped by the date, split_key, and whether or not that observation is part of the training or test data
```
predicted_df = bt.get_predicted_df()
predicted_df.head()
```
We also provide a plotting utility to visualize the predictions against the actuals for each split.
```
plot_bt_predictions(predicted_df, metrics=smape, ncol=2, include_vline=True);
```
Users might find this useful for any custom computations that may need to be performed on the set of predicted data. Note that the columns are renamed to generic and consistent names.
Sometimes, it might be useful to match the data back to the original dataset for ad-hoc diagnostics. This can easily be done by merging back to the original dataset:
```
predicted_df.merge(data, left_on='date', right_on='week')
```
## Backtest Scoring
The main purpose of `BackTester` is to compute evaluation metrics. Some of the most widely used metrics are implemented and built into the `BackTester` API.
The default metric list is **smape, wmape, mape, mse, mae, rmsse**.
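For reference, the two headline metrics are commonly defined as follows (the authoritative definitions are the implementations in `orbit.diagnostics.metrics`):
\begin{equation}
\text{SMAPE} = \frac{1}{n}\sum_{t=1}^{n}\frac{2\,|y_t-\hat{y}_t|}{|y_t|+|\hat{y}_t|}, \qquad
\text{WMAPE} = \frac{\sum_{t=1}^{n}|y_t-\hat{y}_t|}{\sum_{t=1}^{n}|y_t|}
\end{equation}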
```
bt.score()
```
It is possible to filter for only specific metrics of interest, or even implement your own callable and pass it into the `score()` method. For example, see this function that uses the last observed value as a predictor and computes the `mse`, or `naive_error`, which computes the error as the delta between the predicted values and the training period mean.
Note these are not really useful error metrics, just examples of callables you can use ;)
```
def mse_naive(test_actual):
actual = test_actual[1:]
predicted = test_actual[:-1]
return np.mean(np.square(actual - predicted))
def naive_error(train_actual, test_predicted):
train_mean = np.mean(train_actual)
return np.mean(np.abs(test_predicted - train_mean))
bt.score(metrics=[mse_naive, naive_error])
```
It doesn't take additional time to refit and predict the model, since the results are stored when `fit_predict()` is called. Check the docstrings for the criteria a callable must meet to be supported by this API.
In some cases, we may want to evaluate our metrics on both train and test data. To do this, you can call score again with the following indicator:
```
bt.score(include_training_metrics=True)
```
## Backtest Get Models
In cases where `BackTester` doesn't cut it or for more custom use-cases, there's an interface to export the `TimeSeriesSplitter` and predicted data, as shown earlier. It's also possible to get each of the fitted models for deeper diving
```
fitted_models = bt.get_fitted_models()
model_1 = fitted_models[0]
model_1.get_regression_coefs()
```
`BackTester` composes a `TimeSeriesSplitter` within it, but `TimeSeriesSplitter` can also be created on its own as a standalone object. See the section above on `TimeSeriesSplitter` for more details on how to use the splitter.
All of the additional `TimeSeriesSplitter` args can also be passed into `BackTester` on instantiation:
```
ts_splitter = bt.get_splitter()
_ = ts_splitter.plot()
```
## Hyperparameter Tuning
After seeing the results from the backtest, users may wish to fine-tune the hyperparameters. Orbit also provides a `grid_search_orbit` utility for parameter searching. It uses `BackTester` under the hood, so users can compare backtest metrics for different parameter combinations.
```
from orbit.utils.params_tuning import grid_search_orbit
# defining the search space for the level smoothing parameter and the seasonality smoothing parameter
param_grid = {
'level_sm_input': [0.3, 0.5, 0.8],
'seasonality_sm_input': [0.3, 0.5, 0.8],
}
# configs
min_train_len = 380 # in case of a rolling window, this specifies the training window length
forecast_len = 20 # length of the forecast window
incremental_len = 20 # step length for moving forward
best_params, tuned_df = grid_search_orbit(param_grid,
model=dlt,
df=data,
min_train_len=min_train_len,
incremental_len=incremental_len,
forecast_len=forecast_len,
metrics=None, criteria=None, verbose=True)
tuned_df.head() # backtest output for each parameter searched
best_params # output best parameters
```
|
github_jupyter
|
# Numeric representation of words and texts
In this notebook we will present ways to represent textual values through a numeric representation. We will use pandas; if you want to understand a bit about pandas, [see this notebook](pandas.ipynb). So don't forget to install the pandas module:
``pip3 install pandas``
In machine learning we often need a numeric representation of a given value. For example:
```
import pandas as pd
df_jogos = pd.DataFrame([ ["boa","nublado","não"],
["boa","chuvoso","não"],
["média","nublado","sim"],
["fraca","chuvoso","não"]],
columns=["disposição","tempo","jogar volei?"])
df_jogos
```
If we want to map each column (now called an attribute) to a value, the simplest way to perform the transformation is to map that attribute directly to a numeric value. See the example below:
In this example we have two attributes, the player's disposition and the weather, and we want to predict whether the player will play volleyball or not. Both the attributes and the class can be mapped to numbers. Moreover, the `disposição` attribute represents a scale, which makes this kind of transformation well suited for it.
```
from typing import Dict
def mapeia_atributo_para_int(df_data:pd.DataFrame, coluna:str, dic_nom_to_int: Dict[int,str]):
for i,valor in enumerate(df_data[coluna]):
valor_int = dic_nom_to_int[valor]
df_data[coluna].iat[i] = valor_int
df_jogos = pd.DataFrame([ ["boa","nublado","sim"],
["boa","chuvoso","não"],
["média","ensolarado","sim"],
["fraca","chuvoso","não"]],
columns=["disposição","tempo","jogar volei?"])
dic_disposicao = {"boa":3,"média":2,"fraca":1}
mapeia_atributo_para_int(df_jogos, "disposição", dic_disposicao)
dic_tempo = {"ensolarado":3,"nublado":2,"chuvoso":1}
mapeia_atributo_para_int(df_jogos, "tempo", dic_tempo)
dic_volei = {"sim":1, "não":0}
mapeia_atributo_para_int(df_jogos, "jogar volei?", dic_volei)
df_jogos
```
## Binarization of categorical attributes
We can binarize the categorical attributes so that each attribute value becomes a column that receives `0` when that value is absent and `1` otherwise. In our example:
```
from preprocessamento_atributos import BagOfItems
df_jogos = pd.DataFrame([ [4, "boa","nublado","sim"],
[3,"boa","chuvoso","não"],
[2,"média","ensolarado","sim"],
[1,"fraca","chuvoso","não"]],
columns=["id","disposição","tempo","jogar volei?"])
dic_disposicao = {"boa":3,"média":2,"fraca":1}
bag_of_tempo = BagOfItems(0)
# see the implementation of this method in preprocessamento_atributos.py
df_jogos_bot = bag_of_tempo.cria_bag_of_items(df_jogos,["tempo"])
df_jogos_bot
```
Since there are several values in the test set that you do not know beforehand, doing it this way means that attributes present only in the test set could be completely zeroed in the training set and therefore useless. For example:
```
df_jogos_treino = df_jogos[:2]
df_jogos_treino
df_jogos_teste = df_jogos[2:]
df_jogos_teste
```
## Real Example
Consider this real example of movies and their actors ([obtained from Kaggle](https://www.kaggle.com/rounakbanik/the-movies-dataset)):
```
import pandas as pd
df_amostra = pd.read_csv("movies_amostra.csv")
df_amostra
```
In this example, the columns that represent the main actors can be binarized. In our case, we can put all the actors into a "Bag of Items". The actors are represented by the columns `ator_1`, `ator_2`, ..., `ator_5`. Below is a suggestion of how to do this on the dataset:
```
import pandas as pd
from preprocessamento_atributos import BagOfItems
obj_bag_of_actors = BagOfItems(min_occur=3)
#boa=bag of actors ;)
df_amostra_boa = obj_bag_of_actors.cria_bag_of_items(df_amostra,["ator_1","ator_2","ator_3","ator_4","ator_5"])
df_amostra_boa
```
Note that we have quite a lot of attributes, one per actor. Even though it is better to have fewer, more informative attributes, a machine learning method may still be able to use this quantity effectively. In particular, the [linear SVM](https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html) and [RandomForest](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) are methods that tend to do well on this kind of data.
This is the most practical way to do it. However, in machine learning we usually split our data into at least training and test sets, where training is the data you have full access to and the test set should reproduce a sample of the real world. Suppose the training set contains rare actors that do not occur in the test set; in that case, such attributes would be useless for the test. This can make the result less representative of the real world, although here the difference is very likely almost insignificant. But if we want to do it the "more correct" way, we have to consider only the training data for this:
```
# assuming that 80% of the sample is the training set
df_treino_amostra = df_amostra.sample(frac=0.8, random_state = 2)
df_teste_amostra = df_amostra.drop(df_treino_amostra.index)
# min_occur=3 defines the minimum number of occurrences of an actor for them to be considered,
# since an actor who appeared in only a few movies may be less relevant for predicting the genre
obj_bag_of_actors = BagOfItems(min_occur=3)
df_treino_amostra_boa = obj_bag_of_actors.cria_bag_of_items(df_treino_amostra,["ator_1","ator_2","ator_3","ator_4","ator_5"])
df_teste_amostra_boa = obj_bag_of_actors.aplica_bag_of_items(df_teste_amostra,["ator_1","ator_2","ator_3","ator_4","ator_5"])
```
## Bag of Words Representation
We often have texts that can be relevant for a given machine learning task, so we have to represent such elements for our machine learning method.
The most common way to do this is the `Bag of Words`, in which each word is an attribute and its value is the word's frequency in the text (or some other value that indicates the importance of that word in the text).
For example, suppose we have the sentences `A casa é grande` and `A casa é verde verde`, where each sentence is a different instance. The representation would be as follows:
```
dic_bow = {"a":[1,1],
"casa":[1,1],
"é":[1,1],
"verde":[0,2]
}
df_bow = pd.DataFrame.from_dict(dic_bow)
df_bow
```
In the way we did above, we used a term's frequency to define its importance in the text. However, there are terms with a very high frequency and low importance: this is the case of articles and prepositions, for example, since they do not discriminate the text.
One way to measure the discriminative power of words is the `TF-IDF` metric. To compute this metric, we first compute the frequency of a term in the document (TF) and then multiply it by the IDF.
The formula to compute the TF-IDF of term $i$ in document (or instance) $j$ is the following:
\begin{equation}
TFIDF_{ij} = TF_{ij} \times IDF_i
\end{equation}
\begin{equation}
TF_{ij} = log(f_{ij})
\end{equation}
where $f_{ij}$ is the frequency of term $i$ in document $j$. The `log` is used to smooth very high values, and the $IDF$ (_Inverse Document Frequency_) of term $i$ is computed as follows:
\begin{equation}
IDF_i = log(\frac{N}{n_i})
\end{equation}
where $N$ is the number of documents in the collection and $n_i$ is the number of documents in which term $i$ occurs. We expect that the more discriminative a term is, the fewer documents it will occur in and, consequently, the higher its $IDF$ will be.
For example, consider the words `de` (of), `bebida` (drink) and `cerveja` (beer). `cerveja` is a more discriminative word than `bebida`, and `bebida` is more discriminative than the preposition `de`. Most likely, less discriminative terms will occur more frequently. For example, if we have a collection of 1000 documents, `de` might occur in 900 documents, `bebida` in 500, and `cerveja` in 100. If we do the calculation, we will see that the more discriminative a term is, the higher its IDF:
```
import math
N = 1000
n_de = 900
n_bebida = 500
n_cerveja = 100
IDF_de = math.log(N/n_de)
IDF_bebida = math.log(N/n_bebida)
IDF_cerveja = math.log(N/n_cerveja)
print(f"IDF_de: {IDF_de}\tIDF_bebida:{IDF_bebida}\tIDF_cerveja:{IDF_cerveja}")
```
The `scikit-learn` library also provides a [TfidfVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) class that transforms a text into a vector of attributes using TF-IDF as the value representing the relevance of each term. See an example on the `resumo` column of our movie dataset:
```
import pandas as pd
from preprocessamento_atributos import BagOfWords
df_amostra = pd.read_csv("datasets/movies_amostra.csv")
bow_amostra = BagOfWords()
df_bow_amostra = bow_amostra.cria_bow(df_amostra,"resumo")
df_bow_amostra
```
Since there are many attributes, it may look as if the result was not generated correctly. But by filtering the words of a particular summary you will verify that it is OK:
```
df_bow_amostra[["in","lake", "high"]]
```
Do not feel limited to these representations. You can try to build more succinct representations, for example: to preprocess the movie crew data (actors, director and writer), compute the number of comedy movies that crew members participated in and then the number of action movies. In this case, since you will be using the class label, you should use **only** the training data. For the summary, you can use keywords: for example, make a list of keywords that suggest "action" and count how many of these keywords appear in the summary, as sketched below.
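A minimal sketch of the keyword idea (the function name, the keyword list, and the new column name below are illustrative choices, not part of the original material):
```
import pandas as pd

# illustrative keyword list; in practice, curate it (using only the training data)
palavras_chave_acao = ["war", "fight", "mission", "police", "battle"]

def conta_palavras_chave(texto, palavras_chave):
    # count how many keyword occurrences appear in a (lower-cased) summary
    if not isinstance(texto, str):
        return 0
    tokens = texto.lower().split()
    return sum(tokens.count(p) for p in palavras_chave)

df_amostra = pd.read_csv("movies_amostra.csv")
df_amostra["qtd_palavras_acao"] = df_amostra["resumo"].apply(
    lambda t: conta_palavras_chave(t, palavras_chave_acao))
df_amostra[["resumo", "qtd_palavras_acao"]]
```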
|
github_jupyter
|
# Machine Learning Engineer Nanodegree
## Reinforcement Learning
## Project: Train a Smartcab to Drive
Welcome to the fourth project of the Machine Learning Engineer Nanodegree! In this notebook, template code has already been provided for you to aid in your analysis of the *Smartcab* and your implemented learning algorithm. You will not need to modify the included code beyond what is requested. There will be questions that you must answer which relate to the project and the visualizations provided in the notebook. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide in `agent.py`.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can typically be edited by double-clicking the cell to enter edit mode.
-----
## Getting Started
In this project, you will work towards constructing an optimized Q-Learning driving agent that will navigate a *Smartcab* through its environment towards a goal. Since the *Smartcab* is expected to drive passengers from one location to another, the driving agent will be evaluated on two very important metrics: **Safety** and **Reliability**. A driving agent that gets the *Smartcab* to its destination while running red lights or narrowly avoiding accidents would be considered **unsafe**. Similarly, a driving agent that frequently fails to reach the destination in time would be considered **unreliable**. Maximizing the driving agent's **safety** and **reliability** would ensure that *Smartcabs* have a permanent place in the transportation industry.
**Safety** and **Reliability** are measured using a letter-grade system as follows:
| Grade | Safety | Reliability |
|:-----: |:------: |:-----------: |
| A+ | Agent commits no traffic violations,<br/>and always chooses the correct action. | Agent reaches the destination in time<br />for 100% of trips. |
| A | Agent commits few minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 90% of trips. |
| B | Agent commits frequent minor traffic violations,<br/>such as failing to move on a green light. | Agent reaches the destination on time<br />for at least 80% of trips. |
| C | Agent commits at least one major traffic violation,<br/> such as driving through a red light. | Agent reaches the destination on time<br />for at least 70% of trips. |
| D | Agent causes at least one minor accident,<br/> such as turning left on green with oncoming traffic. | Agent reaches the destination on time<br />for at least 60% of trips. |
| F | Agent causes at least one major accident,<br />such as driving through a red light with cross-traffic. | Agent fails to reach the destination on time<br />for at least 60% of trips. |
To assist evaluating these important metrics, you will need to load visualization code that will be used later on in the project. Run the code cell below to import this code which is required for your analysis.
```
# Import the visualization code
import visuals as vs
# Pretty display for notebooks
%matplotlib inline
```
### Understand the World
Before starting to work on implementing your driving agent, it's necessary to first understand the world (environment) which the *Smartcab* and driving agent work in. One of the major components to building a self-learning agent is understanding the characteristics about the agent, which includes how the agent operates. To begin, simply run the `agent.py` agent code exactly how it is -- no need to make any additions whatsoever. Let the resulting simulation run for some time to see the various working components. Note that in the visual simulation (if enabled), the **white vehicle** is the *Smartcab*.
### Question 1
In a few sentences, describe what you observe during the simulation when running the default `agent.py` agent code. Some things you could consider:
- *Does the Smartcab move at all during the simulation?*
- *What kind of rewards is the driving agent receiving?*
- *How does the light changing color affect the rewards?*
**Hint:** From the `/smartcab/` top-level directory (where this notebook is located), run the command
```bash
python smartcab/agent.py
```
**Answer:** The smartcab does not move at all during the simulation.
While running the simulation, we see both the movement of other vehicles around the grid system and the changing colour of the traffic lights, either red or green. The smartcab receives either a positive or negative reward depending on whether it took an appropriate action: negative if the action was wrong, positive if it was correct. The magnitude of the reward increases for consecutive correct (or incorrect) actions, i.e. the reward received is greater if the smartcab continues to take the correct action when the previous action was also correct.
The light colour determines the reward the smartcab receives for the current action: it determines whether moving or staying put yields a reward or a penalty. The smartcab receives a positive reward if it idles at a red light, or at a green light with oncoming traffic, and conversely receives a penalty if it idles at a green light with no traffic.
### Understand the Code
In addition to understanding the world, it is also necessary to understand the code itself that governs how the world, simulation, and so on operate. Attempting to create a driving agent would be difficult without having at least explored the *"hidden"* devices that make everything work. In the `/smartcab/` top-level directory, there are two folders: `/logs/` (which will be used later) and `/smartcab/`. Open the `/smartcab/` folder and explore each Python file included, then answer the following question.
### Question 2
- *In the *`agent.py`* Python file, choose three flags that can be set and explain how they change the simulation.*
- *In the *`environment.py`* Python file, what Environment class function is called when an agent performs an action?*
- *In the *`simulator.py`* Python file, what is the difference between the *`'render_text()'`* function and the *`'render()'`* function?*
- *In the *`planner.py`* Python file, will the *`'next_waypoint()`* function consider the North-South or East-West direction first?*
**Answer:**
`agent.py`
- `update_delay` determines the time delay between actions, with a default of 2 seconds
- `log_metrics` is a Boolean toggle that determines whether to log trial and simulation results to `/logs/`
- `optimized` sets the default log file name
`environment.py`
- The class function `act()` is called when the agent performs an action
`simulator.py`
- `render_text()` produces the logging viewed in the terminal, whereas `render()` produces the logging viewed in the GUI simulation
`planner.py`
- `next_waypoint()` checks the East-West direction before checking the North-South direction
-----
## Implement a Basic Driving Agent
The first step to creating an optimized Q-Learning driving agent is getting the agent to actually take valid actions. In this case, a valid action is one of `None` (do nothing), `'Left'` (turn left), `'Right'` (turn right), or `'Forward'` (go forward). For your first implementation, navigate to the `'choose_action()'` agent function and make the driving agent randomly choose one of these actions. Note that you have access to several class variables that will help you write this functionality, such as `'self.learning'` and `'self.valid_actions'`. Once implemented, run the agent file and simulation briefly to confirm that your driving agent is taking a random action each time step.
### Basic Agent Simulation Results
To obtain results from the initial simulation, you will need to adjust the following flags:
- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
- `'log_metrics'` - Set this to `True` to log the simulation results as a `.csv` file in `/logs/`.
- `'n_test'` - Set this to `'10'` to perform 10 testing trials.
Optionally, you may disable the visual simulation (which can make the trials go faster) by setting the `'display'` flag to `False`. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!
Once you have successfully completed the initial simulation (there should have been 20 training trials and 10 testing trials), run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!
Run the `agent.py` file after setting the flags, from the `projects/smartcab` folder rather than `projects/smartcab/smartcab`.
```
# Load the 'sim_no-learning' log file from the initial simulation results
vs.plot_trials('sim_no-learning.csv')
```
### Question 3
Using the visualization above that was produced from your initial simulation, provide an analysis and make several observations about the driving agent. Be sure that you are making at least one observation about each panel present in the visualization. Some things you could consider:
- *How frequently is the driving agent making bad decisions? How many of those bad decisions cause accidents?*
- *Given that the agent is driving randomly, does the rate of reliability make sense?*
- *What kind of rewards is the agent receiving for its actions? Do the rewards suggest it has been penalized heavily?*
- *As the number of trials increases, does the outcome of results change significantly?*
- *Would this Smartcab be considered safe and/or reliable for its passengers? Why or why not?*
**Answer:**
- From the "10-trial rolling relative frequency of bad actions" visualisation, the agent is making bad decisions approximately 44% of the time. Of those bad decisions, approximately 24% result in accidents
- This reliability result makes some sense given that the agent is choosing randomly between four actions
- The "10-trial rolling average reward per action" panel shows that on average the agent receives a reward between -7 and -5.5. This indicates the agent is being heavily penalised, even though the top-left visualisation shows it makes good decisions slightly more often than bad ones
- From the "10-trial rolling rate of reliability" visualisation, the results are consistently 0%, as no trial completed successfully
- This smartcab should not be considered safe or reliable for its passengers, as both the safety rating and the reliability rating are "F". This indicates that the agent caused at least one major accident per run, and failed to reach the destination on time for at least 60% of trips
-----
## Inform the Driving Agent
The second step to creating an optimized Q-learning driving agent is defining a set of states that the agent can occupy in the environment. Depending on the input, sensory data, and additional variables available to the driving agent, a set of states can be defined for the agent so that it can eventually *learn* what action it should take when occupying a state. The condition of `'if state then action'` for each state is called a **policy**, and is ultimately what the driving agent is expected to learn. Without defining states, the driving agent would never understand which action is most optimal -- or even what environmental variables and conditions it cares about!
### Identify States
Inspecting the `'build_state()'` agent function shows that the driving agent is given the following data from the environment:
- `'waypoint'`, which is the direction the *Smartcab* should drive leading to the destination, relative to the *Smartcab*'s heading.
- `'inputs'`, which is the sensor data from the *Smartcab*. It includes
- `'light'`, the color of the light.
- `'left'`, the intended direction of travel for a vehicle to the *Smartcab*'s left. Returns `None` if no vehicle is present.
- `'right'`, the intended direction of travel for a vehicle to the *Smartcab*'s right. Returns `None` if no vehicle is present.
- `'oncoming'`, the intended direction of travel for a vehicle across the intersection from the *Smartcab*. Returns `None` if no vehicle is present.
- `'deadline'`, which is the number of actions remaining for the *Smartcab* to reach the destination before running out of time.
### Question 4
*Which features available to the agent are most relevant for learning both **safety** and **efficiency**? Why are these features appropriate for modeling the *Smartcab* in the environment? If you did not choose some features, why are those features* not *appropriate?*
**Answer:**
We should only need the waypoint and inputs features, the latter including the light, left, right, and oncoming attributes. Though debatable, the deadline feature is not as relevant as the others, as it does not contain information that cannot also be derived from them.
The inputs feature is relevant for safety because it determines the constraints under which the smartcab can operate. For example, if there is a car to the smartcab's left that will come across the intersection, the smartcab should respond appropriately. It does not capture any sense of efficiency, as we do not know how this feature relates to the direction the smartcab should go under some constraint. One important qualification comes from domain knowledge: this agent operates in a road system where traffic travels on the right-hand side of the road. It would be more efficient, though less generalisable, to drop knowledge of traffic to the right of the agent from our state description.
The waypoint feature captures efficiency. It indicates the ideal action to take to reach our destination; in fact, waypoint would produce the optimal path in the absence of lights or other vehicles. Unlike the deadline feature, this feature is a necessary requirement to understand the smartcab's position in the environment.
Although the deadline feature is a measure of efficiency, we can deduce a measure of efficiency from the waypoint feature, assuming that it always indicates the optimal direction the smartcab should follow.
### Define a State Space
When defining a set of states that the agent can occupy, it is necessary to consider the *size* of the state space. That is to say, if you expect the driving agent to learn a **policy** for each state, you would need to have an optimal action for *every* state the agent can occupy. If the number of all possible states is very large, it might be the case that the driving agent never learns what to do in some states, which can lead to uninformed decisions. For example, consider a case where the following features are used to define the state of the *Smartcab*:
`('is_raining', 'is_foggy', 'is_red_light', 'turn_left', 'no_traffic', 'previous_turn_left', 'time_of_day')`.
How frequently would the agent occupy a state like `(False, True, True, True, False, False, '3AM')`? Without a near-infinite amount of time for training, it's doubtful the agent would ever learn the proper action!
### Question 5
*If a state is defined using the features you've selected from **Question 4**, what would be the size of the state space? Given what you know about the environment and how it is simulated, do you think the driving agent could learn a policy for each possible state within a reasonable number of training trials?*
**Hint:** Consider the *combinations* of features to calculate the total number of states!
**Answer:**
From Question 4, I said that the required features were waypoint and inputs.
Waypoint can take one of three values: right, forward, or left. We can discount None from our state space, as this would indicate that the smartcab has reached its destination.
Inputs breaks down as:
- light can be red or green
- left and oncoming can each be right, forward, left, or None. We can drop the inputs['right'] feature, as traffic to the right of the smartcab is travelling away from it
The total state space would be 3 \* 4 \* 4 \* 2 = 96
I do not think it is reasonable to expect the agent to learn a policy for each possible state. If we assume each journey takes on the order of 10 steps, we would realistically need on the order of 10^3 trials to obtain meaningful results for this state space. In other words, with fewer than 10^3 trials we would only gather a total number of data points of a similar order of magnitude as the state space itself.
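As a quick sanity check (illustrative only, not part of `agent.py`), the state combinations can be enumerated directly; the feature values below reflect the choices discussed above:
```
# Hypothetical sketch: enumerate the chosen state space to confirm its size.
import itertools

waypoints = ['left', 'right', 'forward']        # None excluded (destination reached)
lights = ['red', 'green']
traffic = [None, 'left', 'right', 'forward']    # possible inputs['left'] / inputs['oncoming'] values

states = list(itertools.product(waypoints, lights, traffic, traffic))
print(len(states))  # 3 * 2 * 4 * 4 = 96
```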
### Update the Driving Agent State
For your second implementation, navigate to the `'build_state()'` agent function. With the justification you've provided in **Question 4**, you will now set the `'state'` variable to a tuple of all the features necessary for Q-Learning. Confirm your driving agent is updating its state by running the agent file and simulation briefly and note whether the state is displaying. If the visual simulation is used, confirm that the updated state corresponds with what is seen in the simulation.
**Note:** Remember to reset simulation flags to their default setting when making this observation!
-----
## Implement a Q-Learning Driving Agent
The third step to creating an optimized Q-Learning agent is to begin implementing the functionality of Q-Learning itself. The concept of Q-Learning is fairly straightforward: For every state the agent visits, create an entry in the Q-table for all state-action pairs available. Then, when the agent encounters a state and performs an action, update the Q-value associated with that state-action pair based on the reward received and the iterative update rule implemented. Of course, additional benefits come from Q-Learning, such as having the agent choose the *best* action for each state based on the Q-values of each state-action pair possible. For this project, you will be implementing a *decaying,* $\epsilon$*-greedy* Q-learning algorithm with *no* discount factor. Follow the implementation instructions under each **TODO** in the agent functions.
Note that the agent attribute `self.Q` is a dictionary: This is how the Q-table will be formed. Each state will be a key of the `self.Q` dictionary, and each value will then be another dictionary that holds the *action* and *Q-value*. Here is an example:
```
{ 'state-1': {
'action-1' : Qvalue-1,
'action-2' : Qvalue-2,
...
},
'state-2': {
'action-1' : Qvalue-1,
...
},
...
}
```
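As a point of reference, with no discount factor the Q-value update for a visited state-action pair reduces to moving the stored Q-value a fraction $\alpha$ of the way toward the reward just received. A minimal sketch follows (illustrative only; in the project this logic belongs under the corresponding **TODO** in the agent functions, and the names used here are assumptions):
```
# Hypothetical sketch of the discount-free Q-update used in this project.
def update_q(Q, state, action, reward, alpha):
    old_value = Q[state][action]
    Q[state][action] = old_value + alpha * (reward - old_value)
```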
Furthermore, note that you are expected to use a *decaying* $\epsilon$ *(exploration) factor*. Hence, as the number of trials increases, $\epsilon$ should decrease towards 0. This is because the agent is expected to learn from its behavior and begin acting on its learned behavior. Additionally, the agent will be tested on what it has learned after $\epsilon$ has passed a certain threshold (the default threshold is 0.01). For the initial Q-Learning implementation, you will be implementing a linear decaying function for $\epsilon$.
### Q-Learning Simulation Results
To obtain results from the initial Q-Learning implementation, you will need to adjust the following flags and setup:
- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
- `'log_metrics'` - Set this to `True` to log the simulation results as a `.csv` file and the Q-table as a `.txt` file in `/logs/`.
- `'n_test'` - Set this to `'10'` to perform 10 testing trials.
- `'learning'` - Set this to `'True'` to tell the driving agent to use your Q-Learning implementation.
In addition, use the following decay function for $\epsilon$:
$$ \epsilon_{t+1} = \epsilon_{t} - 0.05, \hspace{10px}\textrm{for trial number } t$$
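A minimal sketch of this linear decay (illustrative only; where the update is applied, for example once per trial in the agent's reset step, is an assumption about your implementation):
```
# Hypothetical sketch: linear epsilon decay, clipped at zero.
def decay_epsilon_linear(epsilon, step=0.05):
    return max(epsilon - step, 0.0)
```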
If you have difficulty getting your implementation to work, try setting the `'verbose'` flag to `True` to help debug. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!
Once you have successfully completed the initial Q-Learning simulation, run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!
```
# Load the 'sim_default-learning' file from the default Q-Learning simulation
vs.plot_trials('sim_default-learning.csv')
```
### Question 6
Using the visualization above that was produced from your default Q-Learning simulation, provide an analysis and make observations about the driving agent like in **Question 3**. Note that the simulation should have also produced the Q-table in a text file which can help you make observations about the agent's learning. Some additional things you could consider:
- *Are there any observations that are similar between the basic driving agent and the default Q-Learning agent?*
- *Approximately how many training trials did the driving agent require before testing? Does that number make sense given the epsilon-tolerance?*
- *Is the decaying function you implemented for $\epsilon$ (the exploration factor) accurately represented in the parameters panel?*
- *As the number of training trials increased, did the number of bad actions decrease? Did the average reward increase?*
- *How does the safety and reliability rating compare to the initial driving agent?*
**Answer:**
- Between this simulation and the previous one with no learning enabled, the rolling average reward is still consistently negative, although much smaller in magnitude and improving over the trials. The safety rating is similar, with both agents receiving a safety rating of "F"
- By default the epsilon-tolerance is 0.05, and we reduced the exploration factor by 0.05 each training trial. This corresponds to the 20 training trials performed by the agent, since 1 / 0.05 = 20
- The second panel on the right-hand side shows a plot of parameter values against trials. The exploration factor decreases at a constant rate, which is expected, as it was reduced by a constant amount after each trial
- As the number of training trials increased, the number of bad actions decreased significantly to around 11%, as seen in the top-left plot, and the average reward improved significantly, as shown in the top-right plot
- The reliability has substantially improved, with a grade of "D", indicating that the agent is effectively learning how to navigate the grid; with more trials this could improve further. The safety rating is still "F", though this overlooks the fact that the frequency of bad actions has fallen
-----
## Improve the Q-Learning Driving Agent
The third step to creating an optimized Q-Learning agent is to perform the optimization! Now that the Q-Learning algorithm is implemented and the driving agent is successfully learning, it's necessary to tune settings and adjust learning parameters so the driving agent learns both **safety** and **efficiency**. Typically this step will require a lot of trial and error, as some settings will invariably make the learning worse. One thing to keep in mind is the act of learning itself and the time that this takes: In theory, we could allow the agent to learn for an incredibly long amount of time; however, another goal of Q-Learning is to *transition from experimenting with unlearned behavior to acting on learned behavior*. For example, always allowing the agent to perform a random action during training (if $\epsilon = 1$ and never decays) will certainly make it *learn*, but never let it *act*. When improving on your Q-Learning implementation, consider the implications it creates and whether it is logistically sensible to make a particular adjustment.
### Improved Q-Learning Simulation Results
To obtain results from the improved Q-Learning implementation, you will need to adjust the following flags and setup:
- `'enforce_deadline'` - Set this to `True` to force the driving agent to capture whether it reaches the destination in time.
- `'update_delay'` - Set this to a small value (such as `0.01`) to reduce the time between steps in each trial.
- `'log_metrics'` - Set this to `True` to log the simulation results as a `.csv` file and the Q-table as a `.txt` file in `/logs/`.
- `'learning'` - Set this to `'True'` to tell the driving agent to use your Q-Learning implementation.
- `'optimized'` - Set this to `'True'` to tell the driving agent you are performing an optimized version of the Q-Learning implementation.
Additional flags that can be adjusted as part of optimizing the Q-Learning agent:
- `'n_test'` - Set this to some positive number (previously 10) to perform that many testing trials.
- `'alpha'` - Set this to a real number between 0 - 1 to adjust the learning rate of the Q-Learning algorithm.
- `'epsilon'` - Set this to a real number between 0 - 1 to adjust the starting exploration factor of the Q-Learning algorithm.
- `'tolerance'` - set this to some small value larger than 0 (default was 0.05) to set the epsilon threshold for testing.
Furthermore, use a decaying function of your choice for $\epsilon$ (the exploration factor). Note that whichever function you use, it **must decay to **`'tolerance'`** at a reasonable rate**. The Q-Learning agent will not begin testing until this occurs. Some example decaying functions (for $t$, the number of trials):
$$ \epsilon = a^t, \textrm{for } 0 < a < 1 \hspace{50px}\epsilon = \frac{1}{t^2}\hspace{50px}\epsilon = e^{-at}, \textrm{for } 0 < a < 1 \hspace{50px} \epsilon = \cos(at), \textrm{for } 0 < a < 1$$
You may also use a decaying function for $\alpha$ (the learning rate) if you so choose, however this is typically less common. If you do so, be sure that it adheres to the inequality $0 \leq \alpha \leq 1$.
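For illustration, a couple of the suggested decay schedules could be written as plain functions of the trial number $t$; the constant `a` below is a tuning choice, not something fixed by the project:
```
# Hypothetical sketches of two of the decay functions listed above.
import math

def epsilon_exponential(t, a=0.05):
    return math.exp(-a * t)            # e^{-at}, 0 < a < 1

def epsilon_cosine(t, a=0.01):
    return max(math.cos(a * t), 0.0)   # cos(at), clipped so epsilon stays non-negative
```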
If you have difficulty getting your implementation to work, try setting the `'verbose'` flag to `True` to help debug. Flags that have been set here should be returned to their default setting when debugging. It is important that you understand what each flag does and how it affects the simulation!
Once you have successfully completed the improved Q-Learning simulation, run the code cell below to visualize the results. Note that log files are overwritten when identical simulations are run, so be careful with what log file is being loaded!
```
# Load the 'sim_improved-learning' file from the improved Q-Learning simulation
vs.plot_trials('sim_improved-learning.csv')
```
### Question 7
Using the visualization above that was produced from your improved Q-Learning simulation, provide a final analysis and make observations about the improved driving agent like in **Question 6**. Questions you should answer:
- *What decaying function was used for epsilon (the exploration factor)?*
- *Approximately how many training trials were needed for your agent before beginning testing?*
- *What epsilon-tolerance and alpha (learning rate) did you use? Why did you use them?*
- *How much improvement was made with this Q-Learner when compared to the default Q-Learner from the previous section?*
- *Would you say that the Q-Learner results show that your driving agent successfully learned an appropriate policy?*
- *Are you satisfied with the safety and reliability ratings of the *Smartcab*?*
**Answer:**
- I used the cosine decay function for epsilon: cos(alpha * trial)
- The agent completed around 150 training trials before testing
- I set alpha to 0.01 and the epsilon-tolerance to 0.05. This was to make sure that I got a larger number of trials than there are states in the state space (found to be 96 above), so that my agent could adequately learn the environment without redundancy
- The safety rating has significantly improved from "F" to "A+", which indicates that we are adequately capturing information about the environment. The reliability rating has also improved to "A+", though only from "D", which suggests it is less influenced by the exploration factor
- I think this demonstrates that the agent learned an appropriate policy
- I am satisfied with the ratings of the smartcab. More trials could be run to improve the average reward per action, and perhaps a different epsilon decay function could achieve similar results within fewer trials, making the learner more scalable
### Define an Optimal Policy
Sometimes, the answer to the important question *"what am I trying to get my agent to learn?"* only has a theoretical answer and cannot be concretely described. Here, however, you can concretely define what it is the agent is trying to learn, and that is the U.S. right-of-way traffic laws. Since these laws are known information, you can further define, for each state the *Smartcab* is occupying, the optimal action for the driving agent based on these laws. In that case, we call the set of optimal state-action pairs an **optimal policy**. Hence, unlike some theoretical answers, it is clear whether the agent is acting "incorrectly" not only by the reward (penalty) it receives, but also by pure observation. If the agent drives through a red light, we both see it receive a negative reward but also know that it is not the correct behavior. This can be used to your advantage for verifying whether the **policy** your driving agent has learned is the correct one, or if it is a **suboptimal policy**.
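One practical way to inspect what the agent has learned is to read the greedy policy straight out of the Q-table structure shown earlier; the sketch below is illustrative only and not part of the provided project code:
```
# Hypothetical sketch: extract the greedy policy (best action per state) from a Q-table.
def extract_policy(Q):
    return {state: max(actions, key=actions.get) for state, actions in Q.items()}
```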
### Question 8
Provide a few examples (using the states you've defined) of what an optimal policy for this problem would look like. Afterwards, investigate the `'sim_improved-learning.txt'` text file to see the results of your improved Q-Learning algorithm. _For each state that has been recorded from the simulation, is the **policy** (the action with the highest value) correct for the given state? Are there any states where the policy is different than what would be expected from an optimal policy?_ Provide an example of a state and all state-action rewards recorded, and explain why it is the correct policy.
**Answer:**
In general, we can imagine the optimal policy would determine that:
- The smartcab may turn 'right' on a red light if no traffic is approaching from its left through the intersection
- The smartcab should choose 'None' (idle) whenever moving would cause a violation or an accident with other traffic
- The smartcab should go in the direction of the waypoint if the light is green and it is not obstructed by traffic
An example of a policy from the Q-Learning algorithm in line with the ideal is the following:
```
('forward', 'red', None, None)
-- forward : -3.97
-- right : 0.25
-- None : 1.39
-- left : -5.08
```
That is, with a waypoint of forward, the light is red, and there are no cars near the smartcab. In this case the ideal action, None, has the highest positive weighting, and the two most disruptive actions are severely penalised: any movement would be a violation.
The Q-Learning algorithm does not produce ideal policies when there is a lot of noise, i.e. traffic in all directions. It is difficult to optimise for these situations given the range of possibilities and the small number of training trials. For example,
```
('right', 'red', 'left', 'left')
-- forward : -0.21
-- right : 0.06
-- None : 0.00
-- left : -0.10
```
Here the waypoint is to the right, the agent is at a red light, and there is traffic in all directions. In this case we would expect the ideal policy to strongly favour the None action.
That said, on a red light a right turn is permitted if no oncoming traffic is approaching from the left through the intersection.
-----
### Optional: Future Rewards - Discount Factor, `'gamma'`
Curiously, as part of the Q-Learning algorithm, you were asked to **not** use the discount factor, `'gamma'` in the implementation. Including future rewards in the algorithm is used to aid in propagating positive rewards backwards from a future state to the current state. Essentially, if the driving agent is given the option to make several actions to arrive at different states, including future rewards will bias the agent towards states that could provide even more rewards. An example of this would be the driving agent moving towards a goal: With all actions and rewards equal, moving towards the goal would theoretically yield better rewards if there is an additional reward for reaching the goal. However, even though in this project the driving agent is trying to reach a destination in the allotted time, including future rewards will not benefit the agent. In fact, if the agent were given many trials to learn, it could negatively affect Q-values!
### Optional Question 9
*There are two characteristics about the project that invalidate the use of future rewards in the Q-Learning algorithm. One characteristic has to do with the *Smartcab* itself, and the other has to do with the environment. Can you figure out what they are and why future rewards won't work for this project?*
**Answer:**
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
|
github_jupyter
|
# Hash Codes
Consider the challenges associated with the 16-bit hashcode for a character string `s` that sums the Unicode values of the characters in `s`.
For example, let `s = "stop"`. Its Unicode character representation is:
```
for char in "stop":
print(char + ': ' + str(ord(char)))
sum([ord(x) for x in "stop"])
```
If we then sum these Unicode values, we arrive at the following hash code:
```
stop -----------> 454
```
The problem is, the following strings will all map to the same value!
```
stop -----------> 454
pots -----------> 454
tops -----------> 454
spot -----------> 454
```
A better hash code would take into account the _position_ of our characters.
## Polynomial Hash code
If we refer to the characters of our string as $x_0, x_1, \dots, x_{n-1}$, we can then choose a non-zero constant, $a \neq 1$, and use a hash code:
$$a^{n-1} x_0 + a^{n-2} x_1 + \dots + a^1 x_{n-2} + a^0 x_{n-1}$$
This is simply a polynomial in $a$ that has our $x_i$ values as its coefficients. This is known as a **polynomial** hash code.
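A minimal sketch of such a polynomial hash code, evaluated with Horner's rule (the constant `a = 33` below is just an illustrative choice):
```
# Hypothetical sketch: position-sensitive polynomial hash code via Horner's rule.
def polynomial_hash(s, a=33):
    h = 0
    for char in s:
        h = h * a + ord(char)
    return h

print(polynomial_hash("stop"), polynomial_hash("pots"))  # anagrams now hash differently
```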
```
1 << 32
2**32
2 << 2
```
## Investigate hash map uniformity
```
import random
import numpy as np
import matplotlib.pyplot as plt
%config InlineBackend.figure_format='retina'
n = 0
prime = 109345121
scale = 1 + random.randrange(prime - 1)
shift = random.randrange(prime)
def my_hash_func(k, upper):
table = upper * [None]
hash_code = hash(k)
compressed_code = (hash_code * scale + shift) % prime % len(table)
return compressed_code
upper = 1000
inputs = list(range(0, upper))
hash_results = []
for i in inputs:
hash_results.append(my_hash_func(i, upper))
plt.figure(figsize=(15,10))
plt.plot(inputs, hash_results)
plt.figure(figsize=(15,10))
plt.scatter(inputs, hash_results)
def moving_average(x, w):
return np.convolve(x, np.ones(w), 'valid') / w
averages_over_window_size_5 = moving_average(hash_results, 5)
plt.hist(averages_over_window_size_5)
l = [4, 7, 9, 13, 1, 3, 7]
l1 = [1, 4, 7]; l2 = [3, 9, 13]
def merge_sort(l):
    """Sort a list by recursively splitting it in half and merging the sorted halves."""
size = len(l)
midway = size // 2
first_half = l[:midway]
second_half = l[midway:]
if len(first_half) > 1 or len(second_half) > 1:
sorted_first_half = merge_sort(first_half)
sorted_second_half = merge_sort(second_half)
else:
sorted_first_half = first_half
sorted_second_half = second_half
sorted_l = merge(sorted_first_half, sorted_second_half)
return sorted_l
def merge(l1, l2):
"""Merge two sorted lists."""
i = 0
j = 0
lmerged = []
while (i <= len(l1) - 1) or (j <= len(l2) - 1):
if i == len(l1):
lmerged.extend(l2[j:])
break
if j == len(l2):
lmerged.extend(l1[i:])
break
if (i < len(l1)) and (l1[i] < l2[j]):
lmerged.append(l1[i])
i += 1
else:
lmerged.append(l2[j])
j += 1
return lmerged
merge_sort(l)
l = [random.choice(list(range(1000))) for x in range(1000)]
%%time
res = sorted(l)
%%time
res = merge_sort(l)
```
|
github_jupyter
|
```
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn import datasets, linear_model
from sklearn import cross_validation
import numpy as np
import pandas as pd
from sklearn import preprocessing
df = pd.read_excel("data0505.xlsx",header=0)
# clean up data
df = df.dropna(how = 'all')
df = df.fillna(0)
df = df.round(4)
df=df[df['Power']>=0]
df.head()
min_max_scaler = preprocessing.MinMaxScaler()
np_scaled = min_max_scaler.fit_transform(df)
df_normalized = pd.DataFrame(np_scaled)
df_normalized.head()
x = np.array(df_normalized.ix[:,0:2]) # first three columns are SoC, SoH, Power
y = np.array(df_normalized.ix[:,5])#delta SEI
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
total_len = X_train.shape[0]
total_len
# Parameters
learning_rate = 0.001
training_epochs = 50
batch_size = 100
display_step = 1
dropout_rate = 0.1
# Network Parameters
n_hidden_1 = 10 # 1st layer number of features
n_hidden_2 = 5 # 2nd layer number of features
n_input = X_train.shape[1]
n_classes = 1
# tf Graph input
x = tf.placeholder("float", [None, 3])
y = tf.placeholder("float", [None])
# Create model
def multilayer_perceptron(x, weights, biases):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1], 0, 0.1)),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes], 0, 0.1))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1], 0, 0.1)),
'b2': tf.Variable(tf.random_normal([n_hidden_2], 0, 0.1)),
'out': tf.Variable(tf.random_normal([n_classes], 0, 0.1))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean((tf.transpose(pred)-y)*(tf.transpose(pred)-y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
tf.initialize_all_variables()
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(total_len/batch_size)
# Loop over all batches
for i in range(total_batch-1):
batch_x = X_train[i*batch_size:(i+1)*batch_size]
batch_y = Y_train[i*batch_size:(i+1)*batch_size]
# Run optimization op (backprop) and cost op (to get loss value)
_, c, p = sess.run([optimizer, cost, pred], feed_dict={x: batch_x,
y: batch_y})
# Compute average loss
avg_cost += c / total_batch
# sample prediction
label_value = batch_y
estimate = p
err = label_value-estimate
print ("num batch:", total_batch)
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
print ("[*]----------------------------")
for i in range(3):
print ("label value:", label_value[i], \
"estimated value:", estimate[i])
print ("[*]============================")
print ("Optimization Finished!")
# Test model
# correct_prediction = tf.equal(tf.argmax(pred,0), tf.argmax(y,0))
# Calculate accuracy
accuracy = tf.reduce_mean((tf.transpose(pred)-y)*(tf.transpose(pred)-y))
print ("MSE:", accuracy.eval({x: X_test, y: Y_test}))
```
|
github_jupyter
|
```
import numpy as np
import scipy
from scipy import sparse
import scipy.sparse.linalg
import matplotlib.pyplot as plt
%matplotlib inline
# part a)
Id = sparse.csr_matrix(np.eye(2))
Sx = sparse.csr_matrix([[0., 1.], [1., 0.]])
Sz = sparse.csr_matrix([[1., 0.], [0., -1.]])
print(Sz.shape)
# part b)
def singesite_to_full(op, i, L):
op_list = [Id]*L # = [Id, Id, Id ...] with L entries
op_list[i] = op
full = op_list[0]
for op_i in op_list[1:]:
full = sparse.kron(full, op_i, format="csr")
return full
def gen_sx_list(L):
return [singesite_to_full(Sx, i, L) for i in range(L)]
# part c)
def gen_sz_list(L):
return [singesite_to_full(Sz, i, L) for i in range(L)]
# part d)
def gen_hamiltonian(sx_list, sz_list, g, J=1.):
L = len(sx_list)
H = sparse.csr_matrix((2**L, 2**L))
for j in range(L):
H = H - J *( sx_list[j] * sx_list[(j+1)%L])
H = H - g * sz_list[j]
return H
# check in part d)
L = 2
sx_list = gen_sx_list(L)
sz_list = gen_sz_list(L)
H = gen_hamiltonian(sx_list, sz_list, 0.1)
print("H for L=2, g=0.1")
print(H.toarray())
# part e)
L = 12
sx_list = gen_sx_list(L)
sz_list = gen_sz_list(L)
H = gen_hamiltonian(sx_list, sz_list, 1.)
Hdense = H.toarray()
print("L =12: H =", repr(H))
%%timeit
sparse.linalg.eigsh(H, which='SA')
%%timeit
np.linalg.eigh(Hdense)
# part f)
Ls = [6, 8, 10, 12]
gs = np.linspace(0., 2., 21)
plt.figure()
for L in Ls:
sx_list = gen_sx_list(L)
sz_list = gen_sz_list(L)
sxsx = sx_list[0]*sx_list[L//2]
corrs = []
for g in gs:
H = gen_hamiltonian(sx_list, sz_list, g, J=1.)
E, v = sparse.linalg.eigsh(H, k=3, which='SA')
v0 = v[:, 0] # first column of v is the ground state
corr = np.inner(v0, sxsx*v0)
corrs.append(corr)
corrs = np.array(corrs)
plt.plot(gs, corrs, label="L={L:d}".format(L=L))
plt.xlabel("g")
plt.ylabel("C")
plt.legend()
# part g)
plt.figure(figsize=(10, 8))
for L in [6, 8, 10, 12]:
sx_list = gen_sx_list(L)
sz_list = gen_sz_list(L)
gaps = []
for g in gs:
H = gen_hamiltonian(sx_list, sz_list, g, J=1.)
E, v = sparse.linalg.eigsh(H, k=3, which='SA')
gaps.append((E[1] - E[0], E[2] - E[0]))
gaps = np.array(gaps)
lines = plt.plot(gs, gaps[:, 0], linestyle='-', label="first excited state, L={L:d}".format(L=L))
plt.plot(gs, gaps[:, 1], color = lines[0].get_color(), linestyle='--', label="second excited state, L={L:d}".format(L=L))
plt.legend()
# just for fun: regenerate the correlation plot with open boundary conditions
def gen_hamiltonian_open_bc(sx_list, sz_list, g, J=1.):
L = len(sx_list)
H = sparse.csr_matrix((2**L, 2**L))
for j in range(L):
if j < L-1:
H = H - J *( sx_list[j] * sx_list[j+1])
H = H - g * sz_list[j]
return H
plt.figure()
for L in Ls:
sx_list = gen_sx_list(L)
sz_list = gen_sz_list(L)
sxsx = sx_list[0]*sx_list[L//2]
corrs = []
for g in gs:
H = gen_hamiltonian_open_bc(sx_list, sz_list, g, J=1.)
E, v = sparse.linalg.eigsh(H, k=3, which='SA')
v0 = v[:, 0] # first column of v is the ground state
corr = np.inner(v0, sxsx*v0)
corrs.append(corr)
corrs = np.array(corrs)
plt.plot(gs, corrs, label="L={L:d}".format(L=L))
plt.xlabel("g")
plt.ylabel("C")
plt.legend()
# and the plot for the excitation energies for open b.c.
plt.figure(figsize=(10, 8))
for L in [6, 8, 10, 12]:
sx_list = gen_sx_list(L)
sz_list = gen_sz_list(L)
gaps = []
for g in gs:
H = gen_hamiltonian_open_bc(sx_list, sz_list, g, J=1.)
E, v = sparse.linalg.eigsh(H, k=3, which='SA')
gaps.append((E[1] - E[0], E[2] - E[0]))
gaps = np.array(gaps)
lines = plt.plot(gs, gaps[:, 0], linestyle='-', label="first excited state, L={L:d}".format(L=L))
plt.plot(gs, gaps[:, 1], color = lines[0].get_color(), linestyle='--', label="second excited state, L={L:d}".format(L=L))
plt.legend()
# For comparison on the next sheet:
L = 10
sx_list = gen_sx_list(L)
sz_list = gen_sz_list(L)
H = gen_hamiltonian(sx_list, sz_list, g=0.1, J=1.)
E, v = sparse.linalg.eigsh(H, k=3, which='SA')
print(E[0])
```
|
github_jupyter
|
<center><img src="./images/logo_fmkn.png" width=300 style="display: inline-block;"></center>
## Machine Learning
### Seminar 13. The EM Algorithm
<br />
<br />
December 9, 2021
We will solve the problem of reconstructing a face image from a set of noisy pictures (adapted from the Deep Bayes 2018 course, https://github.com/bayesgroup/deepbayes-2018).
You have $K$ photographs corrupted by electromagnetic noise. It is known that each photo contains a face, located in a rectangular region of width $w$ whose starting position is unknown, and a background that is the same for all photographs.
<center><img src="./images/example_and_structure.jpg" width=800 style="display: inline-block;"></center>
```
from matplotlib import pyplot as plt
import numpy as np
import zipfile
with zipfile.ZipFile('data_em.zip', 'r') as zip_ref:
zip_ref.extractall('.')
DATA_FILE = "data_em"
w = 73 # face_width
X = np.load(DATA_FILE)
X.shape # H, W, K
plt.imshow(X[:, :, 7], cmap="Greys_r")
plt.axis("off")
tH, tW, tw, tK = 2, 3, 1, 2
tX = np.arange(tH*tW*tK).reshape(tH, tW, tK)
tF = np.arange(tH*tw).reshape(tH, tw)
tB = np.arange(tH*tW).reshape(tH, tW)
ts = 0.1
ta = np.arange(1, (tW-tw+1)+1)
ta = ta / ta.sum()
tq = np.arange(1, (tW-tw+1)*tK+1).reshape(tW-tw+1, tK)
tq = tq / tq.sum(axis=0)[np.newaxis, :]
```
1. **Implement calculate_log_probability**
For the $k$-th image $X_k$ and some position $d_k$:
$$ p(X_k \mid d_k,\,F,\,B,\, std) = \prod\limits_{ij}\begin{cases}
\mathcal{N}(X_k[i,j]\mid F[i,\,j-d_k],\, std^2),
& \text{if}\, (i,j)\in faceArea(d_k)\\
\mathcal{N}(X_k[i,j]\mid B[i,j],\, std^2), & \text{else}
\end{cases}
$$
Notes:
* $faceArea(d_k) = \{[i, j]| d_k \leq j \leq d_k + w - 1 \}$
* The prior distribution is given by a learnable vector $a \in \mathbb{R}^{W-w+1}$: $$p(d_k \mid a) = a[d_k],\ \sum\limits_j a[j] = 1$$
* The resulting probabilistic model is: $$ p(X, d \mid F,\,B,\,std,\,a) = \prod\limits_k p(X_k \mid d_k,\,F,\,B,\,std) p(d_k \mid a)$$
* Don't forget the logarithm!
* `scipy.stats.norm` may come in handy
```
import scipy.stats
def calculate_log_probability(X, F, B, s):
"""
Calculates log p(X_k|d_k, F, B, s) for all images X_k in X and
all possible face position d_k.
Parameters
----------
X : array, shape (H, W, K)
K images of size H x W.
F : array, shape (H, w)
Estimate of prankster's face.
B : array, shape (H, W)
Estimate of background.
s : float
Estimate of standard deviation of Gaussian noise.
Returns
-------
ll : array, shape(W-w+1, K)
ll[dw, k] - log-likelihood of observing image X_k given
that the prankster's face F is located at position dw
"""
H, W, K = X.shape
_, w = F.shape
# your code here
ll = np.zeros((W-w+1, K))
for dw in range(W-w+1):
combined = np.copy(B)
combined[:, dw:dw+w] = F
d_combined = X - np.expand_dims(combined, 2)
ll[dw] = scipy.stats.norm(0, s).logpdf(d_combined).sum(axis=(0,1))
return ll
# run this cell to test your implementation
expected = np.array([[-3541.69812064, -5541.69812064],
[-4541.69812064, -6741.69812064],
[-6141.69812064, -8541.69812064]])
actual = calculate_log_probability(tX, tF, tB, ts)
assert np.allclose(actual, expected)
print("OK")
```
2. **Implement calculate_lower_bound**
\begin{equation}\mathscr{L}(q, \,F, \,B,\, s,\, a) = \sum_k \biggl (\mathbb{E} _ {q( d_k)}\bigl ( \log p( X_{k} \mid {d}_{k} , \,F,\,B,\,s) +
\log p( d_k \mid a)\bigr) - \mathbb{E} _ {q( d_k)} \log q( d_k)\biggr) \end{equation}
Notes
* Use calculate_log_probability!
* Note that $q( d_k)$ and $p( d_k \mid a)$ are discrete. For example, $P(d_k=i \mid a) = a[i]$.
```
def calculate_lower_bound(X, F, B, s, a, q):
"""
Calculates the lower bound L(q, F, B, s, a) for
the marginal log likelihood.
Parameters
----------
X : array, shape (H, W, K)
K images of size H x W.
F : array, shape (H, w)
Estimate of prankster's face.
B : array, shape (H, W)
Estimate of background.
s : float
Estimate of standard deviation of Gaussian noise.
a : array, shape (W-w+1)
Estimate of prior on position of face in any image.
q : array
q[dw, k] - estimate of posterior
of position dw
of prankster's face given image Xk
Returns
-------
L : float
The lower bound L(q, F, B, s, a)
for the marginal log likelihood.
"""
# your code here
return (q * (calculate_log_probability(X,F,B,s) + np.expand_dims(np.log(a), 1) - np.log(q))).sum()
calculate_lower_bound(tX, tF, tB, ts, ta, tq)
# run this cell to test your implementation
expected = -12761.1875
actual = calculate_lower_bound(tX, tF, tB, ts, ta, tq)
assert np.allclose(actual, expected)
print("OK")
```
3. **Implement the E-step**
$$q(d_k) = p(d_k \mid X_k, \,F, \,B, \,s,\, a) =
\frac {p( X_{k} \mid {d}_{k} , \,F,\,B,\,s)\, p(d_k \mid a)}
{\sum_{d'_k} p( X_{k} \mid d'_k , \,F,\,B,\,s) \,p(d'_k \mid a)}$$
Notes
* Use calculate_log_probability!
* Compute in log space and only exponentiate at the end.
* For numerical stability, it is recommended to use the following identity: $$\beta_i = \log{p_i(\dots)} \quad\rightarrow \quad
\frac{e^{\beta_i}}{\sum\limits_k e^{\beta_k}} =
\frac{e^{(\beta_i - \max_j \beta_j)}}{\sum\limits_k e^{(\beta_k- \max_j \beta_j)}}$$
```
def run_e_step(X, F, B, s, a):
"""
Given the current esitmate of the parameters, for each image Xk
esitmates the probability p(d_k|X_k, F, B, s, a).
Parameters
----------
X : array, shape(H, W, K)
K images of size H x W.
F : array_like, shape(H, w)
Estimate of prankster's face.
B : array shape(H, W)
Estimate of background.
s : float
Estimate of standard deviation of Gaussian noise.
a : array, shape(W-w+1)
Estimate of prior on face position in any image.
Returns
-------
q : array
shape (W-w+1, K)
q[dw, k] - estimate of posterior of position dw
of prankster's face given image Xk
"""
# your code here
log_nom = calculate_log_probability(X,F,B,s) + np.expand_dims(np.log(a), 1)
mx = log_nom.max(axis=0)
nom = np.exp(log_nom - mx)
return nom / nom.sum(axis=0)
run_e_step(tX, tF, tB, ts, ta)
# run this cell to test your implementation
expected = np.array([[ 1., 1.],
[ 0., 0.],
[ 0., 0.]])
actual = run_e_step(tX, tF, tB, ts, ta)
assert np.allclose(actual, expected)
print("OK")
```
4. **Implement the M-step**
We need to maximise the lower bound with respect to the parameters:
\begin{equation}\mathscr{L}(q, \,F, \,B,\, s,\, a) = \sum_k \biggl (\mathbb{E} _ {q( d_k)}\bigl ( \log p( X_{k} \mid {d}_{k} , \,F,\,B,\,s) +
\log p( d_k \mid a)\bigr) - \mathbb{E} _ {q( d_k)} \log q( d_k)\biggr)\rightarrow \max\limits_{\theta, a} \end{equation}
After lengthy calculations we obtain:
$$a[j] = \frac{\sum_k q( d_k = j )}{\sum_{j'} \sum_{k'} q( d_{k'} = j')}$$$$F[i, m] = \frac 1 K \sum_k \sum_{d_k} q(d_k)\, X^k[i,\, m+d_k]$$\begin{equation}B[i, j] = \frac {\sum_k \sum_{ d_k:\, (i, \,j) \,\not\in faceArea(d_k)} q(d_k)\, X^k[i, j]}
{\sum_k \sum_{d_k: \,(i, \,j)\, \not\in faceArea(d_k)} q(d_k)}\end{equation}\begin{equation}s^2 = \frac 1 {HWK} \sum_k \sum_{d_k} q(d_k)
\sum_{i,\, j} (X^k[i, \,j] - Model^{d_k}[i, \,j])^2\end{equation}
where $Model^{d_k}[i, j]$ is the image composed of the background with the face shifted to position $d_k$.
Notes
* Update the parameters in the order: $a$, $F$, $B$, $s$.
* Use each updated parameter when estimating the next one.
```
def run_m_step(X, q, w):
"""
Estimates F, B, s, a given esitmate of posteriors defined by q.
Parameters
----------
X : array, shape (H, W, K)
K images of size H x W.
q :
q[dw, k] - estimate of posterior of position dw
of prankster's face given image Xk
w : int
Face mask width.
Returns
-------
F : array, shape (H, w)
Estimate of prankster's face.
B : array, shape (H, W)
Estimate of background.
s : float
Estimate of standard deviation of Gaussian noise.
a : array, shape (W-w+1)
Estimate of prior on position of face in any image.
"""
# your code here
H, W, K = X.shape
dw, _ = q.shape
w = W - dw + 1
a = q.sum(axis=1)/q.sum()
F = np.zeros((H, w))
for dk in range(dw):
F += (q[dk] * X[:, dk:dk+w]).sum(axis=2) / K
B = np.zeros((H, W))
denom = np.zeros((H, W))
for dk in range(dw):
if dk > 0:
denom[:, :dk] += q[dk].sum()
B[:, :dk] += (q[dk] * X[:, :dk]).sum(axis=2)
if dk + w < W:
B[:, dk+w:] += (q[dk] * X[:, dk+w:]).sum(axis=2)
denom[:, dk + w:] += q[dk].sum()
B /= denom
s2 = 0
for dk in range(dw):
model = np.copy(B)
model[:, dk:dk+w] = F
s2 += (q[dk] * ((X - np.expand_dims(model,2)) ** 2)).sum()
s2 /= H * W * K
return F, B, np.sqrt(s2), a
run_m_step(tX, tq, tw)
# run this cell to test your implementation
expected = [np.array([[ 3.27777778],
[ 9.27777778]]),
np.array([[ 0.48387097, 2.5 , 4.52941176],
[ 6.48387097, 8.5 , 10.52941176]]),
0.94868,
np.array([ 0.13888889, 0.33333333, 0.52777778])]
actual = run_m_step(tX, tq, tw)
for a, e in zip(actual, expected):
assert np.allclose(a, e)
print("OK")
```
5. **Implement the EM algorithm**
```
def run_EM(X, w, F=None, B=None, s=None, a=None, tolerance=0.001,
max_iter=50):
"""
Runs EM loop until the likelihood of observing X given current
estimate of parameters is idempotent as defined by a fixed
tolerance.
Parameters
----------
X : array, shape (H, W, K)
K images of size H x W.
w : int
Face mask width.
F : array, shape (H, w), optional
Initial estimate of prankster's face.
B : array, shape (H, W), optional
Initial estimate of background.
s : float, optional
Initial estimate of standard deviation of Gaussian noise.
a : array, shape (W-w+1), optional
Initial estimate of prior on position of face in any image.
tolerance : float, optional
Parameter for stopping criterion.
max_iter : int, optional
Maximum number of iterations.
Returns
-------
F, B, s, a : trained parameters.
"""
# your code here
H, W, N = X.shape
if F is None:
F = np.random.randint(0, 255, (H, w))
if B is None:
B = np.random.randint(0, 255, (H, W))
if a is None:
a = np.ones(W - w + 1)
a /= np.sum(a)
if s is None:
s = np.random.rand()*64*64
l_prev = -np.inf
for it in range(max_iter):
print(f"iteration = {it}")
q = run_e_step(X, F, B, s, a)
print("e")
F, B, s, a = run_m_step(X, q, w)
print("m")
print(s)
if it == max_iter - 1:
print("no convergence")
break
l_cur = calculate_lower_bound(X, F, B, s, a, q)
if l_cur - l_prev < tolerance:
print(f"converged in {it} iterations {l_cur - l_prev}")
break
else:
l_prev = l_cur
return F, B, s, a
```
Now we reconstruct the face image:
```
def show(F, i=1, n=1):
"""
shows face F at subplot i out of n
"""
plt.subplot(1, n, i)
plt.imshow(F, cmap="Greys_r")
plt.axis("off")
%%time
F, B, s, a = [None] * 4
lens = [50, 100, 300, 500, 1000]
iters = [5, 1, 1, 1, 1]
plt.figure(figsize=(20, 5))
for i, (l, it) in enumerate(zip(lens, iters)):
F, B, s, a = run_EM(X[:, :, :l], w, F, B, s, a, max_iter=it)
print(s)
show(F, i+1, 5)
```
And the background:
```
show(B)
```
|
github_jupyter
|
# Project 3: Smart Beta Portfolio and Portfolio Optimization
## Overview
Smart beta has a broad meaning, but we can say in practice that when we use the universe of stocks from an index, and then apply some weighting scheme other than market cap weighting, it can be considered a type of smart beta fund. A Smart Beta portfolio generally gives investors exposure or "beta" to one or more types of market characteristics (or factors) that are believed to predict prices while giving investors a diversified broad exposure to a particular market. Smart Beta portfolios generally target momentum, earnings quality, low volatility, and dividends or some combination. Smart Beta portfolios are generally rebalanced infrequently and follow relatively simple rules or algorithms that are passively managed. Model changes to these types of funds are also rare, requiring prospectus filings with the US Securities and Exchange Commission in the case of US-focused mutual funds or ETFs. Smart Beta portfolios are generally long-only; they do not short stocks.
In contrast, a purely alpha-focused quantitative fund may use multiple models or algorithms to create a portfolio. The portfolio manager retains discretion in upgrading or changing the types of models and how often to rebalance the portfolio in attempt to maximize performance in comparison to a stock benchmark. Managers may have discretion to short stocks in portfolios.
Imagine you're a portfolio manager, and wish to try out some different portfolio weighting methods.
One way to design a portfolio is to look at certain accounting measures (fundamentals) that, based on past trends, indicate stocks that produce better results.
For instance, you may start with a hypothesis that dividend-issuing stocks tend to perform better than stocks that do not. This may not always be true of all companies; for instance, Apple does not issue dividends, but has had good historical performance. The hypothesis about dividend-paying stocks may go something like this:
Companies that regularly issue dividends may also be more prudent in allocating their available cash, and may indicate that they are more conscious of prioritizing shareholder interests. For example, a CEO may decide to reinvest cash into pet projects that produce low returns. Or, the CEO may do some analysis, identify that reinvesting within the company produces lower returns compared to a diversified portfolio, and so decide that shareholders would be better served if they were given the cash (in the form of dividends). So according to this hypothesis, dividends may be both a proxy for how the company is doing (in terms of earnings and cash flow), but also a signal that the company acts in the best interest of its shareholders. Of course, it's important to test whether this works in practice.
You may also have another hypothesis, with which you wish to design a portfolio that can then be made into an ETF. You may find that investors may wish to invest in passive beta funds, but wish to have less risk exposure (less volatility) in their investments. The goal of having a low volatility fund that still produces returns similar to an index may be appealing to investors who have a shorter investment time horizon, and so are more risk averse.
So the objective of your proposed portfolio is to design a portfolio that closely tracks an index, while also minimizing the portfolio variance. Also, if this portfolio can match the returns of the index with less volatility, then it has a higher risk-adjusted return (same return, lower volatility).
Smart Beta ETFs can be designed with both of these two general methods (among others): alternative weighting and minimum volatility ETF.
## Instructions
Each problem consists of a function to implement and instructions on how to implement the function. The parts of the function that need to be implemented are marked with a `# TODO` comment. After implementing the function, run the cell to test it against the unit tests we've provided. For each problem, we provide one or more unit tests from our `project_tests` package. These unit tests won't tell you if your answer is correct, but will warn you of any major errors. Your code will be checked for the correct solution when you submit it to Udacity.
## Packages
When you implement the functions, you'll only need to use the packages you've used in the classroom, like [Pandas](https://pandas.pydata.org/) and [Numpy](http://www.numpy.org/). These packages will be imported for you. We recommend you don't add any import statements, otherwise the grader might not be able to run your code.
The other packages that we're importing are `helper`, `project_helper`, and `project_tests`. These are custom packages built to help you solve the problems. The `helper` and `project_helper` module contains utility functions and graph functions. The `project_tests` contains the unit tests for all the problems.
### Install Packages
```
import sys
!{sys.executable} -m pip install -r requirements.txt
```
### Load Packages
```
import pandas as pd
import numpy as np
import helper
import project_helper
import project_tests
```
## Market Data
### Load Data
For this universe of stocks, we'll be selecting large dollar volume stocks. We're using this universe, since it is highly liquid.
```
df = pd.read_csv('../../data/project_3/eod-quotemedia.csv')
percent_top_dollar = 0.2
high_volume_symbols = project_helper.large_dollar_volume_stocks(df, 'adj_close', 'adj_volume', percent_top_dollar)
df = df[df['ticker'].isin(high_volume_symbols)]
close = df.reset_index().pivot(index='date', columns='ticker', values='adj_close')
volume = df.reset_index().pivot(index='date', columns='ticker', values='adj_volume')
dividends = df.reset_index().pivot(index='date', columns='ticker', values='dividends')
```
### View Data
To see what one of these 2-d matrices looks like, let's take a look at the closing prices matrix.
```
project_helper.print_dataframe(close)
```
# Part 1: Smart Beta Portfolio
In Part 1 of this project, you'll build a portfolio using dividend yield to choose the portfolio weights. A portfolio such as this could be incorporated into a smart beta ETF. You'll compare this portfolio to a market cap weighted index to see how well it performs.
Note that in practice, you'll probably get the index weights from a data vendor (such as companies that create indices, like MSCI, FTSE, Standard and Poor's), but for this exercise we will simulate a market cap weighted index.
## Index Weights
The index we'll be using is based on large dollar volume stocks. Implement `generate_dollar_volume_weights` to generate the weights for this index. For each date, generate the weights based on dollar volume traded for that date. For example, assume the following is close prices and volume data:
```
Prices
A B ...
2013-07-08 2 2 ...
2013-07-09 5 6 ...
2013-07-10 1 2 ...
2013-07-11 6 5 ...
... ... ... ...
Volume
A B ...
2013-07-08 100 340 ...
2013-07-09 240 220 ...
2013-07-10 120 500 ...
2013-07-11 10 100 ...
... ... ... ...
```
The weights created from the function `generate_dollar_volume_weights` should be the following:
```
A B ...
2013-07-08 0.126.. 0.194.. ...
2013-07-09 0.759.. 0.377.. ...
2013-07-10 0.075.. 0.285.. ...
2013-07-11 0.037.. 0.142.. ...
... ... ... ...
```
```
def generate_dollar_volume_weights(close, volume):
"""
Generate dollar volume weights.
Parameters
----------
close : DataFrame
Close price for each ticker and date
volume : DataFrame
Volume for each ticker and date
Returns
-------
dollar_volume_weights : DataFrame
The dollar volume weights for each ticker and date
"""
assert close.index.equals(volume.index)
assert close.columns.equals(volume.columns)
#TODO: Implement function
dollar_volume = close * volume
for index,_ in close.iterrows():
# weights = close * volume / (sum of close * volume for all assets in the line)
dollar_volume.loc[index] = dollar_volume.loc[index]/sum(dollar_volume.loc[index])
return dollar_volume
project_tests.test_generate_dollar_volume_weights(generate_dollar_volume_weights)
```
### View Data
Let's generate the index weights using `generate_dollar_volume_weights` and view them using a heatmap.
```
index_weights = generate_dollar_volume_weights(close, volume)
project_helper.plot_weights(index_weights, 'Index Weights')
```
## Portfolio Weights
Now that we have the index weights, let's choose the portfolio weights based on dividends. You would normally calculate the weights based on trailing dividend yield, but we'll simplify this by just calculating the total dividend yield over time.
Implement `calculate_dividend_weights` to return the weights for each stock based on its total dividend yield over time. This is similar to generating the weight for the index, but it's using dividend data instead.
For example, assume the following is `dividends` data:
```
Dividends
A B
2013-07-08 0 0
2013-07-09 0 1
2013-07-10 0.5 0
2013-07-11 0 0
2013-07-12 2 0
... ... ...
```
The weights created from the function `calculate_dividend_weights` should be the following:
```
A B
2013-07-08 NaN NaN
2013-07-09 0 1
2013-07-10 0.333.. 0.666..
2013-07-11 0.333.. 0.666..
2013-07-12 0.714.. 0.285..
... ... ...
```
```
def calculate_dividend_weights(dividends):
"""
Calculate dividend weights.
Parameters
----------
dividends : DataFrame
Dividend for each stock and date
Returns
-------
dividend_weights : DataFrame
Weights for each stock and date
"""
#TODO: Implement function
cumulated_dividend = dividends.cumsum()
for index,_ in dividends.iterrows():
# weights = dividends / (sum of dividends for all assets in the line)
cumulated_dividend.loc[index] = cumulated_dividend.loc[index]/sum(cumulated_dividend.loc[index])
return cumulated_dividend
project_tests.test_calculate_dividend_weights(calculate_dividend_weights)
```
### View Data
Just like the index weights, let's generate the ETF weights and view them using a heatmap.
```
etf_weights = calculate_dividend_weights(dividends)
project_helper.plot_weights(etf_weights, 'ETF Weights')
```
## Returns
Implement `generate_returns` to generate returns data for all the stocks and dates from price data. You might notice we're implementing returns and not log returns. Since we're not dealing with volatility, we don't have to use log returns.
```
def generate_returns(prices):
"""
Generate returns for ticker and date.
Parameters
----------
prices : DataFrame
Price for each ticker and date
Returns
-------
returns : DataFrame
The returns for each ticker and date
"""
#TODO: Implement function
return ((prices - prices.shift(1))/prices.shift(1))
project_tests.test_generate_returns(generate_returns)
```
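As a quick sanity check (this cell is not part of the project tests, and the toy prices below are made up), the formula above matches pandas' built-in `pct_change`:
```
import pandas as pd
import numpy as np

# minimal sketch: compare the manual return formula with pandas' pct_change
toy_prices = pd.DataFrame({'A': [2.0, 5.0, 1.0, 6.0], 'B': [2.0, 6.0, 2.0, 5.0]})
manual = (toy_prices - toy_prices.shift(1)) / toy_prices.shift(1)
builtin = toy_prices.pct_change()
# both agree (the first row is NaN in each case)
assert np.allclose(manual.fillna(0), builtin.fillna(0))
```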
### View Data
Let's generate the closing returns using `generate_returns` and view them using a heatmap.
```
returns = generate_returns(close)
project_helper.plot_returns(returns, 'Close Returns')
```
## Weighted Returns
With the returns of each stock computed, we can use them to compute the returns for an index or ETF. Implement `generate_weighted_returns` to create weighted returns using the returns and weights.
```
def generate_weighted_returns(returns, weights):
"""
Generate weighted returns.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
weights : DataFrame
Weights for each ticker and date
Returns
-------
weighted_returns : DataFrame
Weighted returns for each ticker and date
"""
assert returns.index.equals(weights.index)
assert returns.columns.equals(weights.columns)
#TODO: Implement function
return (returns * weights)
project_tests.test_generate_weighted_returns(generate_weighted_returns)
```
### View Data
Let's generate the ETF and index returns using `generate_weighted_returns` and view them using a heatmap.
```
index_weighted_returns = generate_weighted_returns(returns, index_weights)
etf_weighted_returns = generate_weighted_returns(returns, etf_weights)
project_helper.plot_returns(index_weighted_returns, 'Index Returns')
project_helper.plot_returns(etf_weighted_returns, 'ETF Returns')
```
## Cumulative Returns
To compare performance between the ETF and Index, we're going to calculate the tracking error. Before we do that, we first need to calculate the index and ETF cumulative returns. Implement `calculate_cumulative_returns` to calculate the cumulative returns over time given the returns.
```
def calculate_cumulative_returns(returns):
"""
Calculate cumulative returns.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
Returns
-------
cumulative_returns : Pandas Series
Cumulative returns for each date
"""
#TODO: Implement function
cumulative_returns = (returns.sum(axis=1) + 1).cumprod()
return cumulative_returns
project_tests.test_calculate_cumulative_returns(calculate_cumulative_returns)
```
### View Data
Let's generate the ETF and index cumulative returns using `calculate_cumulative_returns` and compare the two.
```
index_weighted_cumulative_returns = calculate_cumulative_returns(index_weighted_returns)
etf_weighted_cumulative_returns = calculate_cumulative_returns(etf_weighted_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, etf_weighted_cumulative_returns, 'Smart Beta ETF vs Index')
```
## Tracking Error
In order to check the performance of the smart beta portfolio, we can calculate the annualized tracking error against the index. Implement `tracking_error` to return the tracking error between the ETF and benchmark.
For reference, we'll be using the following annualized tracking error function:
$$ TE = \sqrt{252} * SampleStdev(r_p - r_b) $$
Where $ r_p $ is the portfolio/ETF returns and $ r_b $ is the benchmark returns.
_Note: When calculating the sample standard deviation, the delta degrees of freedom is 1, which is also the default value._
```
def tracking_error(benchmark_returns_by_date, etf_returns_by_date):
"""
Calculate the tracking error.
Parameters
----------
benchmark_returns_by_date : Pandas Series
The benchmark returns for each date
etf_returns_by_date : Pandas Series
The ETF returns for each date
Returns
-------
tracking_error : float
The tracking error
"""
assert benchmark_returns_by_date.index.equals(etf_returns_by_date.index)
#TODO: Implement function
return (np.sqrt(252)*np.std(etf_returns_by_date - benchmark_returns_by_date, ddof=1))
project_tests.test_tracking_error(tracking_error)
```
### View Data
Let's generate the tracking error using `tracking_error`.
```
smart_beta_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(etf_weighted_returns, 1))
print('Smart Beta Tracking Error: {}'.format(smart_beta_tracking_error))
```
# Part 2: Portfolio Optimization
Now, let's create a second portfolio. We'll still reuse the market cap weighted index, but this will be independent of the dividend-weighted portfolio that we created in part 1.
We want to both minimize the portfolio variance and also want to closely track a market cap weighted index. In other words, we're trying to minimize the distance between the weights of our portfolio and the weights of the index.
$Minimize \left [ \sigma^2_p + \lambda \sqrt{\sum_{1}^{m}(weight_i - indexWeight_i)^2} \right ]$ where $m$ is the number of stocks in the portfolio, and $\lambda$ is a scaling factor that you can choose.
Why are we doing this? One way that investors evaluate a fund is by how well it tracks its index. The fund is still expected to deviate from the index within a certain range in order to improve fund performance. A way for a fund to track the performance of its benchmark is by keeping its asset weights similar to the weights of the index. We’d expect that if the fund has the same stocks as the benchmark, and also the same weights for each stock as the benchmark, the fund would yield about the same returns as the benchmark. By minimizing a linear combination of both the portfolio risk and distance between portfolio and benchmark weights, we attempt to balance the desire to minimize portfolio variance with the goal of tracking the index.
## Covariance
Implement `get_covariance_returns` to calculate the covariance of the `returns`. We'll use this to calculate the portfolio variance.
If we have $m$ stock series, the covariance matrix is an $m \times m$ matrix containing the covariance between each pair of stocks. We can use [`Numpy.cov`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.cov.html) to get the covariance. We give it a 2D array in which each row is a stock series, and each column is an observation at the same period of time. For any `NaN` values, you can replace them with zeros using the [`DataFrame.fillna`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.fillna.html) function.
The covariance matrix $\mathbf{P} =
\begin{bmatrix}
\sigma^2_{1,1} & ... & \sigma_{1,m} \\
... & ... & ... \\
\sigma_{m,1} & ... & \sigma^2_{m,m} \\
\end{bmatrix}$
```
def get_covariance_returns(returns):
"""
Calculate covariance matrices.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
Returns
-------
returns_covariance : 2 dimensional Ndarray
The covariance of the returns
"""
#TODO: Implement function
return (np.cov(returns.T.fillna(0)))
project_tests.test_get_covariance_returns(get_covariance_returns)
```
### View Data
Let's look at the covariance generated from `get_covariance_returns`.
```
covariance_returns = get_covariance_returns(returns)
covariance_returns = pd.DataFrame(covariance_returns, returns.columns, returns.columns)
covariance_returns_correlation = np.linalg.inv(np.diag(np.sqrt(np.diag(covariance_returns))))
covariance_returns_correlation = pd.DataFrame(
covariance_returns_correlation.dot(covariance_returns).dot(covariance_returns_correlation),
covariance_returns.index,
covariance_returns.columns)
project_helper.plot_covariance_returns_correlation(
covariance_returns_correlation,
'Covariance Returns Correlation Matrix')
```
### portfolio variance
We can write the portfolio variance $\sigma^2_p = \mathbf{x^T} \mathbf{P} \mathbf{x}$
Recall that the $\mathbf{x^T} \mathbf{P} \mathbf{x}$ is called the quadratic form.
We can use the cvxpy function `quad_form(x,P)` to get the quadratic form.
### Distance from index weights
We want portfolio weights that track the index closely. So we want to minimize the distance between them.
Recall from the Pythagorean theorem that you can get the distance between two points in an x,y plane by adding the square of the x and y distances and taking the square root. Extending this to any number of dimensions is called the L2 norm. So $\sqrt{\sum_{1}^{n}(weight_i - indexWeight_i)^2}$ can also be written as $\left \| \mathbf{x} - \mathbf{index} \right \|_2$. There's a cvxpy function called [norm()](https://www.cvxpy.org/api_reference/cvxpy.atoms.other_atoms.html#norm):
`norm(x, p=2, axis=None)`. The default already finds the L2 norm, so you only need to pass in one argument: the difference between your portfolio weights and the index weights.
### objective function
We want to minimize both the portfolio variance and the distance of the portfolio weights from the index weights.
We also want to choose a `scale` constant, which is $\lambda$ in the expression.
$\mathbf{x^T} \mathbf{P} \mathbf{x} + \lambda \left \| \mathbf{x} - \mathbf{index} \right \|_2$
This lets us choose how much priority we give to minimizing the difference from the index, relative to minimizing the variance of the portfolio. If you choose a higher value for `scale` ($\lambda$), the optimization puts more emphasis on tracking the index closely, at the expense of a higher portfolio variance.
We can find the objective function using cvxpy `objective = cvx.Minimize()`. Can you guess what to pass into this function?
### constraints
We can also define our constraints in a list. For example, you'd want the weights to sum to one, so $\sum_{1}^{n}x_i = 1$. You may also need to go long only, which means no shorting, so no negative weights: $x_i \geq 0$ for all $i$. You could save these as a variable like `[x >= 0, sum(x) == 1]`, where `x` was created using `cvx.Variable()`.
### optimization
So now that we have our objective function and constraints, we can solve for the values of $\mathbf{x}$.
cvxpy has the constructor `Problem(objective, constraints)`, which returns a `Problem` object.
The `Problem` object has a `solve()` method, which returns the minimized value of the objective function; here, that is the portfolio variance plus the scaled distance-to-index penalty.
It also updates the vector $\mathbf{x}$.
We can check out the values of $x_A$ and $x_B$ that gave the minimum portfolio variance by using `x.value`
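Before the graded implementation below, here is a minimal two-asset sketch of these steps; the covariance matrix, index weights, and `scale` value are made up purely for illustration:
```
import cvxpy as cvx
import numpy as np

# hypothetical inputs for a two-asset portfolio (illustration only)
P = np.array([[0.10, 0.02],
              [0.02, 0.05]])      # covariance of returns
index = np.array([0.6, 0.4])      # index weights
scale = 2.0                       # lambda

x = cvx.Variable(2)                          # portfolio weights to solve for
portfolio_variance = cvx.quad_form(x, P)     # x^T P x
distance_to_index = cvx.norm(x - index)      # L2 norm of the weight difference
objective = cvx.Minimize(portfolio_variance + scale * distance_to_index)
constraints = [x >= 0, cvx.sum(x) == 1]      # long only, fully invested

problem = cvx.Problem(objective, constraints)
problem.solve()                              # returns the minimized objective value
print('x_A, x_B:', x.value)                  # optimized weights
```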
```
import cvxpy as cvx
def get_optimal_weights(covariance_returns, index_weights, scale=2.0):
"""
Find the optimal weights.
Parameters
----------
covariance_returns : 2 dimensional Ndarray
The covariance of the returns
index_weights : Pandas Series
Index weights for all tickers at a period in time
scale : int
The penalty factor for weights that deviate from the index
Returns
-------
x : 1 dimensional Ndarray
The solution for x
"""
assert len(covariance_returns.shape) == 2
assert len(index_weights.shape) == 1
assert covariance_returns.shape[0] == covariance_returns.shape[1] == index_weights.shape[0]
#TODO: Implement function
# number of stocks m is number of rows of returns, and also number of index weights
m = covariance_returns.shape[0]
# x variables (to be found with optimization)
x = cvx.Variable(m)
# portfolio variance, in quadratic form
portfolio_variance = cvx.quad_form(x, covariance_returns)
# euclidean distance (L2 norm) between portfolio and index weights
distance_to_index = cvx.norm(x - index_weights)
# objective function
objective = cvx.Minimize(portfolio_variance + scale * distance_to_index)
# constraints
constraints = [x >= 0, sum(x) == 1]
# use cvxpy to solve the objective
problem = cvx.Problem(objective, constraints).solve()
# retrieve the weights of the optimized portfolio
x_values = x.value
return x_values
project_tests.test_get_optimal_weights(get_optimal_weights)
```
## Optimized Portfolio
Using the `get_optimal_weights` function, let's generate the optimal ETF weights without rebalancing. We can do this by feeding in the covariance of the entire history of data. We also need to feed in a set of index weights; here we'll use the most recent index weights.
```
raw_optimal_single_rebalance_etf_weights = get_optimal_weights(covariance_returns.values, index_weights.iloc[-1])
optimal_single_rebalance_etf_weights = pd.DataFrame(
np.tile(raw_optimal_single_rebalance_etf_weights, (len(returns.index), 1)),
returns.index,
returns.columns)
```
With our ETF weights built, let's compare it to the index. Run the next cell to calculate the ETF returns and compare it to the index returns.
```
optim_etf_returns = generate_weighted_returns(returns, optimal_single_rebalance_etf_weights)
optim_etf_cumulative_returns = calculate_cumulative_returns(optim_etf_returns)
project_helper.plot_benchmark_returns(index_weighted_cumulative_returns, optim_etf_cumulative_returns, 'Optimized ETF vs Index')
optim_etf_tracking_error = tracking_error(np.sum(index_weighted_returns, 1), np.sum(optim_etf_returns, 1))
print('Optimized ETF Tracking Error: {}'.format(optim_etf_tracking_error))
```
## Rebalance Portfolio Over Time
The single optimized ETF portfolio used the same weights for the entire history. These might not be the optimal weights for the entire period. Let's rebalance the portfolio over the same period instead of using the same weights. Implement `rebalance_portfolio` to rebalance a portfolio.
Rebalance the portfolio every n days, where n is given as `shift_size`. When rebalancing, you should look back a certain number of days of data in the past, denoted as `chunk_size`. Using this data, compute the optimal weights using `get_optimal_weights` and `get_covariance_returns`.
```
def rebalance_portfolio(returns, index_weights, shift_size, chunk_size):
"""
Get weights for each rebalancing of the portfolio.
Parameters
----------
returns : DataFrame
Returns for each ticker and date
index_weights : DataFrame
Index weight for each ticker and date
shift_size : int
The number of days between each rebalance
chunk_size : int
The number of days to look in the past for rebalancing
Returns
-------
all_rebalance_weights : list of Ndarrays
The ETF weights for each point they are rebalanced
"""
assert returns.index.equals(index_weights.index)
assert returns.columns.equals(index_weights.columns)
assert shift_size > 0
assert chunk_size >= 0
#TODO: Implement function
# List of all rebalanced weights
rebalance_portfolio_weights = []
for index in range(chunk_size, returns.shape[0], shift_size):
# calculates the chunk of returns
chunk = returns.iloc[index - chunk_size : index]
# calculates covariance returns
covariance_returns = get_covariance_returns(chunk)
# calculates optimal weights
raw_optimal_single_rebalance_etf_weights = get_optimal_weights(covariance_returns, index_weights.iloc[index - 1])
# append the results
rebalance_portfolio_weights.append(raw_optimal_single_rebalance_etf_weights)
return rebalance_portfolio_weights
project_tests.test_rebalance_portfolio(rebalance_portfolio)
```
Run the following cell to rebalance the portfolio using `rebalance_portfolio`.
```
chunk_size = 250
shift_size = 5
all_rebalance_weights = rebalance_portfolio(returns, index_weights, shift_size, chunk_size)
```
## Portfolio Turnover
With the portfolio rebalanced, we need to use a metric to measure the cost of rebalancing the portfolio. Implement `get_portfolio_turnover` to calculate the annual portfolio turnover. We'll be using the formulas used in the classroom:
$ AnnualizedTurnover =\frac{SumTotalTurnover}{NumberOfRebalanceEvents} * NumberofRebalanceEventsPerYear $
$ SumTotalTurnover =\sum_{t,n}{\left | x_{t,n} - x_{t+1,n} \right |} $ Where $ x_{t,n} $ are the weights at time $ t $ for equity $ n $.
$ SumTotalTurnover $ is just the sum of $ \left | x_{t_1,n} - x_{t_2,n} \right | $ over each pair of consecutive rebalance dates $ t_1 $ and $ t_2 $ and all equities $ n $.
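As a small worked check with made-up numbers (two equities, rebalanced every 5 trading days):
```
import numpy as np

# hypothetical weights from three rebalance events (illustration only)
weights = [np.array([0.5, 0.5]), np.array([0.6, 0.4]), np.array([0.8, 0.2])]
shift_size = 5
rebalance_count = len(weights) - 1   # 2 turnover measurements

sum_total_turnover = sum(np.abs(w_next - w_prev).sum()
                         for w_prev, w_next in zip(weights[:-1], weights[1:]))  # 0.6
annualized_turnover = sum_total_turnover / rebalance_count * (252 / shift_size)
print(annualized_turnover)  # 0.6 / 2 * 50.4 = 15.12
```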
```
def get_portfolio_turnover(all_rebalance_weights, shift_size, rebalance_count, n_trading_days_in_year=252):
"""
Calculate portfolio turnover.
Parameters
----------
all_rebalance_weights : list of Ndarrays
The ETF weights for each point they are rebalanced
shift_size : int
The number of days between each rebalance
rebalance_count : int
Number of times the portfolio was rebalanced
n_trading_days_in_year: int
Number of trading days in a year
Returns
-------
portfolio_turnover : float
The portfolio turnover
"""
assert shift_size > 0
assert rebalance_count > 0
#TODO: Implement function
portfolio_turnover = 0
for index in range(1, len(all_rebalance_weights)):
portfolio_turnover += sum(np.abs(all_rebalance_weights[index] - all_rebalance_weights[index-1]))
# annualized turnover calculation
annualized_portfolio_turnover = portfolio_turnover*(n_trading_days_in_year/shift_size)/rebalance_count
return annualized_portfolio_turnover
project_tests.test_get_portfolio_turnover(get_portfolio_turnover)
```
Run the following cell to get the portfolio turnover from `get_portfolio_turnover`.
```
print(get_portfolio_turnover(all_rebalance_weights, shift_size, len(all_rebalance_weights) - 1))
```
That's it! You've built a smart beta portfolio in part 1 and did portfolio optimization in part 2. You can now submit your project.
## Submission
Now that you're done with the project, it's time to submit it. Click the submit button in the bottom right. One of our reviewers will give you feedback on your project with a pass or not passed grade. You can continue to the next section while you wait for feedback.
|
github_jupyter
|
# Homework: Basic Artificial Neural Networks
```
%matplotlib inline
from time import time, sleep
import numpy as np
import matplotlib.pyplot as plt
from IPython import display
```
# Framework
Implement everything in `Modules.ipynb`. Read all the comments thoughtfully to ease the pain. Please try not to change the prototypes.
Do not forget that each module should return **AND** store `output` and `gradInput`.
The typical assumption is that `module.backward` is always executed after `module.forward`,
so `output` is stored; this will be useful for `SoftMax`.
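For reference, here is a minimal sketch of the interface this assumes (the real classes live in the modules notebook loaded below; the identity layer here is only illustrative):
```
class IdentityModule:
    """Illustrative skeleton: forward/backward both return AND store their results."""
    def __init__(self):
        self.output = None
        self.gradInput = None

    def forward(self, input):
        # compute and store the output (identity transform here, just for illustration)
        self.output = input
        return self.output

    def backward(self, input, gradOutput):
        # backward runs after forward, so self.output is already available
        # (this is what e.g. SoftMax relies on); store and return gradInput
        self.gradInput = gradOutput
        return self.gradInput
```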
```
# (re-)load layers
%run homework_modules.ipynb
```
Optimizer is implemented for you.
```
def sgd_momentum(x, dx, config, state):
"""
This is a very ugly implementation of sgd with momentum
just to show an example how to store old grad in state.
config:
- momentum
- learning_rate
state:
- old_grad
"""
# x and dx have complex structure, old dx will be stored in a simpler one
state.setdefault('old_grad', {})
i = 0
for cur_layer_x, cur_layer_dx in zip(x,dx):
for cur_x, cur_dx in zip(cur_layer_x,cur_layer_dx):
cur_old_grad = state['old_grad'].setdefault(i, np.zeros_like(cur_dx))
np.add(config['momentum'] * cur_old_grad, config['learning_rate'] * cur_dx, out = cur_old_grad)
cur_x -= cur_old_grad
i += 1
def normalization(column):
column_min = column.min()
column_max = column.max()
column_range = column_max - column_min
if(column_range == 0):
return (column - column_min)
return (column - column_min) / column_range
def create_onehot(column):
class_count = column.max() + 1
size = column.shape[0]
onehot = np.zeros((size, class_count), dtype=float)
for i in range(size):
onehot[i][column[i]] = 1.0
return onehot
# Open MNIST dataset and prepare for train
from mlxtend.data import loadlocal_mnist
x_train, y_train = loadlocal_mnist(images_path='Dataset/train-images-idx3-ubyte', labels_path='Dataset/train-labels-idx1-ubyte')
x_test, y_test = loadlocal_mnist(images_path='Dataset/t10k-images-idx3-ubyte', labels_path='Dataset/t10k-labels-idx1-ubyte')
# normalize
x_train = normalization(x_train)
x_test = normalization(x_test)
# create onehot for y
y_train_onehot = create_onehot(y_train)
y_test_onehot = create_onehot(y_test)
# batch generator
def get_batches(dataset, batch_size):
X, Y = dataset
n_samples = X.shape[0]
# Shuffle at the start of epoch
indices = np.arange(n_samples)
np.random.shuffle(indices)
for start in range(0, n_samples, batch_size):
end = min(start + batch_size, n_samples)
batch_idx = indices[start:end]
yield X[batch_idx], Y[batch_idx]
features = x_train.shape[1]
# Optimizer params
optimizer_config = {'learning_rate' : 1e-1, 'momentum': 0.9}
optimizer_state = {}
# Looping params
n_epoch = 6
batch_size = 180
```
### Build NN
```
net = Sequential()
net.add(Linear(features, 300))
net.add(ReLU())
net.add(Linear(300, 10))
net.add(SoftMax())
criterion = MSECriterion()
print(net)
```
### Train
Basic training loop. Examine it.
```
loss_history = []
for i in range(n_epoch):
for x_batch, y_batch in get_batches((x_train, y_train_onehot), batch_size):
net.zeroGradParameters()
# Forward
predictions = net.forward(x_batch)
loss = criterion.forward(predictions, y_batch)
# Backward
dp = criterion.backward(predictions, y_batch)
net.backward(x_batch, dp)
# Update weights
sgd_momentum(net.getParameters(),
net.getGradParameters(),
optimizer_config,
optimizer_state)
loss_history.append(loss)
# Visualize
display.clear_output(wait=True)
plt.figure(figsize=(8, 6))
plt.title("Training loss")
plt.xlabel("#iteration")
plt.ylabel("loss")
plt.plot(loss_history, 'b')
plt.show()
print('Current loss: %f' % loss)
```
### Build NN with dropout
```
net = Sequential()
net.add(Linear(features, 300))
net.add(ReLU())
net.add(Dropout(0.7))
net.add(Linear(300, 10))
net.add(SoftMax())
criterion = MSECriterion()
print(net)
loss_history = []
for i in range(n_epoch):
for x_batch, y_batch in get_batches((x_train, y_train_onehot), batch_size):
net.zeroGradParameters()
# Forward
predictions = net.forward(x_batch)
loss = criterion.forward(predictions, y_batch)
# Backward
dp = criterion.backward(predictions, y_batch)
net.backward(x_batch, dp)
# Update weights
sgd_momentum(net.getParameters(),
net.getGradParameters(),
optimizer_config,
optimizer_state)
loss_history.append(loss)
# Visualize
display.clear_output(wait=True)
plt.figure(figsize=(8, 6))
plt.title("Training loss")
plt.xlabel("#iteration")
plt.ylabel("loss")
plt.plot(loss_history, 'b')
plt.show()
print('Current loss: %f' % loss)
# Your answer goes here. ################################################
net = Sequential()
net.add(Linear(features, 600))
net.add(ReLU())
net.add(Dropout(0.7))
net.add(Linear(600, 300))
net.add(ReLU())
net.add(Linear(300, 100))
net.add(ReLU())
net.add(Linear(100, 10))
net.add(SoftMax())
criterion = MSECriterion()
print(net)
loss_history = []
for i in range(n_epoch):
for x_batch, y_batch in get_batches((x_train, y_train_onehot), batch_size):
net.zeroGradParameters()
# Forward
predictions = net.forward(x_batch)
loss = criterion.forward(predictions, y_batch)
# Backward
dp = criterion.backward(predictions, y_batch)
net.backward(x_batch, dp)
# Update weights
sgd_momentum(net.getParameters(),
net.getGradParameters(),
optimizer_config,
optimizer_state)
loss_history.append(loss)
# Visualize
display.clear_output(wait=True)
plt.figure(figsize=(8, 6))
plt.title("Training loss")
plt.xlabel("#iteration")
plt.ylabel("loss")
plt.plot(loss_history, 'b')
plt.show()
print('Current loss: %f' % loss)
# Your code goes here. ################################################
# np.clip(prediction,0,1)
#
# Your code goes here. ################################################
```
|
github_jupyter
|
```
!date
import numpy as np, pandas as pd, matplotlib.pyplot as plt, seaborn as sns
%matplotlib inline
sns.set_context('paper')
sns.set_style('darkgrid')
```
# Mixture Model in PyMC3
Original NB by Abe Flaxman, modified by Thomas Wiecki
```
import pymc3 as pm, theano.tensor as tt
# simulate data from a known mixture distribution
np.random.seed(12345) # set random seed for reproducibility
k = 3
ndata = 500
spread = 5
centers = np.array([-spread, 0, spread])
# simulate data from mixture distribution
v = np.random.randint(0, k, ndata)
data = centers[v] + np.random.randn(ndata)
plt.hist(data);
# setup model
model = pm.Model()
with model:
# cluster sizes
a = pm.constant(np.array([1., 1., 1.]))
p = pm.Dirichlet('p', a=a, shape=k)
# ensure all clusters have some points
p_min_potential = pm.Potential('p_min_potential', tt.switch(tt.min(p) < .1, -np.inf, 0))
# cluster centers
means = pm.Normal('means', mu=[0, 0, 0], sd=15, shape=k)
# break symmetry
order_means_potential = pm.Potential('order_means_potential',
tt.switch(means[1]-means[0] < 0, -np.inf, 0)
+ tt.switch(means[2]-means[1] < 0, -np.inf, 0))
# measurement error
sd = pm.Uniform('sd', lower=0, upper=20)
# latent cluster of each observation
category = pm.Categorical('category',
p=p,
shape=ndata)
# likelihood for each observed value
points = pm.Normal('obs',
mu=means[category],
sd=sd,
observed=data)
# fit model
with model:
step1 = pm.Metropolis(vars=[p, sd, means])
step2 = pm.ElemwiseCategoricalStep(var=category, values=[0, 1, 2])
tr = pm.sample(10000, step=[step1, step2])
```
## Full trace
```
pm.plots.traceplot(tr, ['p', 'sd', 'means']);
```
## After convergence
```
# take a look at traceplot for some model parameters
# (with some burn-in and thinning)
pm.plots.traceplot(tr[5000::5], ['p', 'sd', 'means']);
# I prefer autocorrelation plots for serious confirmation of MCMC convergence
pm.autocorrplot(tr[5000::5], ['sd'])
```
## Sampling of cluster for individual data point
```
i=0
plt.plot(tr['category'][5000::5, i], drawstyle='steps-mid')
plt.axis(ymin=-.1, ymax=2.1)
def cluster_posterior(i=0):
print('true cluster:', v[i])
print(' data value:', np.round(data[i],2))
plt.hist(tr['category'][5000::5,i], bins=[-.5,.5,1.5,2.5,], rwidth=.9)
plt.axis(xmin=-.5, xmax=2.5)
plt.xticks([0,1,2])
cluster_posterior(i)
```
|
github_jupyter
|
### Neural style transfer in PyTorch
This tutorial implements the "slow" neural style transfer based on the VGG19 model.
It closely follows the official neural style tutorial you can find [here](http://pytorch.org/tutorials/advanced/neural_style_tutorial.html).
__Note:__ if you didn't sit through the explanation of neural style transfer in the on-campus lecture, you're _strongly recommended_ to follow the link above instead of this notebook.
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib.pyplot import imread
from skimage.transform import resize, rotate
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# desired size of the output image
imsize = 512 # REDUCE THIS TO 128 IF THE OPTIMIZATION IS TOO SLOW FOR YOU
def image_loader(image_name):
image = resize(imread(image_name), [imsize, imsize])
image = image.transpose([2,0,1]) / image.max()
image = Variable(dtype(image))
# fake batch dimension required to fit network's input dimensions
image = image.unsqueeze(0)
return image
use_cuda = torch.cuda.is_available()
print("torch", torch.__version__)
if use_cuda:
print("Using GPU.")
else:
print("Not using GPU.")
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
```
### Draw input images
```
!mkdir -p images
!wget https://github.com/yandexdataschool/Practical_DL/raw/fall21/week10_interpretability/bonus_style_transfer/images/wave.jpg -O images/wave.jpg
style_img = image_loader("images/wave.jpg").type(dtype)
!wget http://cdn.cnn.com/cnnnext/dam/assets/170809210024-trump-nk.jpg -O images/my_img.jpg
content_img = image_loader("images/my_img.jpg").type(dtype)
assert style_img.size() == content_img.size(), \
"we need to import style and content images of the same size"
def imshow(tensor, title=None):
image = tensor.clone().cpu() # we clone the tensor to not do changes on it
image = image.view(3, imsize, imsize) # remove the fake batch dimension
image = image.numpy().transpose([1,2,0])
plt.imshow(image / np.max(image))
if title is not None:
plt.title(title)
plt.figure(figsize=[12,6])
plt.subplot(1,2,1)
imshow(style_img.data, title='Style Image')
plt.subplot(1,2,2)
imshow(content_img.data, title='Content Image')
```
### Define Style Transfer Losses
As shown in the lecture, we define two loss functions: content and style losses.
Content loss is simply a pointwise mean squared error of high-level features while style loss is the error between gram matrices of intermediate feature layers.
To obtain the feature representations we use a pre-trained VGG19 network.
```
import torchvision.models as models
cnn = models.vgg19(pretrained=True).features
# move it to the GPU if possible:
if use_cuda:
cnn = cnn.cuda()
class ContentLoss(nn.Module):
def __init__(self, target, weight):
super(ContentLoss, self).__init__()
# we 'detach' the target content from the tree used
self.target = target.detach() * weight
self.weight = weight
def forward(self, input):
self.loss = F.mse_loss(input * self.weight, self.target)
return input.clone()
def backward(self, retain_graph=True):
self.loss.backward(retain_graph=retain_graph)
return self.loss
def gram_matrix(input):
a, b, c, d = input.size() # a=batch size(=1)
# b=number of feature maps
# (c,d)=dimensions of a f. map (N=c*d)
features = input.view(a * b, c * d) # resize F_XL into \hat F_XL
G = torch.mm(features, features.t()) # compute the gram product
# we 'normalize' the values of the gram matrix
# by dividing by the number of elements in each feature map.
return G.div(a * b * c * d)
class StyleLoss(nn.Module):
def __init__(self, target, weight):
super(StyleLoss, self).__init__()
self.target = target.detach() * weight
self.weight = weight
def forward(self, input):
self.G = gram_matrix(input)
self.G.mul_(self.weight)
self.loss = F.mse_loss(self.G, self.target)
return input.clone()
def backward(self, retain_graph=True):
self.loss.backward(retain_graph=retain_graph)
return self.loss
```
### Style transfer pipeline
We can now define a unified "model" that computes all the losses on the image triplet (content image, style image, optimized image) so that we could optimize them with backprop (over image pixels).
```
content_weight=1 # coefficient for content loss
style_weight=1000 # coefficient for style loss
content_layers=('conv_4',) # use these layers for content loss
style_layers=('conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5') # use these layers for style loss
content_losses = []
style_losses = []
model = nn.Sequential() # the new Sequential module network
# move these modules to the GPU if possible:
if use_cuda:
model = model.cuda()
i = 1
for layer in list(cnn):
if isinstance(layer, nn.Conv2d):
name = "conv_" + str(i)
model.add_module(name, layer)
if name in content_layers:
# add content loss:
target = model(content_img).clone()
content_loss = ContentLoss(target, content_weight)
model.add_module("content_loss_" + str(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
# add style loss:
target_feature = model(style_img).clone()
target_feature_gram = gram_matrix(target_feature)
style_loss = StyleLoss(target_feature_gram, style_weight)
model.add_module("style_loss_" + str(i), style_loss)
style_losses.append(style_loss)
if isinstance(layer, nn.ReLU):
name = "relu_" + str(i)
model.add_module(name, layer)
if name in content_layers:
# add content loss:
target = model(content_img).clone()
content_loss = ContentLoss(target, content_weight)
model.add_module("content_loss_" + str(i), content_loss)
content_losses.append(content_loss)
if name in style_layers:
# add style loss:
target_feature = model(style_img).clone()
target_feature_gram = gram_matrix(target_feature)
style_loss = StyleLoss(target_feature_gram, style_weight)
model.add_module("style_loss_" + str(i), style_loss)
style_losses.append(style_loss)
i += 1
if isinstance(layer, nn.MaxPool2d):
name = "pool_" + str(i)
model.add_module(name, layer) # ***
```
### Optimization
We can now optimize both style and content loss over input image.
```
input_image = Variable(content_img.clone().data, requires_grad=True)
optimizer = torch.optim.Adam([input_image], lr=0.1)
num_steps = 300
for i in range(num_steps):
# correct the values of updated input image
input_image.data.clamp_(0, 1)
model(input_image)
style_score = 0
content_score = 0
for sl in style_losses:
style_score += sl.backward()
for cl in content_losses:
content_score += cl.backward()
if i % 10 == 0: # <--- adjust the value to see updates more frequently
print('Step # {} Style Loss : {:4f} Content Loss: {:4f}'.format(
i, style_score.data.item(), content_score.item()))
plt.figure(figsize=[10,10])
imshow(input_image.data)
plt.show()
loss = style_score + content_score
optimizer.step(lambda: loss)
optimizer.zero_grad()
# a last correction...
input_image.data.clamp_(0, 1)
```
### Final image
```
plt.figure(figsize=[10,10])
imshow(input_image.data)
plt.show()
```
|
github_jupyter
|
## Implementing a 1D convnet
In Keras, you would use a 1D convnet via the `Conv1D` layer, which has a very similar interface to `Conv2D`. It **takes as input 3D tensors with shape (samples, time, features) and also returns similarly-shaped 3D tensors**. The convolution window is a 1D window on the temporal axis, axis 1 in the input tensor.
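As a quick illustration of those shapes (a standalone sketch using random inputs, not the IMDB data we load below):
```
# shape check only: random data, not model training
import numpy as np
from tensorflow.keras import layers

x = np.random.random((2, 500, 128)).astype('float32')  # 2 samples, 500 steps, 128 features
y = layers.Conv1D(32, 7, activation='relu')(x)
print(y.shape)  # (2, 494, 32) with the default 'valid' padding
```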
Let's build a simple 2-layer 1D convnet and apply it to the IMDB sentiment classification task already seen previously.
```
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence
max_features = 10000 # number of words to consider as features
max_len = 500 # cut texts after this number of words (among top max_features most common words)
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=max_len)
x_test = sequence.pad_sequences(x_test, maxlen=max_len)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
```
**1D convnets are structured in the same way as their 2D counterparts**: they consist of a stack of `Conv1D` and `MaxPooling1D` layers, eventually ending in either a global pooling layer or a `Flatten` layer that turns the 3D outputs into 2D outputs, allowing you to add one or more `Dense` layers to the model for classification or regression.
One difference, though, is the fact that **we can afford to use larger convolution windows with 1D convnets**. Indeed, with a 2D convolution layer, a 3x3 convolution window contains `3*3 = 9` feature vectors, but with a 1D convolution layer, a convolution window of size 3 would only contain 3 feature vectors. We can thus easily afford 1D convolution windows of size 7 or 9.
This is our example 1D convnet for the IMDB dataset:
```
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Embedding(max_features, 128, input_length=max_len))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.MaxPooling1D(5))
model.add(layers.Conv1D(32, 7, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.summary()
model.compile(
optimizer=RMSprop(lr=1e-4),
loss='binary_crossentropy',
metrics=['acc']
)
history = model.fit(
x_train,
y_train,
epochs=10,
batch_size=128,
validation_split=0.2
)
```
Here are our training and validation results: validation accuracy is slightly lower than that of the LSTM example we used two sections ago, but runtime is faster, both on CPU and GPU (albeit the exact speedup will vary greatly depending on your exact configuration).
At that point, we could re-train this model for the right number of epochs (8), and run it on the test set. This is a convincing demonstration that a 1D convnet can offer a fast, cheap alternative to a recurrent network on a word-level sentiment classification task.
```
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
```
## Combining CNNs and RNNs to process long sequences
Because 1D convnets process input patches independently, **they are not sensitive to the order of the timesteps** (beyond a local scale, the size of the convolution windows), unlike RNNs. Of course, in order to be able to recognize longer-term patterns, one could stack many convolution layers and pooling layers, resulting in upper layers that would "see" long chunks of the original inputs -- but that's still a fairly weak way to induce order-sensitivity.
One way to evidence this weakness is to try 1D convnets on the temperature forecasting problem from the previous notebook, where **order-sensitivity was key to produce good predictions**:
```
import numpy as np
import os
# Import data
data_dir = './datasets/jena'
fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')
f = open(fname)
data = f.read()
f.close()
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:]
print(header)
print()
print(len(lines))
# Preprocessing
float_data = np.zeros((len(lines), len(header) - 1))
for i, line in enumerate(lines):
values = [float(x) for x in line.split(',')[1:]]
float_data[i, :] = values
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
# Create datasets
def generator(data, lookback, delay, min_index, max_index, shuffle=False, batch_size=128, step=6):
if max_index is None:
max_index = len(data) - delay - 1
i = min_index + lookback
while 1:
if shuffle:
rows = np.random.randint(min_index + lookback, max_index, size=batch_size)
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i, min(i + batch_size, max_index))
i += len(rows)
samples = np.zeros((len(rows), lookback // step, data.shape[-1]))
targets = np.zeros((len(rows),))
for j, row in enumerate(rows):
indices = range(rows[j] - lookback, rows[j], step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples, targets
lookback = 1440
step = 6
delay = 144
batch_size = 128
train_gen = generator(
float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size
)
val_gen = generator(
float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step,
batch_size=batch_size
)
test_gen = generator(
float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step,
batch_size=batch_size
)
# This is how many steps to draw from `val_gen` in order to see the whole validation set:
val_steps = (300000 - 200001 - lookback) // batch_size
# This is how many steps to draw from `test_gen` in order to see the whole test set:
test_steps = (len(float_data) - 300001 - lookback) // batch_size
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Conv1D(32, 5, activation='relu', input_shape=(None, float_data.shape[-1])))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.GlobalMaxPooling1D())
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit(
train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps
)
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
```
The validation MAE stays in the low 0.40s: **we cannot even beat our common-sense baseline using the small convnet**. Again, this is because **our convnet looks for patterns anywhere in the input timeseries, and has no knowledge of the temporal position of a pattern it sees** (e.g. towards the beginning, towards the end, etc.). Since more recent datapoints should be interpreted differently from older datapoints in the case of this specific forecasting problem, the convnet fails at producing meaningful results here. **This limitation of convnets was not an issue on IMDB**, because **patterns of keywords that are associated with a positive or a negative sentiment will be informative independently of where they are found in the input sentences**.
One strategy to combine the speed and lightness of convnets with the order-sensitivity of RNNs is to use a 1D convnet as a preprocessing step before a RNN. **This is especially beneficial when dealing with sequences that are so long that they couldn't realistically be processed with RNNs**, e.g. sequences with thousands of steps. The convnet will turn the long input sequence into much shorter (downsampled) sequences of higher-level features. This sequence of extracted features then becomes the input to the RNN part of the network.
Because this strategy allows us to manipulate much longer sequences, we could either look at data from further back (by increasing the `lookback` parameter of the data generator), or look at higher-resolution timeseries (by decreasing the `step` parameter of the generator). Here, we will choose (somewhat arbitrarily) to use a `step` half as large, resulting in timeseries twice as long, where the weather data is sampled at a rate of one point per 30 minutes.
```
# This was previously set to 6 (one point per hour). Now 3 (one point per 30 min).
step = 3
lookback = 720 # Unchanged
delay = 144 # Unchanged
train_gen = generator(
float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step
)
val_gen = generator(
float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step
)
test_gen = generator(
float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step
)
val_steps = (300000 - 200001 - lookback) // 128
test_steps = (len(float_data) - 300001 - lookback) // 128
```
This is our new model, **starting with two `Conv1D` layers and following-up with a `GRU` layer**:
```
model = Sequential()
model.add(layers.Conv1D(32, 5, activation='relu',input_shape=(None, float_data.shape[-1])))
model.add(layers.MaxPooling1D(3))
model.add(layers.Conv1D(32, 5, activation='relu'))
model.add(layers.GRU(32, dropout=0.1, recurrent_dropout=0.5))
model.add(layers.Dense(1))
model.summary()
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit(
train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps
)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
```
Judging from the validation loss, **this setup is not quite as good as the regularized GRU alone, but it's significantly faster**. It is looking at twice as much data, which in this case doesn't appear to be hugely helpful, but may be important for other datasets.
|
github_jupyter
|
<a href="https://colab.research.google.com/github/samarth0174/Face-Recognition-pca-svm/blob/master/Facial_Recognition(Exercise).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **In this project we implement the Identification system using Machine Learning concepts such as Principal Component Analysis (PCA) and Support Vector Machine (SVM).**
## Steps Involved:
- Importing Libraries
- Loading the Dataset
- Data Exploration
- Splitting the dataset
- Compute PCA(eigen faces)
- Train a SVM classification model
- * Using GridSearch to find best Parameters
- Model Evaluation
- Conclusion
## **Importing Libraries**
* We need to first import the scikit-learn library for using the PCA function API that is provided into this library.
* The scikit-learn library also provided an API to fetch **LFW_peoples dataset**.
* We also required matplotlib to plot faces.
```
#downnlading datasets sklearn
from sklearn.datasets import fetch_lfw_people
#todo import other libraries such sklearn for pca,svc,classification report,plotting
```
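One possible set of imports for the steps listed above (a sketch; your exact choices may differ):
```
# a possible solution sketch -- adapt as needed
from time import time
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import classification_report
```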
## **Loading the dataset**
```
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people('data', min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
#todo:check shape
# for machine learning we use the data directly (as relative pixel
# position info is ignored by this model)
'''todo:assign X for model'''
## the label to predict is the id of the person
'''Todo: assign y for the model, i.e. the number of classes'''
```
## **Data Exploration**
```
# plot and explore images and their respective classes
# hint: use matplotlib
```
## **Splitting the dataset**
```
#use sklearn test-train split
```
## **Compute PCA**
We can now compute a PCA (eigenfaces) on the face dataset (treated as unlabeled dataset): unsupervised feature extraction / dimensionality reduction.
```
#Apply the PCA algorithm on the training dataset which computes EigenFaces.
#Here, take n_components = 150 or 300 means we extract the top 150 (or 300) Eigenfaces from the algorithm.
#Also print the time taken to apply this algorithm.
# TODO: Create an instance of PCA, initializing with n_components=n_components and whiten=True
#TODO: pass the training dataset (X_train) to pca's 'fit()' method
```
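A minimal sketch of what this cell could look like (the choice of `n_components` and variable names such as `X_test` are assumptions):
```
# a possible solution sketch -- adapt as needed
from time import time
from sklearn.decomposition import PCA

n_components = 150
t0 = time()
pca = PCA(n_components=n_components, svd_solver='randomized', whiten=True)
pca.fit(X_train)  # X_train assumed from the train-test split above
print("PCA fitted in %0.3fs" % (time() - t0))

# project the data onto the eigenfaces basis
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)  # X_test assumed from the train-test split above
```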
## **Train a SVM classification model**
Fit a SVM classifier to the training set.Use GridSearchCV to find a good set of parameters for the classifier.
```
#todo : SVM with Gridsearch algo
```
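A possible sketch for this step (the parameter grid below is an assumption, not a prescribed setting):
```
# a possible solution sketch -- adapt as needed
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
              'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01]}
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)  # X_train_pca / y_train assumed from earlier steps
print("Best estimator found by grid search:")
print(clf.best_estimator_)
```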
## **Evaluation of the model quality on the test set**
```
#TODO: Test the model and Generate a classification report
```
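One way this evaluation could look (assuming `clf`, `X_test_pca`, `y_test`, and `target_names` from the earlier steps):
```
# a possible solution sketch -- adapt as needed
from sklearn.metrics import classification_report, confusion_matrix

y_pred = clf.predict(X_test_pca)
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(len(target_names))))
```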
# **plot the eigen faces for your visualisation**
```
#TODO:plot most significant eigen faces
```
## **Conclusion**
```
```
|
github_jupyter
|
```
!pip install torch # framework
!pip install --upgrade reedsolo
!pip install --upgrade librosa
!pip install torchvision
#!pip install torchaudio
#!pip install tensorboard
#!pip install soundfile
!pip install librosa==0.7.1
from google.colab import drive
drive.mount('/content/drive',force_remount=True)
%cd /content/drive/My\ Drive/
import numpy as np
import librosa
import librosa.display
import datetime
import matplotlib.pyplot as plt
from torch.nn.functional import binary_cross_entropy_with_logits, mse_loss
from torchvision import datasets, transforms
from IPython.display import clear_output
import torchvision
from torchvision.datasets.vision import VisionDataset
from torch.optim import Adam
from tqdm import notebook
import torch
import os.path
import os
import gc
import sys
from PIL import ImageFile, Image
#from torchaudio import transforms as audiotransforms
#import torchaudio
#import soundfile
#from IPython.display import Audio
import random
ImageFile.LOAD_TRUNCATED_IMAGES = True
epochs = 64
data_depth = 4
hidden_size = 32
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
LOAD_MODEL=True
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_0.041_2020-07-25_15:31:19.dat'
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.003_2020-07-24_20:01:33.dat'
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.022_2020-07-24_05:11:17.dat'
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.041_2020-07-23_23:01:25.dat'
PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_0.042_2020-07-23_02:08:27.dat' ##Depth4Epoch64
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_0.005_2020-07-22_20:05:49.dat'
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.019_2020-07-22_15:02:29.dat'
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_-0.020_2020-07-22_13:43:02.dat'
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_+0.048_2020-07-22_12:21:23.dat'
#PATH='/content/drive/My Drive/myresults/model/DenseEncoder_DenseDecoder_+0.017_2020-07-22_08:18:00.dat'
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
# -*- coding: utf-8 -*-
import zlib
from math import exp
import torch
from reedsolo import RSCodec
from torch.nn.functional import conv2d
rs = RSCodec(250)
def text_to_bits(text):
"""Convert text to a list of ints in {0, 1}"""
return bytearray_to_bits(text_to_bytearray(text))
def bits_to_text(bits):
"""Convert a list of ints in {0, 1} to text"""
return bytearray_to_text(bits_to_bytearray(bits))
def bytearray_to_bits(x):
"""Convert bytearray to a list of bits"""
result = []
for i in x:
bits = bin(i)[2:]
bits = '00000000'[len(bits):] + bits
result.extend([int(b) for b in bits])
return result
def bits_to_bytearray(bits):
"""Convert a list of bits to a bytearray"""
ints = []
for b in range(len(bits) // 8):
byte = bits[b * 8:(b + 1) * 8]
ints.append(int(''.join([str(bit) for bit in byte]), 2))
return bytearray(ints)
def text_to_bytearray(text):
"""Compress and add error correction"""
assert isinstance(text, str), "expected a string"
x = zlib.compress(text.encode("utf-8"))
x = rs.encode(bytearray(x))
return x
def bytearray_to_text(x):
"""Apply error correction and decompress"""
try:
#print('1: ',x)
text = rs.decode(x)[0]
#print('2: ',x)
text = zlib.decompress(text)
#print('3: ',x)
return text.decode("utf-8")
except BaseException as e:
print(e)
return False
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
import torch
from torch import nn
import numpy
class BasicEncoder(nn.Module):
"""
The BasicEncoder module takes a cover image and a data tensor and combines
them into a steganographic image.
"""
def _name(self):
return "BasicEncoder"
def _conv2d(self, in_channels, out_channels):
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
padding=1
)
def _build_models(self):
self.conv1 = nn.Sequential(
self._conv2d(3, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv2 = nn.Sequential(
self._conv2d(self.hidden_size + self.data_depth, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv3 = nn.Sequential(
self._conv2d(self.hidden_size, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv4 = nn.Sequential(
self._conv2d(self.hidden_size, 3),
)
return self.conv1, self.conv2, self.conv3, self.conv4
def __init__(self, data_depth, hidden_size):
super().__init__()
self.data_depth = data_depth
self.hidden_size = hidden_size
self._models = self._build_models()
self.name = self._name()
def forward(self, image, data):
x = self._models[0](image)
x_1 = self._models[1](torch.cat([x] + [data], dim=1))
x_2 = self._models[2](x_1)
x_3 = self._models[3](x_2)
return x_3
class ResidualEncoder(BasicEncoder):
def _name(self):
return "ResidualEncoder"
def forward(self, image, data):
return image + super().forward(image, data)
class DenseEncoder(BasicEncoder):
def _name(self):
return "DenseEncoder"
def _build_models(self):
self.conv1 = super()._build_models()[0]
self.conv2 = super()._build_models()[1]
self.conv3 = nn.Sequential(
self._conv2d(self.hidden_size * 2 +
self.data_depth, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv4 = nn.Sequential(
self._conv2d(self.hidden_size * 3 + self.data_depth, 3)
)
return self.conv1, self.conv2, self.conv3, self.conv4
def forward(self, image, data):
x = self._models[0](image)
x_list = [x]
x_1 = self._models[1](torch.cat(x_list+[data], dim=1))
x_list.append(x_1)
x_2 = self._models[2](torch.cat(x_list+[data], dim=1))
x_list.append(x_2)
x_3 = self._models[3](torch.cat(x_list+[data], dim=1))
x_list.append(x_3)
return image + x_3
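# --- Hedged shape sketch (assumed values: data_depth=4, hidden_size=32) ---
#   enc = DenseEncoder(data_depth=4, hidden_size=32)
#   cover = torch.zeros(1, 3, 360, 360)      # cover image batch
#   payload = torch.zeros(1, 4, 360, 360)    # bit tensor with the same spatial size
#   stego = enc(cover, payload)              # -> (1, 3, 360, 360), same shape as cover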
import torch
from torch import nn
#from torch.nn import Sigmoid
#from torch.distributions import Bernoulli
class BasicDecoder(nn.Module):
"""
    The BasicDecoder module takes a steganographic image and attempts to decode
the embedded data tensor.
Input: (N, 3, H, W)
Output: (N, D, H, W)
"""
def _name(self):
return "BasicDecoder"
def _conv2d(self, in_channels, out_channels):
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
padding=1
)
def _build_models(self):
self.conv1 = nn.Sequential(
self._conv2d(3, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv2 = nn.Sequential(
self._conv2d(self.hidden_size, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv3 = nn.Sequential(
self._conv2d(self.hidden_size, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv4 = nn.Sequential(
self._conv2d(self.hidden_size, self.data_depth),
#nn.Sigmoid(),
)
return self.conv1, self.conv2, self.conv3, self.conv4
def forward(self, image):
x = self._models[0](image)
x_1 = self._models[1](x)
x_2 = self._models[2](x_1)
x_3 = self._models[3](x_2)
#x_4 = Bernoulli(x_3).sample()
return x_3
def __init__(self, data_depth, hidden_size):
super().__init__()
self.data_depth = data_depth
self.hidden_size = hidden_size
self._models = self._build_models()
self.name = self._name()
class DenseDecoder(BasicDecoder):
def _name(self):
return "DenseDecoder"
def _build_models(self):
        self.conv1, self.conv2, _, _ = super()._build_models()
self.conv3 = nn.Sequential(
self._conv2d(self.hidden_size * 2, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size)
)
self.conv4 = nn.Sequential(
self._conv2d(self.hidden_size * 3, self.data_depth),
#nn.Sigmoid(),
)
return self.conv1, self.conv2, self.conv3, self.conv4
def forward(self, image):
x = self._models[0](image)
x_list = [x]
x_1 = self._models[1](torch.cat(x_list, dim=1))
x_list.append(x_1)
x_2 = self._models[2](torch.cat(x_list, dim=1))
x_list.append(x_2)
x_3 = self._models[3](torch.cat(x_list, dim=1))
x_list.append(x_3)
return x_3
import torch
from torch import nn
class BasicCritic(nn.Module):
"""
The BasicCritic module takes an image and predicts whether it is a cover
image or a steganographic image (N, 1).
Input: (N, 3, H, W)
Output: (N, 1)
"""
def _name(self):
return "BasicCritic"
def _conv2d(self, in_channels, out_channels):
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3
)
def _build_models(self):
self.conv1 = nn.Sequential(
self._conv2d(3, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv2 = nn.Sequential(
self._conv2d(self.hidden_size, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv3 = nn.Sequential(
self._conv2d(self.hidden_size, self.hidden_size),
nn.LeakyReLU(inplace=True),
nn.BatchNorm2d(self.hidden_size),
)
self.conv4 = nn.Sequential(
self._conv2d(self.hidden_size, 1)
)
return self.conv1,self.conv2,self.conv3,self.conv4
def __init__(self, hidden_size):
super().__init__()
self.hidden_size = hidden_size
self._models = self._build_models()
self.name = self._name()
def forward(self, image):
x = self._models[0](image)
x_1 = self._models[1](x)
x_2 = self._models[2](x_1)
x_3 = self._models[3](x_2)
return torch.mean(x_3.view(x_3.size(0), -1), dim=1)
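# --- Hedged usage sketch (assumed values: data_depth=4, hidden_size=32) ---
#   dec = DenseDecoder(data_depth=4, hidden_size=32)
#   critic = BasicCritic(hidden_size=32)
#   logits = dec(stego)            # -> (1, 4, 360, 360), raw bit logits (no Sigmoid)
#   bits = (logits >= 0).float()   # threshold at zero to recover the payload bits
#   score = critic(stego)          # -> (1,), unbounded realness score per image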
def plot(name, train_epoch, values, path, save):
clear_output(wait=True)
plt.close('all')
fig = plt.figure()
fig = plt.ion()
fig = plt.subplot(1, 1, 1)
fig = plt.title('epoch: %s -> %s: %s' % (train_epoch, name, values[-1]))
fig = plt.ylabel(name)
fig = plt.xlabel('validation_set')
fig = plt.plot(values)
fig = plt.grid()
get_fig = plt.gcf()
fig = plt.draw() # draw the plot
fig = plt.pause(1) # show it for 1 second
if save:
now = datetime.datetime.now()
get_fig.savefig('%s/%s_%.3f_%d_%s.png' %
(path, name, train_epoch, values[-1], now.strftime("%Y-%m-%d_%H:%M:%S")))
def test(encoder,decoder,data_depth,train_epoch,cover,payload):
%matplotlib inline
generated = encoder.forward(cover, payload)
decoded = decoder.forward(generated)
decoder_loss = binary_cross_entropy_with_logits(decoded, payload)
decoder_acc = (decoded >= 0.0).eq(
payload >= 0.5).sum().float() / payload.numel() # .numel() calculate the number of element in a tensor
print("Decoder loss: %.3f"% decoder_loss.item())
print("Decoder acc: %.3f"% decoder_acc.item())
f, ax = plt.subplots(1, 2)
plt.title("%s_%s"%(encoder.name,decoder.name))
cover=np.transpose(np.squeeze(cover.cpu()), (1, 2, 0))
ax[0].imshow(cover)
ax[0].axis('off')
print(generated.shape)
generated_=np.transpose(np.squeeze((generated.cpu()).detach().numpy()), (1, 2, 0))
ax[1].imshow(generated_)
ax[1].axis('off')
#now = datetime.datetime.now()
#print("payload :")
#print(payload)
#print("decoded :")
#decoded[decoded<0]=0
#decoded[decoded>0]=1
#print(decoded)
# plt.savefig('results/samples/%s_%s_%d_%.3f_%d_%s.png' %
# (encoder.name,decoder.name, data_depth,decoder_acc, train_epoch, now.strftime("%Y-%m-%d_%H:%M:%S")))
return generated
def save_model(encoder,decoder,critic,en_de_optimizer,cr_optimizer,metrics,ep):
now = datetime.datetime.now()
cover_score = metrics['val.cover_score'][-1]
name = "%s_%s_%+.3f_%s.dat" % (encoder.name,decoder.name,cover_score,
now.strftime("%Y-%m-%d_%H:%M:%S"))
fname = os.path.join('.', 'myresults/model', name)
states = {
'state_dict_critic': critic.state_dict(),
'state_dict_encoder': encoder.state_dict(),
'state_dict_decoder': decoder.state_dict(),
'en_de_optimizer': en_de_optimizer.state_dict(),
'cr_optimizer': cr_optimizer.state_dict(),
'metrics': metrics,
'train_epoch': ep,
'date': now.strftime("%Y-%m-%d_%H:%M:%S"),
}
torch.save(states, fname)
path='myresults/plots/train_%s_%s_%s'% (encoder.name,decoder.name,now.strftime("%Y-%m-%d_%H:%M:%S"))
try:
os.mkdir(os.path.join('.', path))
except Exception as error:
print(error)
plot('encoder_mse', ep, metrics['val.encoder_mse'], path, True)
plot('decoder_loss', ep, metrics['val.decoder_loss'], path, True)
plot('decoder_acc', ep, metrics['val.decoder_acc'], path, True)
plot('cover_score', ep, metrics['val.cover_score'], path, True)
plot('generated_score', ep, metrics['val.generated_score'], path, True)
plot('ssim', ep, metrics['val.ssim'], path, True)
plot('psnr', ep, metrics['val.psnr'], path, True)
plot('bpp', ep, metrics['val.bpp'], path, True)
def fit_gan(encoder,decoder,critic,en_de_optimizer,cr_optimizer,metrics,train_loader,valid_loader):
for ep in range(epochs):
print("Epoch %d" %(ep+1))
for cover, _ in notebook.tqdm(train_loader):
gc.collect()
cover = cover.to(device)
N, _, H, W = cover.size()
            # payload bits sampled uniformly from {0, 1}
payload = torch.zeros((N, data_depth, H, W),
device=device).random_(0, 2)
generated = encoder.forward(cover, payload)
cover_score = torch.mean(critic.forward(cover))
generated_score = torch.mean(critic.forward(generated))
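            # (hedged note) Wasserstein-style critic update: minimize
            # cover_score - generated_score, then clamp the critic weights
            # to keep it roughly Lipschitz (WGAN-style weight clipping).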
cr_optimizer.zero_grad()
(cover_score - generated_score).backward(retain_graph=False)
cr_optimizer.step()
for p in critic.parameters():
p.data.clamp_(-0.1, 0.1)
metrics['train.cover_score'].append(cover_score.item())
metrics['train.generated_score'].append(generated_score.item())
for cover, _ in notebook.tqdm(train_loader):
gc.collect()
cover = cover.to(device)
N, _, H, W = cover.size()
            # payload bits sampled uniformly from {0, 1}
payload = torch.zeros((N, data_depth, H, W),
device=device).random_(0, 2)
generated = encoder.forward(cover, payload)
decoded = decoder.forward(generated)
encoder_mse = mse_loss(generated, cover)
decoder_loss = binary_cross_entropy_with_logits(decoded, payload)
decoder_acc = (decoded >= 0.0).eq(
payload >= 0.5).sum().float() / payload.numel()
generated_score = torch.mean(critic.forward(generated))
en_de_optimizer.zero_grad()
(100 * encoder_mse + decoder_loss +
generated_score).backward() # Why 100?
en_de_optimizer.step()
metrics['train.encoder_mse'].append(encoder_mse.item())
metrics['train.decoder_loss'].append(decoder_loss.item())
metrics['train.decoder_acc'].append(decoder_acc.item())
for cover, _ in notebook.tqdm(valid_loader):
gc.collect()
cover = cover.to(device)
N, _, H, W = cover.size()
            # payload bits sampled uniformly from {0, 1}
payload = torch.zeros((N, data_depth, H, W),
device=device).random_(0, 2)
generated = encoder.forward(cover, payload)
decoded = decoder.forward(generated)
encoder_mse = mse_loss(generated, cover)
decoder_loss = binary_cross_entropy_with_logits(decoded, payload)
decoder_acc = (decoded >= 0.0).eq(
payload >= 0.5).sum().float() / payload.numel()
generated_score = torch.mean(critic.forward(generated))
cover_score = torch.mean(critic.forward(cover))
metrics['val.encoder_mse'].append(encoder_mse.item())
metrics['val.decoder_loss'].append(decoder_loss.item())
metrics['val.decoder_acc'].append(decoder_acc.item())
metrics['val.cover_score'].append(cover_score.item())
metrics['val.generated_score'].append(generated_score.item())
metrics['val.ssim'].append(
ssim(cover, generated).item())
metrics['val.psnr'].append(
10 * torch.log10(4 / encoder_mse).item())
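            # (hedged notes) psnr assumes pixel values in [-1, 1], so the peak-to-peak
            # range is 2 and MAX^2 = 4; bpp estimates usable capacity as
            # data_depth scaled by (2 * decoder_acc - 1).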
metrics['val.bpp'].append(
data_depth * (2 * decoder_acc.item() - 1))
print('encoder_mse: %.3f - decoder_loss: %.3f - decoder_acc: %.3f - cover_score: %.3f - generated_score: %.3f - ssim: %.3f - psnr: %.3f - bpp: %.3f'
%(encoder_mse.item(),decoder_loss.item(),decoder_acc.item(),cover_score.item(),generated_score.item(), ssim(cover, generated).item(),10 * torch.log10(4 / encoder_mse).item(),data_depth * (2 * decoder_acc.item() - 1)))
save_model(encoder,decoder,critic,en_de_optimizer,cr_optimizer,metrics,ep)
if __name__ == '__main__':
for func in [
lambda: os.mkdir(os.path.join('.', 'results')),
lambda: os.mkdir(os.path.join('.', 'results/model')),
lambda: os.mkdir(os.path.join('.', 'results/plots'))]: # create directories
try:
func()
except Exception as error:
print(error)
continue
METRIC_FIELDS = [
'val.encoder_mse',
'val.decoder_loss',
'val.decoder_acc',
'val.cover_score',
'val.generated_score',
'val.ssim',
'val.psnr',
'val.bpp',
'train.encoder_mse',
'train.decoder_loss',
'train.decoder_acc',
'train.cover_score',
'train.generated_score',
]
print('image')
data_dir = 'div2k'
mu = [.5, .5, .5]
sigma = [.5, .5, .5]
transform = transforms.Compose([transforms.RandomHorizontalFlip(),
transforms.RandomCrop(
360, pad_if_needed=True),
transforms.ToTensor(),
transforms.Normalize(mu, sigma)])
train_set = datasets.ImageFolder(os.path.join(
data_dir, "train/"), transform=transform)
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=4, shuffle=True)
valid_set = datasets.ImageFolder(os.path.join(
data_dir, "val/"), transform=transform)
valid_loader = torch.utils.data.DataLoader(
valid_set, batch_size=4, shuffle=False)
encoder = DenseEncoder(data_depth, hidden_size).to(device)
decoder = DenseDecoder(data_depth, hidden_size).to(device)
critic = BasicCritic(hidden_size).to(device)
cr_optimizer = Adam(critic.parameters(), lr=1e-4)
en_de_optimizer = Adam(list(decoder.parameters()) + list(encoder.parameters()), lr=1e-4)
metrics = {field: list() for field in METRIC_FIELDS}
if LOAD_MODEL:
if torch.cuda.is_available():
checkpoint = torch.load(PATH)
else:
checkpoint = torch.load(PATH, map_location=lambda storage, loc: storage)
critic.load_state_dict(checkpoint['state_dict_critic'])
encoder.load_state_dict(checkpoint['state_dict_encoder'])
decoder.load_state_dict(checkpoint['state_dict_decoder'])
en_de_optimizer.load_state_dict(checkpoint['en_de_optimizer'])
cr_optimizer.load_state_dict(checkpoint['cr_optimizer'])
metrics=checkpoint['metrics']
ep=checkpoint['train_epoch']
date=checkpoint['date']
critic.train(mode=False)
encoder.train(mode=False)
decoder.train(mode=False)
print('GAN loaded: ', ep)
print(critic)
print(encoder)
print(decoder)
print(en_de_optimizer)
print(cr_optimizer)
print(date)
else:
fit_gan(encoder,decoder,critic,en_de_optimizer,cr_optimizer,metrics,train_loader,valid_loader)
from collections import Counter
def make_payload(width, height, depth, text):
"""
This takes a piece of text and encodes it into a bit vector. It then
fills a matrix of size (width, height) with copies of the bit vector.
"""
message = text_to_bits(text) + [0] * 32
payload = message
while len(payload) < width * height * depth:
payload += message
payload = payload[:width * height * depth]
return torch.FloatTensor(payload).view(1, depth, height, width)
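# (hedged note) make_payload appends 32 zero bits as a separator after the message
# and tiles the result until the (depth, height, width) tensor is full; make_message
# below splits the decoded bit stream on the 4-zero-byte separator to recover candidates.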
def make_message(image):
#image = torch.FloatTensor(image).permute(2, 1, 0).unsqueeze(0)
image = image.to(device)
image = decoder(image).view(-1) > 0
image=torch.tensor(image, dtype=torch.uint8)
# split and decode messages
candidates = Counter()
bits = image.data.cpu().numpy().tolist()
for candidate in bits_to_bytearray(bits).split(b'\x00\x00\x00\x00'):
candidate = bytearray_to_text(bytearray(candidate))
if candidate:
#print(candidate)
candidates[candidate] += 1
# choose most common message
if len(candidates) == 0:
raise ValueError('Failed to find message.')
candidate, count = candidates.most_common(1)[0]
return candidate
```
### Check a sample from validation dataset
```
# to see one image
cover,*rest = next(iter(valid_set))
_, H, W = cover.size()
cover = cover[None].to(device)
text = "We are busy in Neural Networks project. Anyhow, how is your day going?"
payload = make_payload(W, H, data_depth, text)
payload = payload.to(device)
#generated = encoder.forward(cover, payload)
generated = test(encoder,decoder,data_depth,epochs,cover,payload)
text_return = make_message(generated)
print(text_return)
```
### Testing begins (from a loaded model)
#### Test1 - Save steganographic images
```
##Take all images from test folder (one by one) and message requested by user to encode
from imageio import imread, imwrite
epochs = 64
data_depth = 4
test_folder = "div2k/myval/_"
save_dir = os.path.join("div2k/myval", str(data_depth)+"_"+str(epochs))
os.makedirs(save_dir, exist_ok=True)  # create the output directory if it does not already exist
for filename in os.listdir(test_folder):
print(os.path.join(test_folder,filename))
cover_im = imread(os.path.join(test_folder,filename), pilmode='RGB') / 127.5 - 1.0
cover = torch.FloatTensor(cover_im).permute(2, 1, 0).unsqueeze(0)
cover_size = cover.size()
# _, _, height, width = cover.size()
text = "We are busy in Neural Networks project. The deadline is near. Anyhow, how is your day going?"
payload = make_payload(cover_size[3], cover_size[2], data_depth, text)
cover = cover.to(device)
payload = payload.to(device)
generated = encoder.forward(cover, payload)[0].clamp(-1.0, 1.0)
#print(generated.size())
generated = (generated.permute(2, 1, 0).detach().cpu().numpy() + 1.0) * 127.5
imwrite(os.path.join("div2k/myval/",str(data_depth)+"_"+str(epochs),(str(data_depth)+"_"+str(epochs)+"_"+filename)), generated.astype('uint8'))
```
#### Test2 - Take a steganographic image from a folder and decode
```
##[Individual]Take an image requested by user to decode
from imageio import imread, imwrite
steg_folder = "div2k/myval/4_64"
filename = "4_64_0855.png"
image = imread(os.path.join(steg_folder,filename), pilmode='RGB') / 127.5 - 1.0
plt.imshow(image)
image = torch.FloatTensor(image).permute(2, 1, 0).unsqueeze(0)
text_return = make_message(image)
print(text_return)
#f = open(steg_folder+".csv", "a")
#f.write("\n" + filename + "\t" + str(text_return))
```
#### Test3 - Encode to decode in one cell
```
## Input to output (encode and decode in one cell)
from imageio import imread, imwrite
cover_im = imread("div2k/myval/_/0805.png", pilmode='RGB') / 127.5 - 1.0
plt.imshow(cover_im)
cover = torch.FloatTensor(cover_im).permute(2, 1, 0).unsqueeze(0)
cover_size = cover.size()
# _, _, height, width = cover.size()
text = "We are busy in Neural Networks project. Anyhow, how is your day going?"
payload = make_payload(cover_size[3], cover_size[2], data_depth, text)
cover = cover.to(device)
payload = payload.to(device)
generated = encoder.forward(cover, payload)
text_return = make_message(generated)
print(text_return)
```
#### Generate Difference Image
```
from skimage.metrics import structural_similarity as ssim
from imageio import imread, imwrite
diff_epochs = 64
diff_data_depth = 4
cover_folder = "div2k/myval/_"
steg_folder = "div2k/myval/"+str(diff_data_depth)+"_"+str(diff_epochs)
for filename in os.listdir(cover_folder):
print(os.path.join(cover_folder,filename))
cover = imread(os.path.join(cover_folder,filename), as_gray=True)
gen = imread(os.path.join(steg_folder,str(diff_data_depth)+"_"+str(diff_epochs)+"_"+filename), as_gray=True)
(score, diff) = ssim(cover, gen, full=True)
imwrite("div2k/myval/"+str(diff_data_depth)+"_"+str(diff_epochs)+"/"+"%d_%d_diff_%s"%(diff_data_depth,diff_epochs,filename),diff)
print("Score: ",score)
```
|
github_jupyter
|
# **[Adversarial Disturbances for Controller Verification](http://proceedings.mlr.press/v144/ghai21a/ghai21a.pdf)**
[](https://colab.research.google.com/github/google/nsc-tutorial/blob/main/controller-verification.ipynb)
## Housekeeping
Imports [jax](https://github.com/google/jax), numpy, scipy, plotting utils...
```
#@title
import jax
import itertools
import numpy as onp
import jax.numpy as np
import matplotlib.pyplot as plt
import ipywidgets as widgets
from jax.numpy.linalg import inv, pinv
from scipy.linalg import solve_discrete_are as dare
from jax import jit, grad, hessian
from IPython import display
from toolz.dicttoolz import valmap, itemmap
from itertools import chain
def liveplot(costss, xss, wss, cmax=30, cumcmax=15, wmax=2, xmax=20, logcmax=100, logcumcmax=1000):
cummean = lambda x: np.cumsum(np.array(x))/np.arange(1, len(x)+1)
cumcostss = valmap(cummean, costss)
disturbances = valmap(lambda x: list(map(lambda w: w[0], x)), wss)
plt.style.use('seaborn')
colors = {
"Zero Control": "gray",
"LQR / H2": "green",
"Finite-horizon LQR / H2": "teal",
"Optimal LQG for GRW": "aqua",
"Robust / Hinf Control": "orange",
"GPC": "red"
}
fig, ax = plt.subplots(3, 2, figsize=(21, 12))
costssline = {}
for Cstr, costs in costss.items():
costssline[Cstr], = ax[0, 0].plot([], label=Cstr, color=colors[Cstr])
ax[0, 0].set_xlabel("Time")
ax[0, 0].set_ylabel("Instantaneous Cost")
ax[0, 0].set_ylim([-1, cmax])
ax[0, 0].set_xlim([0, 100])
ax[0, 0].legend()
cumcostssline = {}
for Cstr, costs in cumcostss.items():
cumcostssline[Cstr], = ax[0, 1].plot([], label=Cstr, color=colors[Cstr])
ax[0, 1].set_xlabel("Time")
ax[0, 1].set_ylabel("Average Cost")
ax[0, 1].set_ylim([-1, cumcmax])
ax[0, 1].set_xlim([0, 100])
ax[0, 1].legend()
perturblines = {}
for Cstr, W in disturbances.items():
perturblines[Cstr], = ax[1, 0].plot([], label=Cstr, color=colors[Cstr])
ax[1, 0].set_xlabel("Time")
ax[1, 0].set_ylabel("Generated Disturbances")
ax[1, 0].set_ylim([-wmax, wmax])
ax[1, 0].set_xlim([0, 100])
ax[1, 0].legend()
pointssline, trailssline = {}, {}
for Cstr, C in xss.items():
pointssline[Cstr], = ax[1,1].plot([], [], label=Cstr, color=colors[Cstr], ms=20, marker='s')
trailssline[Cstr], = ax[1,1].plot([], [], label=Cstr, color=colors[Cstr], lw=2)
ax[1, 1].set_xlabel("Position")
ax[1, 1].set_ylabel("")
ax[1, 1].set_ylim([-1, 6])
ax[1, 1].set_xlim([-xmax, xmax])
ax[1, 1].legend()
logcostssline = {}
for Cstr, costs in costss.items():
logcostssline[Cstr], = ax[2, 0].plot([1], label=Cstr, color=colors[Cstr])
ax[2, 0].set_xlabel("Time")
ax[2, 0].set_ylabel("Instantaneous Cost (Log Scale)")
ax[2, 0].set_xlim([0, 100])
ax[2, 0].set_ylim([0.1, logcmax])
ax[2, 0].set_yscale('log')
ax[2, 0].legend()
logcumcostssline = {}
for Cstr, costs in cumcostss.items():
logcumcostssline[Cstr], = ax[2, 1].plot([1], label=Cstr, color=colors[Cstr])
ax[2, 1].set_xlabel("Time")
ax[2, 1].set_ylabel("Average Cost (Log Scale)")
ax[2, 1].set_xlim([0, 100])
ax[2, 1].set_ylim([0.1, logcumcmax])
ax[2, 1].set_yscale('log')
ax[2, 1].legend()
def livedraw(t):
for Cstr, costsline in costssline.items():
costsline.set_data(np.arange(t), costss[Cstr][:t])
for Cstr, cumcostsline in cumcostssline.items():
cumcostsline.set_data(np.arange(t), cumcostss[Cstr][:t])
for i, (Cstr, pointsline) in enumerate(pointssline.items()):
pointsline.set_data(xss[Cstr][t][0], i)
for Cstr, perturbline in perturblines.items():
perturbline.set_data(np.arange(t), disturbances[Cstr][:t])
for i, (Cstr, trailsline) in enumerate(trailssline.items()):
trailsline.set_data(list(map(lambda x: x[0], xss[Cstr][max(t-10, 0):t])), i)
for Cstr, logcostsline in logcostssline.items():
logcostsline.set_data(np.arange(t), costss[Cstr][:t])
for Cstr, logcumcostsline in logcumcostssline.items():
logcumcostsline.set_data(np.arange(t), cumcostss[Cstr][:t])
return chain(costssline.values(), cumcostssline.values(), perturblines.values(), pointssline.values(), trailssline.values(), logcostssline.values(), logcumcostssline.values())
print("🧛 reanimating :) meanwhile...")
livedraw(99)
plt.show()
from matplotlib import animation
anim = animation.FuncAnimation(fig, livedraw, frames=100, interval=50, blit=True)
from IPython.display import HTML
display.clear_output(wait=True)
return HTML(anim.to_html5_video())
```
## A simple dynamical system
Defines a discrete-time [double-integrator](https://en.wikipedia.org/wiki/Double_integrator) -- a simple linear dynamical system that mirrors 1d kinematics -- along with a quadratic cost.
Below $\mathbf{x}_t$ is the state, $\mathbf{u}_t$ is the control input (or action), $\mathbf{w}_t$ is the disturbance.
$$ \mathbf{x}_{t+1} = A\mathbf{x}_t + B\mathbf{u}_t + \mathbf{w}_t, \qquad c(\mathbf{x},\mathbf{u}) = \mathbf{x}^\top Q \mathbf{x} + \mathbf{u}^\top R \mathbf{u}$$
$$ A = \begin{bmatrix}
1 & 1\\
0 & 1
\end{bmatrix},\quad B = \begin{bmatrix}
0\\
1
\end{bmatrix}, \quad Q = \begin{bmatrix}
1 & 0\\
0 & 1
\end{bmatrix}, \quad R = \begin{bmatrix}
1
\end{bmatrix}$$
In the task of controller verification, the **verifier** selects $\mathbf{w}_t$ adaptively as a function of past state-action pairs $(\mathbf{x}_s,\mathbf{u}_s:s\leq t)$.
```
dx, du, T = 2, 1, 100
A, B = np.array([[1.0, 1.0], [0.0, 1.0]]), np.array([[0.0], [1.0]])
Q, R = np.eye(dx), np.eye(du)
dyn = lambda x, u, w, t: A @ x + B @ u + w
cost = lambda x, u, t: x.T @ Q @ x + u.T @ R @ u  # quadratic cost with the Q, R defined above
# A basic control loop.
# (x, z) is the environ-controller state.
# w is disturbance and z_w disturbance generator state
def eval(control, disturbance):
x, z, z_w = np.zeros(dx), None, None
for t in range(T):
u, z = control(x, z, t)
w, z_w = disturbance(x, u, z_w, t)
c = cost(x, u, t)
yield (x, u, w, c)
x = dyn(x, u, w, t)
```
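As a quick sanity check (a sketch added here, not part of the original notebook), the loop above can be run with zero control and zero disturbance: starting from the origin, the state and every per-step cost remain zero. This assumes the cell above has been executed so that `eval`, `dx`, `du`, and `np` (jax.numpy) are in scope.
```
# Hedged sanity check: zero control and zero disturbance keep the state at the origin.
zero_control = lambda x, z, t: (np.zeros(du), z)
zero_disturbance = lambda x, u, z_w, t: (np.zeros(dx), z_w)

xs, us, ws, cs = zip(*eval(zero_control, zero_disturbance))
print(float(sum(cs)))  # expected: 0.0
```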
## Control Algorithms
The segment below puts forth a few basic control strategies, whose performance characteristics we would like to verify.
+ **Zero Control**: Executes $\mathbf{u}=\mathbf{0}$.
+ **LQR / H2**: A discrete-time [linear-quadratic regulator](https://en.wikipedia.org/wiki/Linear%E2%80%93quadratic_regulator).
+ **Finite-horizon LQR / H2**: A finite-horizon variant of the above.
+ **Robust / $H_\infty$ Control**: A worst-case [robust](https://en.wikipedia.org/wiki/H-infinity_methods_in_control_theory) controller.
+ **GPC**: [Gradient-perturbation](https://arxiv.org/abs/1902.08721) controller.
```
#@title
def zero():
return lambda x, z, t: (np.zeros(du), z)
def h2(A=A, B=B, Q=Q, R=R):
P = dare(A, B, Q, R)
K = - inv(R + B.T @ P @ B) @ (B.T @ P @ A)
return lambda x, z, t: (K @ x, z)
def h2nonstat(A=A, B=B, Q=Q, R=R, T=T):
dx, du = B.shape
P, K = [np.zeros((dx, dx)) for _ in range(T + 1)], [np.zeros((du, dx)) for _ in range(T)]
P[T] = Q
for t in range(T - 1, -1, -1):
P[t] = Q + A.T @ P[t + 1] @ A - (A.T @ P[t + 1] @ B) @ inv(R + B.T @ P[t + 1] @ B) @ (B.T @ P[t + 1] @ A)
K[t] = - inv(R + B.T @ P[t + 1] @ B) @ (B.T @ P[t + 1] @ A)
return lambda x, z, t: (K[t] @ x, z)
def hinf(A=A, B=B, Q=Q, R=R, T=T, gamma=1.0):
dx, du = B.shape
P, K = [np.zeros((dx, dx)) for _ in range(T + 1)], [np.zeros((du, dx)) for _ in range(T)],
P[T] = Q
for t in range(T - 1, -1, -1):
Lambda = np.eye(dx) + (B @ inv(R) @ B.T - gamma ** -2 * np.eye(dx)) @ P[t + 1]
P[t] = Q + A.T @ P[t + 1] @ pinv(Lambda) @ A
K[t] = - np.linalg.inv(R) @ B.T @ P[t + 1] @ pinv(Lambda) @ A
return lambda x, z, t: (K[t] @ x, z)
def gpc(A=A, B=B, Q=Q, R=R, T=T, H=3, M=3, lr=0.01, dyn=dyn, cost=cost):
dx, du = B.shape
P = dare(A, B, Q, R)
K = - np.array(inv(R + B.T @ P @ B) @ (B.T @ P @ A))
def proxy(E, off, W):
y = np.zeros(dx)
for h in range(H):
v = K @ y + np.tensordot(E, W[h: h + M], axes=([0, 2], [0, 1]))
y = dyn(y, v, W[h + M], h + M)
v = K @ y + np.tensordot(E, W[h: h + M], axes=([0, 2], [0, 1]))
c = cost(y, v, None)
return c
proxygrad = jit(grad(proxy, argnums=(0, 1)))
def gpc_u(x, z, t):
if z is None or t == 0:
z = np.zeros(dx), np.zeros(du), np.zeros((H + M, dx)), np.zeros((M, du, dx)), np.zeros(du)
xprev, uprev, W, E, off = z
W = jax.ops.index_update(W, 0, x - A @ xprev - B @ uprev)
W = np.roll(W, -1, axis=0)
if t >= H + M:
Edelta, offdelta = proxygrad(E, off, W)
E -= lr * Edelta
off -= lr * offdelta
u = K @ x + np.tensordot(E, W[-M:], axes=([0, 2], [0, 1])) + off
return u, (x, u, W, E, off)
return gpc_u
def controllers(gamma, H, M, lr):
return {
"Zero Control": zero(),
"LQR / H2": h2(),
"Finite-horizon LQR / H2": h2nonstat(),
"Robust / Hinf Control": hinf(gamma=gamma),
"GPC": gpc(H=H, M=M, lr=lr),
}
```
## [Memory Online Trust Region](https://arxiv.org/abs/2012.06695) (**MOTR**) disturbances
This is an online-learning approach to disturbance generation, akin to nonstochastic control but with the roles of control and disturbance swapped.
```
# Author: Udaya Ghai ([email protected])
def motr(A=A, B=B, Q=Q, R=R, r_off=0.5, r_E= 1.0, T=T, H=3, M=3, lr=0.001, dyn=dyn, cost=cost):
dx, du = B.shape
def proxy(E, off, U, X):
x = X[0]
for h in range(H):
w = np.tensordot(E, U[h: h + M], axes=([0, 2], [0, 1])) + off
x = dyn(x, U[h + H], w, h+M)
return np.sum(x.T @ Q @ x)
proxygrad = jit(grad(proxy, argnums=(0, 1)))
proxyhess = jit(hessian(proxy))
def project(x, r):
norm_x = np.linalg.norm(x)
return x if norm_x < r else (r / norm_x) * x
def motr_w(x, u, z_w, t):
if z_w is None or t == 0:
z_w = np.zeros((H+M, du, 1)),np.zeros((H, dx, 1)), np.zeros((M, dx, du)), np.ones((dx, 1))
U, X, E, off = z_w
U = jax.ops.index_update(U, 0, u)
U = np.roll(U, -1, axis=0)
X = jax.ops.index_update(X, 0, np.reshape(x, (dx,1)))
X = np.roll(X, -1, axis=0)
if t >= H + M:
Edelta, offdelta = proxygrad(E, off, U, X)
E = project(E + lr*Edelta, r_E)
off = project(off + lr * offdelta, r_off)
w = np.tensordot(E, U[-M:], axes=([0, 2], [0, 1])) + off
return np.squeeze(w), (U, X, E, off)
return motr_w
#@title MOTR Perturbation
#@markdown Environment Parameters
motr_offset_radius = 1 #@param {type:"slider", min:0, max:2, step:0.01}
motr_radius = 0.4 #@param {type:"slider", min:0, max:2, step:0.01}
motr_lookback = 5 #@param {type:"slider", min:1, max:20, step:1}
motr_memory = 5 #@param {type:"slider", min:1, max:20, step:1}
motr_gen = motr(r_off=motr_offset_radius, r_E=motr_radius, M=motr_memory, H=motr_lookback)
#@markdown Constant Perturbation: Control parameters
hinf_log_gamma = 2 #@param {type:"slider", min:-2, max:5, step:0.01}
hinf_gamma = 10**(hinf_log_gamma)
gpc_lookback = 5 #@param {type:"slider", min:1, max:20, step:1}
gpc_memory = 5 #@param {type:"slider", min:1, max:20, step:1}
gpc_log_lr = -3 #@param {type:"slider", min:-5, max:0, step:0.01}
gpc_lr = 10**(gpc_log_lr)
Cs = controllers(hinf_gamma, gpc_lookback, gpc_memory, gpc_lr)
print("🧛 evaluating controllers")
traces = {Cstr: list(zip(*eval(C, motr_gen))) for Cstr, C in Cs.items()}
xss = valmap(lambda x: x[0], traces)
uss = valmap(lambda x: x[1], traces)
wss = valmap(lambda x: x[2], traces)
costss = valmap(lambda x: x[3], traces)
liveplot(costss, xss, wss, 250, 200, 4, 20, 10**5, 10**5)
```
|
github_jupyter
|
## Appendix (Application of the mutual fund theorem)
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import FinanceDataReader as fdr
import pandas as pd
ticker_list = ['069500']
df_list = [fdr.DataReader(ticker, '2015-01-01', '2016-12-31')['Change'] for ticker in ticker_list]
df = pd.concat(df_list, axis=1)
#df.columns = ['005930', '000660', '005935', '035420', '005380', '207940', '012330', '068270', '051910', '055550', '069500']
df.columns = ['KODEX200']
r = df.dropna()
rf = 0.0125
#df = df.resample('Y').agg(lambda x:x.mean()*252)
# Calculate basic summary statistics for individual stocks
stock_volatility = r.std() * np.sqrt(252)
stock_return = r.mean() * 252
alpha = stock_return.values
sigma = stock_volatility.values
# cov_inv = np.linalg.inv(cov)
# temp = np.dot(cov_inv, (stock_return- rf))
# theta_opt = temp / temp.sum() # optimal weight in Risky Mutual fund
# alpha = np.dot(theta_opt, stock_return) # 0.5941
# sigma = np.sqrt(cov.dot(theta_opt).dot(theta_opt))
```
## (5B), (7B)
```
# g_B = 0 # in case of age over retirement (Second scenario in Problem(B))
X0 = 150. # Saving account at the beginning
l = 3
t = 45 # age in case of age over retirement (Second scenario in Problem(B))
gamma = -3. # risk averse measure
phi = rf + (alpha -rf)**2 / (2 * sigma**2 * (1-gamma)) # temporal function for f_B
rho = 0.04 # impatience factor for utility function
beta = 4.59364 # parameter for mu
delta = 0.05032 # parameter for mu
rf=0.02
def f_B(t):
if t < 65:
ds = 0.01
T = 65
T_tilde = 110
value = 0
for s in np.arange(T, T_tilde, ds):
w_s = np.exp(-rho*s/(1-gamma))
tmp = (10**(beta + delta*s - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))
value += np.exp(-1/(1-gamma)*(tmp - gamma*tmp - gamma*phi *(s-t))) * w_s * ds
f = np.exp(-1/(1-gamma) *(tmp - gamma*tmp + gamma*phi*(T-t))) * value
return f
else: # 65~
ds = 0.01
T_tilde = 110
value = 0
for s in np.arange(t, T_tilde, ds):
w_s = np.exp(-rho*s/(1-gamma))
tmp = (10**(beta + delta*s - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))
value += np.exp(-1/(1-gamma)*(tmp - gamma*tmp - gamma*phi *(s-t))) * w_s * ds
return value
# def f_B(t):
# ds = 0.01
# T_tilde = 110
# value = 0
# for s in np.arange(t, T_tilde, ds):
# w_s = np.exp(-rho*s/(1-gamma))
# tmp = (10**(beta + delta*s - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))
# value += np.exp(- tmp + gamma/(1-gamma) * phi *(s-t)) * w_s * ds
# return value
# def V_B(t, x):
# f_b = f_B(t)
# value_fcn = 1/gamma * f_b **(1-gamma) * x **gamma
# return value_fcn
def C_star(t,X):
w_t = np.exp(-rho*t/(1-gamma))
f_b = f_B(t)
c_t = w_t/f_b * X
return c_t
def g_B(t, l):
ds=0.01
value = 0.
T=65 # retirement
if t < T:
for s in np.arange(t, T, ds):
tmp = (10**(beta + delta*s - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))
value += np.exp(-tmp)*l * ds
return value
else:
return 0.
pi_opt = (alpha-rf)/(sigma**2 *(1-gamma)) * (X0 + g_B(t, l))/X0 # Optimal weight for Risky Asset (7B)
print(pi_opt) # 0.25
# print(C_star(t, X))
```
## Simulation
```
import time
start = time.time()
dt = 1
def mu(t): # Mortality rate in next year
value = (10**(beta + delta*(t+dt) - 10)- 10**(beta + delta*t - 10))/(delta * np.log(10))
return value
n_simulation = 10000
Asset = np.empty(37)
Asset_stack = []
C_stack = []
for i in range(n_simulation):
Asset[0] = 150 # initial wealth
C_list = []
for t in range(45, 81):
if t < 65: # before retirement
l_t = 3 # payment to pension fund
pi_opt = (alpha-rf)/(sigma**2 *(1-gamma)) * (Asset[t-45] + g_B(t, l_t))/Asset[t-45]
C_t = 0 #
Z = np.random.randn()
Asset[t-45+1] = Asset[t-45]*np.exp(((1-pi_opt)*rf + pi_opt*alpha + mu(t)+ l_t/Asset[t-45] \
-pi_opt**2 * sigma**2/2)*dt + pi_opt * sigma * np.sqrt(dt) * Z)
else : # after retirement
l_t = 0 # payment duty is 0 after retirement
pi_opt = (alpha-rf)/(sigma**2 *(1-gamma)) * (Asset[t-45] + g_B(t, l_t))/Asset[t-45]
C_t = C_star(t=t, X = Asset[t-45])
Z = np.random.randn()
Asset[t-45+1] = Asset[t-45]*np.exp(((1-pi_opt)*rf + pi_opt*alpha + mu(t)- C_t/Asset[t-45] \
-pi_opt**2 * sigma**2/2)*dt + pi_opt * sigma * np.sqrt(dt) * Z)
C_list.append(C_t)
Asset_stack.append(list(Asset))
C_stack.append(C_list)
end = time.time()
print(end - start)
```
## Check the Simulation Result
```
Asset_mean = np.mean(Asset_stack, axis=0) #(37,)
C_mean = np.mean(C_stack, axis=0) # (16,1)
plt.rcParams['figure.figsize'] = [30, 15]
plt.rcParams.update({'font.size': 30})
plt.title('Retirement planning')
plt.xlabel('Age')
plt.ylabel('Won(1000000)')
plt.plot(range(45,81),Asset_mean[:-1], label='Wealth')
plt.plot(range(65,81),C_mean, '--', color = 'r', label="Pension")
plt.legend()
plt.grid()
pi_opt_list=[]
for t in range(45, 81):
if t < 65:
l_t = 3
else :
l_t = 0
pi_opt = (alpha-rf)/(sigma**2 *(1-gamma)) * (Asset_mean[:-1][t-45] + g_B(t, l_t))/Asset_mean[:-1][t-45]
pi_opt_list.append(pi_opt)
plt.title('Optimal risky-asset weight by age')
plt.xlabel('Age')
plt.ylabel('Weight')
plt.bar(range(45,81),np.array(pi_opt_list).squeeze())
```
|
github_jupyter
|
## Introduction
This article discusses how to port pandas code to Spark. Their DataFrames share a number of characteristics, such as operation methods and schemas. pandas is more flexible than Spark, but with a few changes Spark can accomplish essentially the same work, while also offering the advantage of scalability. Of course, the syntax and usage differ slightly between the two.
## Main differences
### Distributed processing
pandas can only work on a single machine, loading the DataFrame into memory for computation. Spark is distributed across a cluster and can process data that far exceeds the cluster's total memory.
### Lazy execution
Spark does not execute any `transformation` until an `action` needs to run; an `action` is typically an operation that stores or displays data. Deferring `transformation`s lets the Spark scheduler see the full execution plan, so it can optimize the execution order and read only the data it needs.
Lazy evaluation is also one of Scala's features. In general, in pandas we always work with the data itself, whereas in Spark we manipulate the execution plan that produces the data.
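As a small illustration (a sketch, assuming the `SparkSession` named `spark` created later in this notebook is available), nothing is computed while transformations are declared; the whole plan only runs when an action such as `count()` or `show()` is called.
```
# Transformations only build an execution plan; no data is computed yet.
df = spark.range(1000000)                # transformation
doubled = df.selectExpr("id * 2 AS x")   # transformation
filtered = doubled.filter("x % 3 = 0")   # transformation

# The action below triggers the whole plan at once, letting Spark optimize it.
print(filtered.count())
```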
### Immutable data
Scala's functional style favors immutable objects: every Spark transformation returns a new DataFrame (apart from some metadata that may change).
### No index
Spark has no concept of an index.
### Single-row lookups are inconvenient
pandas can quickly locate rows through its index; Spark has no such feature, because in Spark you mainly manipulate the execution plan that produces the data rather than the data itself.
### Spark SQL
With built-in SQL support, Spark is closer to a relational database.
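As a brief example (a sketch; it assumes `person_sp` has been loaded as in the PySpark examples below), a DataFrame can be registered as a temporary view and queried with plain SQL:
```
# Register the DataFrame as a temporary view and query it with SQL.
person_sp.createOrReplaceTempView("persons")
spark.sql("SELECT name, age FROM persons WHERE age > 20").show()
```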
## Some examples with pandas and PySpark
```
import pandas as pd
import pyspark.sql
import pyspark.sql.functions as sf
from pyspark.sql import SparkSession
```
### Projections
In pandas, a projection can be taken directly with the `[]` operator.
```
person_pd = pd.read_csv('data/persons.csv')
person_pd[["name", "sex", "age"]]
```
PySpark can also take a projection directly with `[]`, but this is syntactic sugar: under the hood it uses the `select` method.
```
spark = SparkSession.builder \
.master("local[*]") \
.config("spark.driver.memory","6G") \
.getOrCreate()
#person_pd[['age','name']]
person_sp = spark.read.option("inferSchema", True) \
.option("header", True) \
.csv('data/persons.csv')
person_sp.show()
person_sp[['age', 'name']].show()
```
### Simple transformations
Spark's `dataframe.select` actually accepts any Column object. Conceptually, a Column is one column of a DataFrame; it can be an input column of the DataFrame, a computed result, or the result of transforming several columns. For example, converting a column to upper case:
```
ret = pd.DataFrame(person_pd['name'].apply(lambda x: x.upper()))
ret
result = person_sp.select(
sf.upper(person_sp.name)
)
result.show()
```
### Adding a column to a DataFrame
Adding a column in pandas is easy: just assign to the DataFrame. Spark requires the `withColumn` function.
```
def create_salutation(row):
sex = row[0]
name = row[1]
if sex == 'male':
return 'Mr '+name
else:
return "Mrs "+name
result = person_pd.copy()
result['salutation'] = result[['sex','name']].apply(create_salutation, axis=1, result_type='expand')
result
result = person_sp.withColumn(
"salutation",
sf.concat(sf.when(person_sp.sex == 'male', "Mr ").otherwise("Mrs "), person_sp.name)
)
result.show()
```
### Filtering
```
result = person_pd[person_pd['age'] > 20]
result
```
Spark supports three ways of writing filters:
```
person_sp.filter(person_sp['age'] > 20).show()
person_sp[person_sp['age'] > 20].show()
person_sp.filter('age > 20').show()
```
### Grouping and aggregation
Analogous to SQL's `SELECT <aggregation> ... GROUP BY <grouping>` statement, both pandas and Spark define a number of aggregation functions, such as:
- count
- sum
- avg
- corr
- first
- last
See the [PySpark Function Documentation](http://spark.apache.org/docs/latest/api/python/pyspark.sql.html#module-pyspark.sql.functions) for details.
```
result = person_pd.groupby('sex').agg({'age': 'mean', 'height':['min', 'max']})
result
from pyspark.sql.functions import avg, min, max
result = person_sp.groupBy(person_sp.sex).agg(min(person_sp.height).alias('min height'), max(person_sp.height).alias('max height'),
avg(person_sp.age))
result.show()
person_sp.show()
```
### join
Spark also supports joins across DataFrames. Let's load some additional data as an example.
```
addresses = spark.read.json('data/addresses.json')
addresses_pd = addresses.toPandas()
addresses_pd
pd_join = person_pd.merge(addresses_pd, left_on=['name'], right_on=['name'])
pd_join
sp_join = person_sp.join(addresses, person_sp.name==addresses.name)
sp_join.show()
sp_join_1 = person_sp.join(addresses, on=['name'])
sp_join_1.show()
```
### Reassembling a DataFrame
pandas makes it easy to assign an existing column to a new column, but this is less convenient in Spark and requires a join.
```
df = person_pd[['name', 'age']]
col = person_pd['height']
result = df.copy()
result['h2'] = col
result
df = person_sp[['name', 'age']]
col = person_sp[['name', 'height']]
result = df.join(col, on=['name'])
result.show()
```
|
github_jupyter
|
# Putting the "Re" in Reformer: Ungraded Lab
This ungraded lab will explore Reversible Residual Networks. You will use these networks in this week's assignment, which utilizes the Reformer model. It is based on the Transformer model you already know, but with two unique features.
* Locality Sensitive Hashing (LSH) Attention to reduce the compute cost of the dot product attention and
* Reversible Residual Networks (RevNets) organization to reduce the storage requirements when doing backpropagation in training.
In this ungraded lab we'll start with a quick review of Residual Networks and their implementation in Trax. Then we will discuss the Revnet architecture and its use in Reformer.
## Outline
- [Part 1: Residual Networks](#1)
- [1.1 Branch](#1.1)
- [1.2 Residual Model](#1.2)
- [Part 2: Reversible Residual Networks](#2)
- [2.1 Trax Reversible Layers](#2.1)
- [2.2 Residual Model](#2.2)
```
import trax
from trax import layers as tl # core building block
import numpy as np # regular ol' numpy
from trax.models.reformer.reformer import (
ReversibleHalfResidualV2 as ReversibleHalfResidual,
) # unique spot
from trax import fastmath # uses jax, offers numpy on steroids
from trax import shapes # data signatures: dimensionality and type
from trax.fastmath import numpy as jnp # For use in defining new layer types.
from trax.shapes import ShapeDtype
from trax.shapes import signature
```
## Part 1.0 Residual Networks
[Deep Residual Networks ](https://arxiv.org/abs/1512.03385) (Resnets) were introduced to improve convergence in deep networks. Residual Networks introduce a shortcut connection around one or more layers in a deep network as shown in the diagram below from the original paper.
<center><img src = "Revnet7.PNG" height="250" width="250"></center>
<center><b>Figure 1: Residual Network diagram from original paper</b></center>
The [Trax documentation](https://trax-ml.readthedocs.io/en/latest/notebooks/layers_intro.html#2.-Inputs-and-Outputs) describes an implementation of Resnets using `branch`. We'll explore that here by implementing a simple resnet built from simple function based layers. Specifically, we'll build a 4 layer network based on two functions, 'F' and 'G'.
<img src = "Revnet8.PNG" height="200" width="1400">
<center><b>Figure 2: 4 stage Residual network</b></center>
Don't worry about the lengthy equations. Those are simply there to be referenced later in the notebook.
<a name="1.1"></a>
### Part 1.1 Branch
Trax `branch` figures prominently in the residual network layer, so we will examine it first. You can see from the figure above that we will need a function that will copy an input and send it down multiple paths. This is accomplished with a [branch layer](https://trax-ml.readthedocs.io/en/latest/trax.layers.html#module-trax.layers.combinators), one of the Trax 'combinators'. Branch is a combinator that applies a list of layers in parallel to copies of inputs. Let's try it out! First we will need some layers to play with. Let's build some from functions.
```
# simple function taking one input and one output
bl_add1 = tl.Fn("add1", lambda x0: (x0 + 1), n_out=1)
bl_add2 = tl.Fn("add2", lambda x0: (x0 + 2), n_out=1)
bl_add3 = tl.Fn("add3", lambda x0: (x0 + 3), n_out=1)
# try them out
x = np.array([1])
print(bl_add1(x), bl_add2(x), bl_add3(x))
# some information about our new layers
print(
"name:",
bl_add1.name,
"number of inputs:",
bl_add1.n_in,
"number of outputs:",
bl_add1.n_out,
)
bl_3add1s = tl.Branch(bl_add1, bl_add2, bl_add3)
bl_3add1s
```
Trax uses the concept of a 'stack' to transfer data between layers.
For Branch, for each of its layer arguments, it copies the `n_in` inputs from the stack and provides them to the layer, tracking the max_n_in, or the largest n_in required. It then pops the max_n_in elements from the stack.
<img src = "branch1.PNG" height="260" width="600">
<center><b>Figure 3: One in, one out Branch</b></center>
On output, each layer, in succession pushes its results onto the stack. Note that the push/pull operations impact the top of the stack. Elements that are not part of the operation (n, and m in the diagram) remain intact.
```
# n_in = 1, Each bl_addx pushes n_out = 1 elements onto the stack
bl_3add1s(x)
# n = np.array([10]); m = np.array([20]) # n, m will remain on the stack
n = "n"
m = "m" # n, m will remain on the stack
bl_3add1s([x, n, m])
```
Each layer in the input list copies as many inputs from the stack as it needs, and their outputs are successively combined on the stack. Put another way, each element of the branch can have differing numbers of inputs and outputs. Let's try a more complex example.
```
bl_addab = tl.Fn(
"addab", lambda x0, x1: (x0 + x1), n_out=1
) # Trax figures out how many inputs there are
bl_rep3x = tl.Fn(
"add2x", lambda x0: (x0, x0, x0), n_out=3
) # but you have to tell it how many outputs there are
bl_3ops = tl.Branch(bl_add1, bl_addab, bl_rep3x)
```
In this case, the number of inputs copied from the stack varies from layer to layer.
<img src = "branch2.PNG" height="260" width="600">
<center><b>Figure 4: variable in, variable out Branch</b></center>
When the operation is finished, the stack holds 5 entries, reflecting the combined outputs of the three layers.
```
# Before Running this cell, what is the output you are expecting?
y = np.array([3])
bl_3ops([x, y, n, m])
```
Branch has a special feature to support residual networks: if an argument is 'None', it pulls the top of the stack and pushes it (at its location in the sequence) onto the output stack.
<img src = "branch3.PNG" height="260" width="600">
<center><b>Figure 5: Branch for Residual</b></center>
```
bl_2ops = tl.Branch(bl_add1, None)
bl_2ops([x, n, m])
```
<a name="1.2"></a>
### Part 1.2 Residual Model
OK, your turn. Write a function 'MyResidual', that uses `tl.Branch` and `tl.Add` to build a residual layer. If you are curious about the Trax implementation, you can see the code [here](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/combinators.py).
```
def MyResidual(layer):
return tl.Serial(
### START CODE HERE ###
# tl.----,
# tl.----,
### END CODE HERE ###
)
# Lets Try it
mr = MyResidual(bl_add1)
x = np.array([1])
mr([x, n, m])
```
**Expected Result**
(array([3]), 'n', 'm')
Great! Now, let's build the 4 layer residual Network in Figure 2. You can use `MyResidual`, or if you prefer, the tl.Residual in Trax, or a combination!
```
Fl = tl.Fn("F", lambda x0: (2 * x0), n_out=1)
Gl = tl.Fn("G", lambda x0: (10 * x0), n_out=1)
x1 = np.array([1])
resfg = tl.Serial(
### START CODE HERE ###
# None, #Fl # x + F(x)
# None, #Gl # x + F(x) + G(x + F(x)) etc
# None, #Fl
# None, #Gl
### END CODE HERE ###
)
# Lets try it
resfg([x1, n, m])
```
**Expected Results**
(array([1089]), 'n', 'm')
<a name="2"></a>
## Part 2.0 Reversible Residual Networks
The Reformer utilized RevNets to reduce the storage requirements for performing backpropagation.
<img src = "Reversible2.PNG" height="260" width="600">
<center><b>Figure 6: Reversible Residual Networks </b></center>
The standard approach on the left above requires one to store the outputs of each stage for use during backprop. By using the organization on the right, one need only store the outputs of the last stage, y1, y2 in the diagram. Using those values and running the algorithm in reverse, one can reproduce the values required for backprop. This trades additional computation for memory space, which is at a premium with the current generation of GPUs/TPUs.
One thing to note is that the forward functions produced by the two networks are similar, but they are not equivalent. Note, for example, the asymmetry in the output equations after two stages of operation.
<img src = "Revnet1.PNG" height="340" width="1100">
<center><b>Figure 7: 'Normal' Residual Network (top) vs. Reversible Residual Network</b></center>
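To make the reversibility concrete, here is a small NumPy sketch (added for illustration, independent of Trax) of the two-stream update and its exact inverse, using simple stand-in functions F and G:
```
import numpy as np

F = lambda x: 2 * x      # stand-ins for the residual sub-layers
G = lambda x: 10 * x

def rev_forward(x1, x2):
    y1 = x1 + F(x2)
    y2 = x2 + G(y1)
    return y1, y2

def rev_backward(y1, y2):
    x2 = y2 - G(y1)      # recompute the inputs from the outputs alone
    x1 = y1 - F(x2)
    return x1, x2

y1, y2 = rev_forward(np.array([1.0]), np.array([1.0]))
print(rev_backward(y1, y2))  # recovers ([1.], [1.]) without storing the inputs
```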
### Part 2.1 Trax Reversible Layers
Let's take a look at how this is used in the Reformer.
```
refm = trax.models.reformer.ReformerLM(
vocab_size=33000, n_layers=2, mode="train" # Add more options.
)
refm
```
Eliminating some of the detail, we can see the structure of the network.
<img src = "Revnet2.PNG" height="300" width="350">
<center><b>Figure 8: Key Structure of Reformer Reversible Network Layers in Trax </b></center>
We'll review the Trax layers used to implement the Reversible section of the Reformer. First we can note that not all of the reformer is reversible. Only the section in the ReversibleSerial layer is reversible. In a large Reformer model, that section is repeated many times making up the majority of the model.
<img src = "Revnet3.PNG" height="650" width="1600">
<center><b>Figure 9: Functional Diagram of Trax elements in Reformer </b></center>
The implementation starts by duplicating the input to allow the two paths that are part of the reversible residual organization with [Dup](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/combinators.py#L666). Note that this is accomplished by copying the top of stack and pushing two copies of it onto the stack. This then feeds into the ReversibleHalfResidual layer which we'll review in more detail below. This is followed by [ReversibleSwap](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/reversible.py#L83). As the name implies, this performs a swap, in this case, the two topmost entries in the stack. This pattern is repeated until we reach the end of the ReversibleSerial section. At that point, the topmost 2 entries of the stack represent the two paths through the network. These are concatenated and pushed onto the stack. The result is an entry that is twice the size of the non-reversible version.
Let's look more closely at the [ReversibleHalfResidual](https://github.com/google/trax/blob/190ec6c3d941d8a9f30422f27ef0c95dc16d2ab1/trax/layers/reversible.py#L154). This layer is responsible for executing the layer or layers provided as arguments and adding the output of those layers, the 'residual', to the top of the stack. Below is the 'forward' routine which implements this.
<img src = "Revnet4.PNG" height="650" width="1600">
<center><b>Figure 10: ReversibleHalfResidual code and diagram </b></center>
Unlike the previous residual function, the value that is added is from the second path rather than the input to the set of sublayers in this layer. Note that the Layers called by the ReversibleHalfResidual forward function are not modified to support reverse functionality. This layer provides them a 'normal' view of the stack and takes care of reverse operation.
Let's try out some of these layers! We'll start with the ones that just operate on the stack, Dup() and Swap().
```
x1 = np.array([1])
x2 = np.array([5])
# Dup() duplicates the Top of Stack and returns the stack
dl = tl.Dup()
dl(x1)
# ReversibleSwap() swaps the top two elements of the stack and returns the stack
sl = tl.ReversibleSwap()
sl([x1, x2])
```
You are no doubt wondering "How is ReversibleSwap different from Swap?". Good question! Let's look:
<img src = "Revnet5.PNG" height="389" width="1000">
<center><b>Figure 11: Two versions of Swap() </b></center>
The ReverseXYZ functions include a "reverse" complement to their "forward" function that provides the functionality to run in reverse when doing backpropagation. It can also be run in reverse by simply calling 'reverse'.
```
# Demonstrate reverse swap
print(x1, x2, sl.reverse([x1, x2]))
```
Let's try ReversibleHalfResidual. First, we'll need some layers.
```
Fl = tl.Fn("F", lambda x0: (2 * x0), n_out=1)
Gl = tl.Fn("G", lambda x0: (10 * x0), n_out=1)
```
Just a note about ReversibleHalfResidual. As this is written, it resides in the Reformer model and is a layer. It is invoked a bit differently than other layers. Rather than tl.XYZ, it is just ReversibleHalfResidual(layers..) as shown below. This may change in the future.
```
half_res_F = ReversibleHalfResidual(Fl)
print(type(half_res_F), "\n", half_res_F)
half_res_F([x1, x1]) # this is going to produce an error - why?
# we have to initialize the ReversibleHalfResidual layer to let it know what the input is going to look like
half_res_F.init(shapes.signature([x1, x1]))
half_res_F([x1, x1])
```
Notice the output: (DeviceArray([3], dtype=int32), array([1])). The first value, (DeviceArray([3], dtype=int32) is the output of the "Fl" layer and has been converted to a 'Jax' DeviceArray. The second array([1]) is just passed through (recall the diagram of ReversibleHalfResidual above).
The final layer we need is the ReversibleSerial Layer. This is the reversible equivalent of the Serial layer and is used in the same manner to build a sequence of layers.
<a name="2.2"></a>
### Part 2.2 Build a reversible model
We now have all the layers we need to build the model shown below. Let's build it in two parts. First we'll build 'blk' and then a list of blk's. And then 'mod'.
<center><img src = "Revnet6.PNG" height="800" width="1600"> </center>
<center><b>Figure 12: Reversible Model we will build using Trax components </b></center>
```
blk = [ # a list of the 4 layers shown above
### START CODE HERE ###
None,
None,
None,
None,
]
blks = [None, None]
### END CODE HERE ###
mod = tl.Serial(
### START CODE HERE ###
None,
None,
None,
### END CODE HERE ###
)
mod
```
**Expected Output**
```
Serial[
Dup_out2
ReversibleSerial_in2_out2[
ReversibleHalfResidualV2_in2_out2[
Serial[
F
]
]
ReversibleSwap_in2_out2
ReversibleHalfResidualV2_in2_out2[
Serial[
G
]
]
ReversibleSwap_in2_out2
ReversibleHalfResidualV2_in2_out2[
Serial[
F
]
]
ReversibleSwap_in2_out2
ReversibleHalfResidualV2_in2_out2[
Serial[
G
]
]
ReversibleSwap_in2_out2
]
Concatenate_in2
]
```
```
mod.init(shapes.signature(x1))
out = mod(x1)
out
```
**Expected Result**
DeviceArray([ 65, 681], dtype=int32)
OK, now you have had a chance to try all the 'Reversible' functions in Trax. On to the Assignment!
|
github_jupyter
|
The visualization used for this homework is based on Alexandr Verinov's code.
# Generative models
In this homework we will try several criteria for learning an implicit model. Almost everything is written for you, and you only need to implement the objective for the game and play around with the model.
**0)** Read the code
**1)** Implement the objective for a vanilla [Generative Adversarial Network](https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf) (GAN). The hyperparameters are already set in the code. The model will converge if you implement objective (1) correctly.
**2)** Note the discussion in the paper that the objective for $G$ can take two forms: $\min_G \log(1 - D)$ and $\min_G -\log(D)$. Implement the second objective and make sure the model converges. Most likely you will not notice the difference in this example, but people usually use the second objective; it really matters in more complicated scenarios.
**3 & 4)** Implement [Wasserstein GAN](https://arxiv.org/abs/1701.07875) ([WGAN](https://arxiv.org/abs/1704.00028)) and WGAN-GP. To make the discriminator satisfy the Lipschitz property you need to clip the discriminator's weights to the $[-0.01, 0.01]$ range (WGAN) or use a gradient penalty (WGAN-GP). You will need to make a few modifications to the code: 1) remove the sigmoid from the discriminator, 2) add weight clipping / a gradient penalty, 3) change the objective. See [implementation 1](https://github.com/martinarjovsky/WassersteinGAN/) / [implementation 2](https://github.com/caogang/wgan-gp). They also use a different optimizer. The default hyperparameters may not work, so spend time tuning them.
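For reference, the weight-clipping step described above is a one-liner in PyTorch. Below is a minimal sketch (assuming the `discriminator` module defined later in this notebook; the objectives themselves are left for you to implement):
```
# WGAN weight clipping, applied after each discriminator optimizer step.
for p in discriminator.parameters():
    p.data.clamp_(-0.01, 0.01)
```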
**5) Bonus: same thing without GANs** Implement a maximum mean discrepancy (MMD) estimator. MMD is a discrepancy measure between distributions; in our case we use it to calculate the discrepancy between real and fake data. You need to implement the RBF kernel $k(x,x')=\exp \left(-{\frac {1}{2\sigma ^{2}}}||x-x'||^{2}\right)$ and an MMD estimator (see eq. 8 from https://arxiv.org/pdf/1505.03906.pdf). MMD is then used instead of the discriminator.
```
"""
Please, implement everything in one notebook, using if statements to switch between the tasks
"""
TASK = 1 # 2, 3, 4, 5
```
# Imports
```
import numpy as np
import time
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(12345)
lims=(-5, 5)
```
# Define sampler from real data and Z
```
from scipy.stats import rv_discrete
MEANS = np.array(
[[-1,-3],
[1,3],
[-2,0],
])
COVS = np.array(
[[[1,0.8],[0.8,1]],
[[1,-0.5],[-0.5,1]],
[[1,0],[0,1]],
])
PROBS = np.array([
0.2,
0.5,
0.3
])
assert len(MEANS) == len(COVS) == len(PROBS), "number of components mismatch"
COMPONENTS = len(MEANS)
comps_dist = rv_discrete(values=(range(COMPONENTS), PROBS))
def sample_true(N):
comps = comps_dist.rvs(size=N)
conds = np.arange(COMPONENTS)[:,None] == comps[None,:]
arr = np.array([np.random.multivariate_normal(MEANS[c], COVS[c], size=N)
for c in range(COMPONENTS)])
return np.select(conds[:,:,None], arr).astype(np.float32)
NOISE_DIM = 20
def sample_noise(N):
return np.random.normal(size=(N,NOISE_DIM)).astype(np.float32)
```
# Visualization functions
```
def vis_data(data):
"""
Visualizes data as histogram
"""
hist = np.histogram2d(data[:, 1], data[:, 0], bins=100, range=[lims, lims])
plt.pcolormesh(hist[1], hist[2], hist[0], alpha=0.5)
fixed_noise = sample_noise(1000)
def vis_g():
"""
Visualizes generator's samples as circles
"""
data = generator(Variable(torch.Tensor(fixed_noise))).data.numpy()
if np.isnan(data).any():
return
plt.scatter(data[:,0], data[:,1], alpha=0.2, c='b')
plt.xlim(lims)
plt.ylim(lims)
def vis_d():
"""
Visualizes discriminator's gradient on grid
"""
X, Y = np.meshgrid(np.linspace(lims[0], lims[1], 30), np.linspace(lims[0], lims[1], 30))
X = X.flatten()
Y = Y.flatten()
grid = Variable(torch.Tensor(np.vstack([X, Y]).T), requires_grad=True)
data_gen = generator(Variable(torch.Tensor(fixed_noise)))
loss = d_loss(discriminator(data_gen), discriminator(grid))
loss.backward()
grads = - grid.grad.data.numpy()
plt.quiver(X, Y, grads[:, 0], grads[:, 1], color='black',alpha=0.9)
```
# Define architectures
After you've passed task 1 you can play with architectures.
#### Generator
```
class Generator(nn.Module):
def __init__(self, noise_dim, out_dim, hidden_dim=100):
super(Generator, self).__init__()
self.fc1 = nn.Linear(noise_dim, hidden_dim)
nn.init.xavier_normal(self.fc1.weight)
nn.init.constant(self.fc1.bias, 0.0)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
nn.init.xavier_normal(self.fc2.weight)
nn.init.constant(self.fc2.bias, 0.0)
self.fc3 = nn.Linear(hidden_dim, out_dim)
nn.init.xavier_normal(self.fc3.weight)
nn.init.constant(self.fc3.bias, 0.0)
def forward(self, z):
"""
Generator takes a vector of noise and produces sample
"""
h1 = F.tanh(self.fc1(z))
h2 = F.leaky_relu(self.fc2(h1))
y_gen = self.fc3(h2)
return y_gen
```
#### Discriminator
```
class Discriminator(nn.Module):
def __init__(self, in_dim, hidden_dim=100):
super(Discriminator, self).__init__()
self.fc1 = nn.Linear(in_dim, hidden_dim)
nn.init.xavier_normal(self.fc1.weight)
nn.init.constant(self.fc1.bias, 0.0)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
nn.init.xavier_normal(self.fc2.weight)
nn.init.constant(self.fc2.bias, 0.0)
self.fc3 = nn.Linear(hidden_dim, hidden_dim)
nn.init.xavier_normal(self.fc3.weight)
nn.init.constant(self.fc3.bias, 0.0)
self.fc4 = nn.Linear(hidden_dim, 1)
nn.init.xavier_normal(self.fc4.weight)
nn.init.constant(self.fc4.bias, 0.0)
def forward(self, x):
h1 = F.tanh(self.fc1(x))
h2 = F.leaky_relu(self.fc2(h1))
h3 = F.leaky_relu(self.fc3(h2))
score = F.sigmoid(self.fc4(h3))
return score
```
# Define updates and losses
```
generator = Generator(NOISE_DIM, out_dim = 2)
discriminator = Discriminator(in_dim = 2)
lr = 0.001
g_optimizer = optim.Adam(generator.parameters(), lr=lr, betas=(0.5, 0.999))
d_optimizer = optim.Adam(discriminator.parameters(), lr=lr, betas=(0.5, 0.999))
```
Notice we are using the Adam optimizer with `beta1=0.5` for both the generator and the discriminator. This is a common practice and works well. Motivation: the models should be flexible and adapt rapidly to the changing distributions.
You can try different optimizers and parameters.
```
################################
# IMPLEMENT HERE
# Define the g_loss and d_loss here
# these are the only lines of code you need to change to implement GAN game
def g_loss():
# if TASK == 1:
# do something
return # TODO
def d_loss():
# if TASK == 1:
# do something
return # TODO
################################
```
# Get real data
```
data = sample_true(100000)
def iterate_minibatches(X, batchsize, y=None):
perm = np.random.permutation(X.shape[0])
for start in range(0, X.shape[0], batchsize):
end = min(start + batchsize, X.shape[0])
if y is None:
yield X[perm[start:end]]
else:
yield X[perm[start:end]], y[perm[start:end]]
plt.rcParams['figure.figsize'] = (12, 12)
vis_data(data)
vis_g()
vis_d()
```
**Legend**:
- Blue dots are generated samples.
- Colored histogram at the back shows density of real data.
- And with arrows we show the gradients of the discriminator -- these are the directions in which the discriminator pushes the generator's samples.
# Train the model
```
from IPython import display
plt.xlim(lims)
plt.ylim(lims)
num_epochs = 100
batch_size = 64
# ===========================
# IMPORTANT PARAMETER:
# Number of D updates per G update
# ===========================
k_d, k_g = 4, 1
accs = []
try:
for epoch in range(num_epochs):
for input_data in iterate_minibatches(data, batch_size):
# Optimize D
for _ in range(k_d):
# Sample noise
noise = Variable(torch.Tensor(sample_noise(len(input_data))))
# Do an update
inp_data = Variable(torch.Tensor(input_data))
data_gen = generator(noise)
loss = d_loss(discriminator(data_gen), discriminator(inp_data))
d_optimizer.zero_grad()
loss.backward()
d_optimizer.step()
# Optimize G
for _ in range(k_g):
# Sample noise
noise = Variable(torch.Tensor(sample_noise(len(input_data))))
# Do an update
data_gen = generator(noise)
loss = g_loss(discriminator(data_gen))
g_optimizer.zero_grad()
loss.backward()
g_optimizer.step()
# Visualize
plt.clf()
vis_data(data); vis_g(); vis_d()
display.clear_output(wait=True)
display.display(plt.gcf())
except KeyboardInterrupt:
pass
```
# Describe your findings here
And I'm a tomato.
```
import sys
import os
import h5py
import json
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
from stimuli_f0_labels import get_f0_bins, f0_to_label
fn = '/om4/group/mcdermott/user/msaddler/pitchnet_dataset/pitchnetDataset/assets/data/processed/dataset_2019-11-22-2300/PND_sr32000_v08.hdf5'
# fn = '/om4/group/mcdermott/user/msaddler/pitchnet_dataset/pitchnetDataset/assets/data/processed/dataset_2019-11-16-2300/PND_sr32000_v07.hdf5'
# fn = '/om4/group/mcdermott/user/msaddler/pitchnet_dataset/pitchnetDataset/assets/data/processed/dataset_2019-08-16-1200/PND_sr32000_v04.hdf5'
f = h5py.File(fn, 'r')
for v in f.values():
print(v)
for v in f['config'].values():
print(v)
file_indexes = f['source_file_index'][:]
segment_indexes = f['source_file_row'][:]
f0_values = f['nopad_f0_mean'][:]
source_file_encoding_dict = f['config/source_file_encoding_dict'][0]
source_file_encoding_dict = source_file_encoding_dict.replace('"', '"""')
source_file_encoding_dict = source_file_encoding_dict.replace('\'', '"')
source_file_encoding_dict = json.loads(source_file_encoding_dict)
f.close()
file_index_to_filename_map = {}
for key in source_file_encoding_dict.keys():
file_index_to_filename_map[source_file_encoding_dict[key]] = os.path.basename(key)
f0_bins = get_f0_bins()
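# Accumulate, for each source corpus, an F0 histogram plus unique/total segment counts
# (keys are the source-file basenames recovered above).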
dataset_separated_histograms = {}
dataset_separated_unique_segments = {}
dataset_separated_total_segments = {}
for file_index in np.unique(file_indexes):
f0_values_from_file_idx = f0_values[file_indexes == file_index]
segment_indexes_from_file_idx = segment_indexes[file_indexes == file_index]
counts, bins = np.histogram(f0_values_from_file_idx, bins=f0_bins)
dset_key = file_index_to_filename_map[file_index]
dataset_separated_histograms[dset_key] = counts
dataset_separated_unique_segments[dset_key] = np.unique(segment_indexes_from_file_idx).shape[0]
dataset_separated_total_segments[dset_key] = segment_indexes_from_file_idx.shape[0]
dataset_details = {
'RWC': {
'key': 'f0_segments_2019AUG16_rwc.hdf5',
'plot_kwargs': {'color': [0, 0.8, 0]},
},
'NSYNTH': {
'key': 'f0_segments_2019AUG16_nsynth.hdf5',
'plot_kwargs': {'color': [0, 0.6, 0]}
},
'WSJ': {
'key': 'f0_segments_2019AUG16_wsj.hdf5',
'plot_kwargs': {'color': [0.6, 0.6, 0.6]}
},
'SWC': {
'key': 'f0_segments_2019AUG16_swc.hdf5',
'plot_kwargs': {'color': [0.4, 0.4, 0.4]}
},
'CSLUKIDS': {
'key': 'f0_segments_2019NOV16_cslu_kids.hdf5',
'plot_kwargs': {'color': [0.2, 0.2, 0.2]}
},
'CMUKIDS': {
'key': 'f0_segments_2019NOV22_cmu_kids.hdf5',
'plot_kwargs': {'color': [0.8, 0.8, 0.8]}
},
}
dataset_list = ['RWC', 'NSYNTH', 'WSJ', 'SWC', 'CSLUKIDS', 'CMUKIDS']
dataset_list.reverse()
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 6))
x = np.arange(0, len(counts))
bottom = np.zeros_like(x)
for dataset in dataset_list:
key = dataset_details[dataset]['key']
if key in dataset_separated_histograms.keys():
y = dataset_separated_histograms[key]
plot_kwargs = dataset_details[dataset]['plot_kwargs']
label = '{:s} ({} total; {} unique; {:.1f} mean repeats)'.format(
dataset,
dataset_separated_total_segments[key],
dataset_separated_unique_segments[key],
dataset_separated_total_segments[key] / dataset_separated_unique_segments[key])
ax.fill_between(x, y1=bottom, y2=bottom+y, **plot_kwargs, lw=0, label=label)
bottom = bottom + y
else:
print(key)
ax.legend(loc='upper right', framealpha=1, facecolor='w', edgecolor='w', fontsize=10)
ax.set_xlim([x[0], x[-1]])
ax.set_ylim([0, np.max(bottom)])
ax.set_ylabel('Number of stimuli')
ax.set_xlabel('F0 bin (Hz)')
class_indexes = np.linspace(x[0], x[-1], 15, dtype=int)
f0_class_labels = ['{:.0f}'.format(_) for _ in f0_bins[class_indexes]]
ax.set_xticks(class_indexes)
ax.set_xticklabels(f0_class_labels)
plt.tight_layout()
plt.show()
# save_dir = '/om2/user/msaddler/pitchnet/assets_psychophysics/figures/archive_2019_12_05_PNDv08_archSearch01/'
# save_fn = '2019NOV27_PND_v08_dataset_composition.pdf'
# print(os.path.join(save_dir, save_fn))
# fig.savefig(os.path.join(save_dir, save_fn), bbox_inches='tight')
# Check how many bins are spanned by speech-only and music-only datasets
f0_bin_min=80
f0_bin_max=1e3
f0_min=80
f0_max=450.91752190019395
binwidth_in_octaves=1/192
f0_values = np.arange(f0_min, f0_max+1e-2, 1e-2)
f0_bins = get_f0_bins(f0_min=f0_bin_min, f0_max=f0_bin_max, binwidth_in_octaves=binwidth_in_octaves)
f0_labels = f0_to_label(f0_values, f0_bins, right=False)
# Slightly hacky way to determine the correct value of f0_max to ensure all bins are equally wide
f0_min_label = np.squeeze(np.argwhere(f0_bins >= f0_min))[0]
f0_max_label = np.squeeze(np.argwhere(f0_bins < f0_max))[-1] + 1
f0_min_label, f0_max_label
import sys
import os
import h5py
import glob
import numpy as np
import scipy.signal
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_stimuli
import util_misc
import util_figures
regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/PND_sr32000_v08_*.hdf5'
list_fn = sorted(glob.glob(regex_fn))
fn = list_fn[-1]
with h5py.File(fn, 'r') as f:
sr = f['sr'][0]
IDX = np.random.randint(0, f['nopad_f0_mean'].shape[0])
f0 = f['nopad_f0_mean'][IDX]
y_fg = util_stimuli.set_dBSPL(f['stimuli/signal'][IDX], 60.0)
y_bg = util_stimuli.set_dBSPL(f['stimuli/noise'][IDX], 60.0)
fxx, pxx = util_stimuli.power_spectrum(y_fg, sr)
fenv, penv = util_stimuli.get_spectral_envelope_lp(y_fg, sr, M=6)
penv = penv - penv.max() + pxx.max()
print(util_stimuli.get_dBSPL(y_fg))
print(util_stimuli.get_dBSPL(y_bg))
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 2.5))
ax.plot(fxx, pxx, color='k', lw=1.0)
ax.plot(fenv, penv, color='r', lw=1.0)
ax = util_figures.format_axes(ax,
xscale='linear',
str_xlabel='Frequency (Hz)',
str_ylabel='Power (dB SPL)',
xlimits=[40, sr/2],
ylimits=None,
spines_to_hide=['right', 'top'])
plt.show()
ipd.display(ipd.Audio(y_fg, rate=sr))
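# Resynthesize the foreground: build a harmonic complex at f0 (all harmonics up to Nyquist)
# and impose the LP spectral envelope estimated from the natural signal above.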
y = y_fg
t = np.arange(0, len(y)) / sr
x = np.zeros_like(y)
for f in np.arange(f0, sr/2, f0):
x = x + np.sin(2*np.pi*f*t)
# x = np.random.randn(*t.shape)
b_lp, a_lp = util_stimuli.get_spectral_envelope_lp_coefficients(y, M=6)
x = scipy.signal.lfilter(b_lp, a_lp, x)
x = util_stimuli.set_dBSPL(x, 60.0)
fyy, pyy = util_stimuli.power_spectrum(y, sr)
fxx, pxx = util_stimuli.power_spectrum(x, sr)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 2.5))
ax.plot(fyy, pyy, color='k', lw=1.0)
ax.plot(fxx, pxx, color='r', lw=1.0)
ax = util_figures.format_axes(ax,
xscale='linear',
str_xlabel='Frequency (Hz)',
str_ylabel='Power (dB SPL)',
xlimits=[40, sr/2],
ylimits=None,
spines_to_hide=['right', 'top'])
plt.show()
plt.figure(figsize=(8, 1.5))
plt.plot(t, y, color='k')
plt.plot(t, x, color='r')
plt.show()
ipd.display(ipd.Audio(y, rate=sr))
ipd.display(ipd.Audio(x, rate=sr))
import sys
import os
import h5py
import json
import glob
import numpy as np
import scipy.signal
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_stimuli
import util_misc
import util_figures
# regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10_filter_signalLPv01/SPECTRAL_STATISTICS_v00/*.hdf5'
# regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/SPECTRAL_STATISTICS_v00/*.hdf5'
# regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_mfcc/matchedPNDv08_snr_neg10pos10_phase0/SPECTRAL_STATISTICS_v00/*.hdf5'
regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_mfcc/PNDv08negated12_TLASmatched12_snr_neg10pos10_phase3/SPECTRAL_STATISTICS_v00/*.hdf5'
list_fn = sorted(glob.glob(regex_fn))
list_key = ['stimuli/signal', 'stimuli/noise']
dict_mfcc = {key: [] for key in list_key}
dict_mean_spectra = {}
for itr_fn, fn in enumerate(list_fn):
with h5py.File(fn, 'r') as f:
sr = f['sr'][0]
freqs = f['freqs'][0]
nopad_start = f['nopad_start'][0]
nopad_end = f['nopad_end'][0]
for key in list_key:
if itr_fn == 0:
dict_mean_spectra[key] = {
'freqs': freqs,
'summed_power_spectrum': np.zeros_like(freqs),
'count': 0,
'nfft': nopad_end - nopad_start,
}
nrows = f[key + '_power_spectrum'].shape[0]
nrows_steps = np.linspace(0, nrows, 2, dtype=int)
for nrow_start, nrow_end in zip(nrows_steps[:-1], nrows_steps[1:]):
all_spectra = f[key + '_power_spectrum'][nrow_start:nrow_end]
# TRUNCATE = -20
# all_spectra[all_spectra < TRUNCATE] = TRUNCATE
IDX = np.isfinite(np.sum(all_spectra, axis=1))
dict_mean_spectra[key]['summed_power_spectrum'] += np.sum(all_spectra[IDX], axis=0)
dict_mean_spectra[key]['count'] += np.sum(IDX, axis=0)
dict_mfcc[key].append(f[key + '_mfcc'][:])
if itr_fn % 5 == 0:
print(itr_fn, os.path.basename(fn), dict_mean_spectra[key]['count'])
for key in list_key:
print('concatenating {} mfcc arrays'.format(key))
dict_mfcc[key] = np.concatenate(dict_mfcc[key], axis=0)
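# Summarize per-key statistics: MFCC mean/covariance and the mean power spectrum
# (summed spectra normalized by the number of finite rows).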
results_dict = {}
for key in sorted(dict_mfcc.keys()):
mfcc_cov = np.cov(dict_mfcc[key], rowvar=False)
mfcc_mean = np.mean(dict_mfcc[key], axis=0)
results_dict[key] = {
'mfcc_mean': mfcc_mean,
'mfcc_cov': mfcc_cov,
'sr': sr,
'mean_power_spectrum': dict_mean_spectra[key]['summed_power_spectrum'] / dict_mean_spectra[key]['count'],
'mean_power_spectrum_freqs': dict_mean_spectra[key]['freqs'],
'mean_power_spectrum_count': dict_mean_spectra[key]['count'],
'mean_power_spectrum_n_fft': dict_mean_spectra[key]['nfft'],
}
# results_dict[key]['mean_power_spectrum'] = 10*np.log10(results_dict[key]['mean_power_spectrum'])
print(results_dict[key]['mean_power_spectrum'].max())
print(results_dict[key]['mean_power_spectrum'].min())
fn_results_dict = os.path.join(os.path.dirname(fn), 'results_dict.json')
with open(fn_results_dict, 'w') as f:
json.dump(results_dict, f, sort_keys=True, cls=util_misc.NumpyEncoder)
print(fn_results_dict)
for k0 in sorted(results_dict.keys()):
for k1 in sorted(results_dict[k0].keys()):
val = np.array(results_dict[k0][k1])
if len(val.reshape([-1])) > 10:
print(k0, k1, val.shape)
else:
print(k0, k1, val)
# nvars = dict_mfcc[key].shape[1]
# ncols = 4
# nrows = int(np.ceil(nvars/ncols))
# fig, ax = plt.subplots(ncols=ncols,
# nrows=nrows,
# figsize=(3*ncols, 2*nrows))
# ax = ax.reshape([-1])
# for ax_idx in range(nvars):
# bins = 100#np.linspace(-2.5, 2.5, 100)
# for key in sorted(results_dict.keys()):
# vals = dict_mfcc[key][:, ax_idx]
# ax[ax_idx].hist(vals, bins=bins, alpha=0.5)
# ax[ax_idx] = util_figures.format_axes(ax[ax_idx],
# str_xlabel='mfcc {}'.format(ax_idx + 1),
# str_ylabel='Count',
# xlimits=None,
# ylimits=None)
# for ax_idx in range(nvars, ax.shape[0]):
# ax[ax_idx].axis('off')
# plt.tight_layout()
# plt.show()
import sys
import os
import h5py
import json
import glob
import numpy as np
import librosa
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_stimuli
import util_misc
import util_figures
data_dir = '/om/scratch/Fri/msaddler/data_pitchnet/'
basename = 'SPECTRAL_STATISTICS_v00/results_dict_v00.json'
list_dataset_tag = [
('PND_v08/noise_TLAS_snr_neg10pos10', 'Natural sounds'),
# 'PND_v08/noise_TLAS_snr_neg10pos10_filter_signalLPv01', 'Natural sounds (lowpass)'),
# 'PND_v08/noise_TLAS_snr_neg10pos10_filter_signalHPv00', 'Natural sounds (highpass)'),
# 'PND_v08inst/noise_TLAS_snr_neg10pos10',
# 'PND_v08spch/noise_TLAS_snr_neg10pos10',
('PND_mfcc/PNDv08matched12_TLASmatched12_snr_neg10pos10_phase0', 'Synthetic (12-MFCC-matched to natural)'),
# 'PND_mfcc/negatedPNDv08_snr_neg10pos10_phase0',
# ('PND_mfcc/debug', 'Synthetic (flat spectrum)'),
# ('PND_mfcc/PNDv08matched12_TLASmatched12_snr_neg10pos10_phase3', 'Synthetic (12-MFCC-matched)'),
# ('PND_mfcc/PNDv08negated12_TLASmatched12_snr_neg10pos10_phase3', 'Synthetic (12-MFCC-negated)'),
]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 7.5))
clist = 'krbmgyc'
for cidx, (dataset_tag, label_tag) in enumerate(list_dataset_tag):
fn_results_dict = os.path.join(data_dir, dataset_tag, basename)
with open(fn_results_dict, 'r') as f:
results_dict = json.load(f)
for key in sorted(results_dict.keys()):
MEAN_FXX = np.array(results_dict[key]['mean_power_spectrum_freqs'])
MEAN_PXX = np.array(results_dict[key]['mean_power_spectrum_envelope'])
# MEAN_PXX -= MEAN_PXX.max()
# sr = np.array(results_dict[key]['sr'])
# nfft = results_dict[key]['mean_power_spectrum_n_fft']
# mfcc_mean = np.array(results_dict[key]['mfcc_mean'])
# mfcc_mean[12:] = 0
# M = librosa.filters.mel(sr, nfft, n_mels=len(mfcc_mean))
# Minv = np.linalg.pinv(M)
# power_spectrum = util_stimuli.get_power_spectrum_from_mfcc(mfcc_mean, Minv)
# MEAN_FXX = np.fft.rfftfreq(nfft, d=1/sr)
# MEAN_PXX = 10*np.log10(power_spectrum)
color = clist[cidx]
ls = '-'
if 'noise' in key:
ls = '--'
ax.plot(MEAN_FXX,
MEAN_PXX,
label='{} : {}'.format(label_tag, key),
lw=2.5,
color=color,
ls=ls)
MEAN_MFCC = np.array(results_dict[key]['mean_power_spectrum_freqs'])
ax.legend(loc='lower left')
ax = util_figures.format_axes(ax,
xscale='log',
str_xlabel='Frequency (Hz)',
str_ylabel='Power (dB)',
xlimits=[40, None],
ylimits=[-40, None],
spines_to_hide=['right', 'top'])
plt.show()
import sys
import os
import h5py
import json
import glob
import copy
import pdb
import numpy as np
import scipy.signal
import librosa
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_stimuli
import util_misc
import util_figures
fn_results_dict = '/om/scratch/Mon/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/SPECTRAL_STATISTICS_v00/results_dict.json'
with open(fn_results_dict, 'r') as f:
results_dict = json.load(f)
N = 1
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.5, 5.0))
for itrN in range(N):
for key in sorted(results_dict.keys())[1:]:
mfcc_mean = np.array(results_dict[key]['mfcc_mean'])
mfcc_cov = np.array(results_dict[key]['mfcc_cov'])
sr = results_dict[key]['sr']
dur = 0.150
nfft = int(dur*sr)
M = librosa.filters.mel(sr, nfft, n_mels=len(mfcc_mean))
Minv = np.linalg.pinv(M)
mfcc = np.random.multivariate_normal(mfcc_mean, mfcc_cov)
mfcc[0:] = 0
power_spectrum = util_stimuli.get_power_spectrum_from_mfcc(mfcc, Minv)
power_spectrum_freqs = np.fft.rfftfreq(nfft, d=1/sr)
f0 = 250.0
frequencies = np.arange(f0, sr/2, f0)
amplitudes = np.interp(frequencies,
power_spectrum_freqs,
np.sqrt(power_spectrum))
signal = util_stimuli.complex_tone(f0,
sr,
dur,
harmonic_numbers=None,
frequencies=frequencies,
amplitudes=amplitudes,
phase_mode='sine',
offset_start=True,
strict_nyquist=True)
# signal = util_stimuli.impose_power_spectrum(signal, power_spectrum)
if 'noise' in key:
signal = np.random.randn(nfft)
signal = util_stimuli.impose_power_spectrum(signal, power_spectrum)
kwargs_plot = {
'ls': '-',
'color': 'b',
}
if 'noise' in key:
kwargs_plot['ls'] = '-'
kwargs_plot['color'] = 'k'
fxx, pxx = util_stimuli.power_spectrum(signal, sr)
ax.plot(fxx, pxx-pxx.max(), lw=0.25, color='m')
power_spectrum = 10*np.log10(power_spectrum)
ax.plot(fxx, power_spectrum-power_spectrum.max(), lw=0.5, **kwargs_plot)
MEAN_FXX = np.array(results_dict[key]['mean_power_spectrum_freqs'])
MEAN_PXX = np.array(results_dict[key]['mean_power_spectrum'])
# MEAN_FXX = np.fft.rfftfreq(nfft, d=1/sr)
# MEAN_PXX = util_stimuli.get_power_spectrum_from_mfcc(mfcc_mean, Minv)
# MEAN_PXX = 10*np.log10(MEAN_PXX)
ax.plot(MEAN_FXX, MEAN_PXX-MEAN_PXX.max(), lw=2.5, **kwargs_plot)
# mfcc2 = util_stimuli.get_mfcc(signal, M)
# mfcc2[12:] = 0
# pxx2 = util_stimuli.get_power_spectrum_from_mfcc(mfcc2, Minv)
# pxx2 = 10*np.log10(pxx2)
# fxx2 = np.fft.rfftfreq(len(signal), d=1/sr)
# ax.plot(fxx2, pxx2-pxx2.max(), color='g')
# Use this function to specify axis scaling, limits, labels, etc.
ax = util_figures.format_axes(ax,
xscale='linear',
str_xlabel='Frequency (Hz)',
str_ylabel='Power (dB)',
xlimits=[40, None],
ylimits=[-80, None],
spines_to_hide=['right', 'top'])
plt.show()
ipd.display(ipd.Audio(signal, rate=sr))
import stimuli_generate_random_synthetic_tones
import importlib
importlib.reload(stimuli_generate_random_synthetic_tones)
spectral_statistics_filename = '/om/scratch/Mon/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/SPECTRAL_STATISTICS_v00/results_dict.json'
stimuli_generate_random_synthetic_tones.spectrally_shaped_synthetic_dataset(
'tmp.hdf5',
500,
spectral_statistics_filename,
fs=32e3,
dur=0.150,
phase_modes=['cos'],
range_f0=[80.0, 1001.3713909809752],
range_snr=[-10., 10.],
range_dbspl=[30., 90.],
n_mfcc=12,
invert_signal_filter=0,
invert_noise_filter=False,
generate_signal_in_fft_domain=False,
out_combined_key='stimuli/signal_in_noise',
out_signal_key='stimuli/signal',
out_noise_key='stimuli/noise',
out_snr_key='snr',
out_augmentation_prefix='augmentation/',
random_seed=0,
disp_step=50)
import sys
import os
import h5py
import json
import glob
import copy
import pdb
import librosa
import numpy as np
import scipy.signal
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_stimuli
import util_misc
import util_figures
key_list = ['stimuli/signal_in_noise']#, 'stimuli/signal', 'stimuli/noise']
with h5py.File('tmp.hdf5', 'r') as f:
sr = f['sr'][0]
y = {}
for k in key_list:
y[k] = f[k][np.random.randint(500)]
for k in util_misc.get_hdf5_dataset_key_list(f):
print(k, f[k])
ipd.display(ipd.Audio(y['stimuli/signal_in_noise'], rate=sr))
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(7.5, 5.0))
for k in key_list:
fxx, pxx = util_stimuli.power_spectrum(y[k], sr)
ax.plot(fxx, pxx-pxx.max(), lw=0.5, label=k)
ax.legend()
ax = util_figures.format_axes(ax,
xscale='linear',
str_xlabel='Frequency (Hz)',
str_ylabel='Power (dB SPL)',
xlimits=[40, None],
ylimits=[-60, None],
spines_to_hide=['right', 'top'])
plt.show()
import sys
import os
import h5py
import glob
import numpy as np
import scipy.signal
import scipy.fftpack
import librosa
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_stimuli
import util_misc
import util_figures
regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/PND_sr32000_v08_*.hdf5'
regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_mfcc/PNDv08PYSmatched12_TLASmatched12_snr_neg10pos10_phase3/stim_0000000-0002100.hdf5'
list_fn = sorted(glob.glob(regex_fn))
fn = list_fn[-1]
key_y = 'stimuli/signal'
key_f0 = 'nopad_f0_mean'
key_f0 = 'f0'
with h5py.File(fn, 'r') as f:
sr = f['sr'][0]
IDX = np.random.randint(0, f[key_f0].shape[0])
f0 = f[key_f0][IDX]
y = util_stimuli.set_dBSPL(f[key_y][IDX], 60.0)
fxx, pxx = util_stimuli.power_spectrum(y, sr)
print(f0)
harmonic_frequencies = np.arange(f0, sr/2, f0)
IDX = np.digitize(harmonic_frequencies, fxx)
harmonic_freq_bins = fxx[IDX]
spectrum_freq_bins = pxx[IDX]
envelope_spectrum = np.interp(fxx, harmonic_freq_bins, spectrum_freq_bins)
# philbert = np.abs(scipy.signal.hilbert(pxx+50))
fenv, penv = util_stimuli.get_spectral_envelope_lp(y, sr, M=12)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 5))
ax.plot(fxx, pxx, color='k', lw=1.0)
ax.plot(fxx, envelope_spectrum, color='r', lw=2.0)
# ax.plot(fenv, penv, color='r', lw=1.0)
ax = util_figures.format_axes(ax,
xscale='linear',
str_xlabel='Frequency (Hz)',
str_ylabel='Power (dB SPL)',
xlimits=[40, sr/2],
ylimits=[-20, None],
spines_to_hide=['right', 'top'])
plt.show()
ipd.display(ipd.Audio(y, rate=sr))
# import sys
# import os
# import h5py
# import json
# import glob
# import numpy as np
# import scipy.signal
# %matplotlib inline
# import matplotlib.pyplot as plt
# import IPython.display as ipd
# sys.path.append('/packages/msutil')
# import util_stimuli
# import util_misc
# import util_figures
import importlib
import stimuli_compute_statistics
importlib.reload(stimuli_compute_statistics)
import stimuli_analyze_pystraight
importlib.reload(stimuli_analyze_pystraight)
# regex_fn = '/om/scratch/Fri/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/PYSTRAIGHT_v01_foreground/PND*.hdf5'
regex_fn = '/om/scratch/Fri/msaddler/data_pitchnet/PND_mfcc/PNDv08PYSnegated12_TLASmatched12_snr_neg10pos10_phase3/PYSTRAIGHT_v01_foreground/*.hdf5'
print(regex_fn)
stimuli_analyze_pystraight.summarize_pystraight_statistics(
regex_fn,
fn_results='results_dict.json',
key_sr='sr',
key_signal_list=['stimuli/signal'])
# regex_fn = '/om/scratch/Fri/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/SPECTRAL_STATISTICS_v00/PND*.hdf5'
# regex_fn = '/om/scratch/Fri/msaddler/data_pitchnet/PND_mfcc/PNDv08PYSmatched12_TLASmatched12_snr_neg10pos10_phase3/SPECTRAL_STATISTICS_v00/*.hdf5'
# print(regex_fn)
# stimuli_compute_statistics.summarize_spectral_statistics(regex_fn,
# fn_results='results_dict.json',
# key_sr='sr',
# key_f0=None,
# key_signal_list=['stimuli/signal', 'stimuli/noise'])
import sys
import os
import h5py
import glob
import numpy as np
import scipy.signal
import scipy.fftpack
import librosa
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_stimuli
import util_misc
import util_figures
regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/PYSTRAIGHT_v01_foreground/*.hdf5'
# regex_fn = '/om/scratch/*/msaddler/data_pitchnet/PND_mfcc/debug_PNDv08PYSmatched12_TLASmatched12_snr_neg10pos10_phase3/PYSTRAIGHT_v01_foreground/*.hdf5'
list_fn = sorted(glob.glob(regex_fn))
fn = list_fn[0]
key_f0 = 'f0'
key_y = 'stimuli/signal_INTERP_interp_signal'
with h5py.File(fn, 'r') as f:
for k in util_misc.get_hdf5_dataset_key_list(f):
print(k, f[k].shape)
sr = f['sr'][0]
IDX = np.random.randint(0, f[key_y].shape[0])
# IDX = 5
y = util_stimuli.set_dBSPL(f[key_y][IDX], 60.0)
if True:#key_f0 in f:
f0 = f[key_f0][IDX]
print('------------>', f0, f['pystraight_success'][IDX])
NTMP = f[key_y].shape[1]
power = 0
while NTMP > 2:
NTMP /= 2
power += 1
n_fft = int(2 ** power)
M = librosa.filters.mel(sr, n_fft, n_mels=40)
Minv = np.linalg.pinv(M)
fxx = np.fft.rfftfreq(n_fft, d=1/sr)
pxx = f['stimuli/signal_FILTER_spectrumSTRAIGHT'][IDX]
mfcc = f['stimuli/signal_FILTER_spectrumSTRAIGHT_mfcc'][IDX]
# mfcc = scipy.fftpack.dct(np.log(np.matmul(M, pxx)), norm='ortho')
mfcc[12:] = 0
pxx_mfcc = np.matmul(Minv, np.exp(scipy.fftpack.idct(mfcc, norm='ortho')))
pxx_mfcc[pxx_mfcc < 0] = 0
pxx_mfcc = 10*np.log10(pxx_mfcc)
pxx_mfcc -= pxx_mfcc.max()
pxx_straight = pxx
pxx_straight = 10*np.log10(pxx_straight)
pxx_straight -= pxx_straight.max()
fyy, pyy = util_stimuli.power_spectrum(y, sr)
pyy -= pyy.max()
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 5))
ax.plot(fyy, pyy, color='k', lw=1.0)
ax.plot(fxx, pxx_straight, color='r', lw=2.0)
ax.plot(fxx, pxx_mfcc, color='g', lw=2.0)
ax = util_figures.format_axes(ax,
xscale='linear',
str_xlabel='Frequency (Hz)',
str_ylabel='Power (dB SPL)',
xlimits=[40, sr/2],
# ylimits=[-100, 10],
spines_to_hide=['right', 'top'])
plt.show()
ipd.display(ipd.Audio(y, rate=sr))
import sys
import os
import h5py
import json
import glob
import numpy as np
import librosa
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_stimuli
import util_misc
import util_figures
data_dir = '/om/scratch/Fri/msaddler/data_pitchnet/'
list_dataset_tag = [
# ('PND_v08/noise_TLAS_snr_neg10pos10/SPECTRAL_STATISTICS_v00/results_dict.json', 'Natural power spectrum'),
('PND_mfcc/debug_PNDv08PYSmatched12_TLASmatched12_snr_neg10pos10_phase3/SPECTRAL_STATISTICS_v00/results_dict.json', 'Synthetic power spectrum (12-MFCC-matched to natural)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural filter spectrum'),
# ('PND_mfcc/debug_PNDv08PYSmatched12_TLASmatched12_snr_neg10pos10_phase3/SPECTRAL_STATISTICS_v00/results_dict.json', 'Synthetic filter spectrum (12-MFCC-matched to natural)'),
# ('PND_mfcc/debug_PNDv08PYSnegated12_TLASmatched12_snr_neg10pos10_phase3/SPECTRAL_STATISTICS_v00/results_dict.json', 'Synthetic filter spectrum (12-MFCC-matched to natural)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10_filter_signalLPv01/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural foreground (lowpass-filtered)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10_filter_signalHPv00/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural foreground (highpass-filtered)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10/SPECTRAL_STATISTICS_v00/results_dict.json', 'Natural'),
# ('PND_v08/noise_TLAS_snr_neg10pos10_filter_signalLPv00/SPECTRAL_STATISTICS_v00/results_dict.json', 'Natural (lowpass_v00)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10_filter_signalLPv01/SPECTRAL_STATISTICS_v00/results_dict.json', 'Natural (lowpass_v01)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10_filter_signalHPv00/SPECTRAL_STATISTICS_v00/results_dict.json', 'Natural (highpass_v00)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural'),
# ('PND_v08/noise_TLAS_snr_neg10pos10_filter_signalLPv00/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural (lowpass_v00)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10_filter_signalLPv01/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural (lowpass_v01)'),
# ('PND_v08/noise_TLAS_snr_neg10pos10_filter_signalHPv00/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural (highpass_v00)'),
# ('PND_mfcc/PNDv08matched12_TLASmatched12_snr_neg10pos10_phase0/SPECTRAL_STATISTICS_v00/results_dict.json', 'Synthetic foreground (12-MFCC-matched to natural)'),
# ('PND_mfcc/debugPNDv08negated12_TLASmatched12_snr_neg10pos10_phase0/SPECTRAL_STATISTICS_v00/results_dict.json', 'Synthetic foreground (12-MFCC-negated to natural)'),
# ('PND_v08spch/noise_TLAS_snr_neg10pos10/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural speech'),
# ('PND_v08inst/noise_TLAS_snr_neg10pos10/PYSTRAIGHT_v01_foreground/results_dict.json', 'Natural instruments'),
]
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 4))
clist = 'krbgmyc'
for cidx, (dataset_tag, label_tag) in enumerate(list_dataset_tag):
fn_results_dict = os.path.join(data_dir, dataset_tag)
print(fn_results_dict)
with open(fn_results_dict, 'r') as f:
results_dict = json.load(f)
if 'PYSTRAIGHT_v01_foreground' in fn_results_dict:
key_fxx = 'mean_filter_spectrum_freqs'
key_pxx = 'mean_filter_spectrum'
key_n_fft = 'mean_filter_spectrum_n_fft'
else:
key_fxx = 'mean_power_spectrum_freqs'
key_pxx = 'mean_power_spectrum'
key_n_fft = 'mean_power_spectrum_n_fft'
for key in sorted(results_dict.keys()):
MEAN_FXX = np.array(results_dict[key][key_fxx])
MEAN_PXX = np.array(results_dict[key][key_pxx])
if 'PYSTRAIGHT_v01_foreground' in fn_results_dict:
MEAN_PXX -= 10*np.log10(20e-6)
sr = results_dict[key]['sr']
mfcc_mean = np.array(results_dict[key]['mfcc_mean'])
        mfcc_mean[12:] = 0  # keep only the first 12 MFCCs
mfcc_cov = np.array(results_dict[key]['mfcc_cov'])
n_fft = np.array(results_dict[key][key_n_fft])
M = librosa.filters.mel(sr, n_fft, n_mels=mfcc_mean.shape[0])
Minv = np.linalg.pinv(M)
kwargs_plot = {
'ls': '-',
'color': clist[cidx],
'label': '{} : {}'.format(label_tag, key),
}
if 'noise' in key:
kwargs_plot['ls'] = '--'
kwargs_plot['color'] = [0.5] * 3
kwargs_plot['label'] = None
ax.plot(MEAN_FXX,
MEAN_PXX,#-MEAN_PXX.max(),
**kwargs_plot)
# PXX_MFCC = 10*np.log10(util_stimuli.get_power_spectrum_from_mfcc(mfcc_mean, Minv))
# ax.plot(MEAN_FXX,
# PXX_MFCC,#-PXX_MFCC.max(),
# **kwargs_plot)
# sample_PXX_MFCC = np.zeros_like(PXX_MFCC)
# nsamples=50
# for _ in range(nsamples):
# mfcc = np.random.multivariate_normal(mfcc_mean, mfcc_cov)
# mfcc[6:] = 0
# sample_PXX_MFCC += 10*np.log10(util_stimuli.get_power_spectrum_from_mfcc(mfcc, Minv))
# sample_PXX_MFCC /= nsamples
# ax.plot(MEAN_FXX,
# sample_PXX_MFCC-sample_PXX_MFCC.max(),
# **kwargs_plot)
ax.legend(loc='upper right', ncol=1)
ax = util_figures.format_axes(ax,
xscale='log',
str_xlabel='Frequency (Hz)',
str_ylabel='Power (dB)',
xlimits=[40, None],
ylimits=[-20, None],
spines_to_hide=['right', 'top'])
plt.show()
import sys
import h5py
import numpy as np
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_misc
import util_stimuli
# fn = '/om/user/msaddler/data_pitchnet/neurophysiology/bernox2005_SlidingFixedFilter_lharm01to30_phase0_f0min080_f0max320/sr20000_cf100_species002_spont070_BW10eN1_IHC3000Hz_IHC7order/bez2018meanrates_009216-012288.hdf5'
# fn = '/om/scratch/Fri/msaddler/data_pitchnet/PND_v08/noise_TLAS_snr_neg10pos10/sr20000_cf100_species002_spont070_BW10eN1_IHC3000Hz_IHC7order/bez2018meanrates_098_007000-014000.hdf5'
# fn = '/om/scratch/Fri/msaddler/data_pitchnet/PND_v08inst/noise_TLAS_snr_neg10pos10/PND_sr32000_v08inst_1422630-1437000.hdf5'
fn = '/om/scratch/Fri/msaddler/data_pitchnet/PND_v08spch/noise_TLAS_snr_neg10pos10/PND_sr32000_v08spch_1422630-1437000.hdf5'
# fn_new = 'PND_v08inst_examples_for_metamers.hdf5'
fn_new = 'PND_v08spch_examples_for_metamers.hdf5'
np.random.seed(998)
data_dict = {}
with h5py.File(fn, 'r') as f:
for k in util_misc.get_hdf5_dataset_key_list(f):
# print(k, f[k])
if f[k].shape[0] == 1:
data_dict[k] = f[k][:]
sr = f['sr'][0]
key_signal = 'stimuli/signal'
N = 15
for itrN in range(N):
IDX = np.random.randint(low=0, high=f['nopad_f0_mean'].shape[0])
for k in util_misc.get_hdf5_dataset_key_list(f):
if f[k].shape[0] > 1:
if k not in data_dict:
data_dict[k] = []
data_dict[k].append(f[k][IDX])
idx_start = f['nopad_start_index'][IDX] - f['segment_start_index'][IDX]
idx_end = f['nopad_end_index'][IDX] - f['segment_start_index'][IDX]
y = f['stimuli/signal'][IDX, idx_start:idx_end]
y_preprocessed = y[0:int(0.05*sr)]
y_preprocessed = util_stimuli.set_dBSPL(y_preprocessed, 60.0)
if itrN == 0:
data_dict['y'] = []
data_dict['y_preprocessed'] = []
data_dict['f0'] = []
data_dict['y'].append(y)
data_dict['y_preprocessed'].append(y_preprocessed)
data_dict['f0'].append(f['nopad_f0_mean'][IDX])
# ipd.display(ipd.Audio(y_preprocessed, rate=sr))
# f_new = h5py.File(fn_new, 'w')
# for k in sorted(data_dict.keys()):
# data_dict[k] = np.array(data_dict[k])
# # print(k, data_dict[k].shape, data_dict[k].dtype)
# f_new.create_dataset(k, data=data_dict[k])
# f_new.close()
print('F0:', data_dict['f0'])
import sys
import h5py
import numpy as np
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_misc
import util_stimuli
fn = '/om4/group/mcdermott/user/msaddler/pitchnet_dataset/pitchnetDataset/assets/data/interim/swcDataframe_interim_processed2019-07-15-1830_processedFile__noNaN_sr32000.pdh5'
fn = '/om4/group/mcdermott/user/msaddler/pitchnet_dataset/pitchnetDataset/assets/data/interim/sr32000_pystraight/swc_183928-184492.hdf5'
with h5py.File(fn, 'r') as f:
for k in util_misc.get_hdf5_dataset_key_list(f):
print(k, f[k])
for _ in range(10):
IDX = np.random.randint(f['interp_signal'].shape[0])
y = f['interp_signal'][IDX]
sr = f['sr'][0]
ipd.display(ipd.Audio(y, rate=sr))
import sys
import h5py
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import IPython.display as ipd
sys.path.append('/packages/msutil')
import util_misc
import util_stimuli
# fn = '/om/user/msaddler/data_pitchnet/bernox2005/lowharm_v01/stim.hdf5'
fn = '/om/user/msaddler/data_pitchnet/bernox2005/neurophysiology_v01_EqualAmpTEN_lharm01to15_phase0_f0min080_f0max640/stim.hdf5'
fn = '/om/user/msaddler/data_pitchnet/bernox2005/neurophysiology_v01_EqualAmpTEN_lharm01to30_phase0_f0min080_f0max320/stim.hdf5'
with h5py.File(fn, 'r') as f:
for k in util_misc.get_hdf5_dataset_key_list(f):
print(k, f[k])
print(np.unique(f['max_audible_harm'][:]))
base_f0 = f['base_f0'][:]
print(np.unique(base_f0).shape, base_f0.min(), base_f0.max())
IDX = -10000
sr = f['config_tone/fs'][0]
y = f['tone_in_noise'][IDX]
print(util_stimuli.get_dBSPL(y))
fxx, pxx = util_stimuli.power_spectrum(y, sr)
fig, ax = plt.subplots(figsize=(12, 2))
ax.plot(fxx, pxx)
ax.set_xlim([0, sr/2])
ax.set_ylim([-30, None])
plt.show()
ipd.display(ipd.Audio(y, rate=sr))
import importlib
import stimuli_generate_BernsteinOxenhamPureTone
importlib.reload(stimuli_generate_BernsteinOxenhamPureTone)
# hdf5_filename = '/om/user/msaddler/data_pitchnet/bernox2005/puretone_v01/stim.hdf5'
# stimuli_generate_BernsteinOxenhamPureTone.main(
# hdf5_filename,
# fs=32e3,
# dur=0.150,
# f0_min=80.0,
# f0_max=10240.0,
# f0_n=50,
# dbspl_min=20.0,
# dbspl_max=60.0,
# dbspl_step=0.25,
# noise_dBHzSPL=10.0,
# noise_attenuation_start=600.0,
# noise_attenuation_slope=2,
# disp_step=100)
# hdf5_filename = '/om/user/msaddler/data_pitchnet/bernox2005/puretone_v02/stim.hdf5'
# stimuli_generate_BernsteinOxenhamPureTone.main(
# hdf5_filename,
# fs=32e3,
# dur=0.150,
# f0_min=80.0,
# f0_max=10240.0,
# f0_n=50,
# dbspl_min=20.0,
# dbspl_max=60.0,
# dbspl_step=0.25,
# noise_dBHzSPL=12.0,
# noise_attenuation_start=600.0,
# noise_attenuation_slope=2,
# disp_step=100)
# hdf5_filename = '/om/user/msaddler/data_pitchnet/bernox2005/puretone_v03/stim.hdf5'
# stimuli_generate_BernsteinOxenhamPureTone.main(
# hdf5_filename,
# fs=32e3,
# dur=0.150,
# f0_min=80.0,
# f0_max=10240.0,
# f0_n=50,
# dbspl_min=20.0,
# dbspl_max=60.0,
# dbspl_step=0.25,
# noise_dBHzSPL=8.0,
# noise_attenuation_start=600.0,
# noise_attenuation_slope=2,
# disp_step=100)
```
# Image Classification
The *Computer Vision* cognitive service provides useful pre-built models for working with images, but you'll often need to train your own model for computer vision. For example, suppose the Northwind Traders retail company wants to create an automated checkout system that identifies the grocery items customers want to buy based on an image taken by a camera at the checkout. To do this, you'll need to train a classification model that can classify the images to identify the item being purchased.
<p style='text-align:center'><img src='./images/image-classification.jpg' alt='A robot holding a clipboard, classifying pictures of an apple, a banana, and an orange'/></p>
In Azure, you can use the ***Custom Vision*** cognitive service to train an image classification model based on existing images. There are two elements to creating an image classification solution. First, you must train a model to recognize different classes using existing images. Then, when the model is trained you must publish it as a service that can be consumed by applications.
## Create a Custom Vision resource
To use the Custom Vision service, you need an Azure resource that you can use to *train* a model, and a resource with which you can *publish* it for applications to use. The resource for either (or both) tasks can be a general **Cognitive Services** resource, or a specific **Custom Vision** resource. You can use the same Cognitive Services resource for each of these tasks, or you can use different resources (in the same region) for each task to manage costs separately.
Use the following instructions to create a new **Custom Vision** resource.
1. In a new browser tab, open the Azure portal at [https://portal.azure.com](https://portal.azure.com), and sign in using the Microsoft account associated with your Azure subscription.
2. Select the **+Create a resource** button, search for *custom vision*, and create a **Custom Vision** resource with the following settings:
- **Create options**: Both
- **Subscription**: *Your Azure subscription*
- **Resource group**: *Create a new resource group with a unique name*
- **Name**: *Enter a unique name*
- **Training location**: *Choose any available region*
- **Training pricing tier**: F0
- **Prediction location**: *The same region as the training resource*
- **Prediction pricing tier**: F0
> **Note**: If you already have an F0 custom vision service in your subscription, select **S0** for this one.
3. Wait for the resources to be created, and note that two Custom Vision resources are provisioned; one for training, and another for prediction. You can view these by navigating to the resource group where you created them.
## Create a Custom Vision project
To train an image classification model, you need to create a Custom Vision project based on your training resource. To do this, you'll use the Custom Vision portal.
1. Download and extract the training images from https://aka.ms/fruit-images.
2. In another browser tab, open the Custom Vision portal at [https://customvision.ai](https://customvision.ai). If prompted, sign in using the Microsoft account associated with your Azure subscription and agree to the terms of service.
3. In the Custom Vision portal, create a new project with the following settings:
- **Name**: Grocery Checkout
- **Description**: Image classification for groceries
- **Resource**: *The Custom Vision resource you created previously*
- **Project Types**: Classification
- **Classification Types**: Multiclass (single tag per image)
- **Domains**: Food
4. Click **\[+\] Add images**, and select all of the files in the **apple** folder you extracted previously. Then upload the image files, specifying the tag *apple*, like this:
<p style='text-align:center'><img src='./images/upload_apples.jpg' alt='Upload apple with apple tag'/></p>
5. Repeat the previous step to upload the images in the **banana** folder with the tag *banana*, and the images in the **orange** folder with the tag *orange*.
6. Explore the images you have uploaded in the Custom Vision project - there should be 15 images of each class, like this:
<p style='text-align:center'><img src='./images/fruit.jpg' alt='Tagged images of fruit - 15 apples, 15 bananas, and 15 oranges'/></p>
7. In the Custom Vision project, above the images, click **Train** to train a classification model using the tagged images. Select the **Quick Training** option, and then wait for the training iteration to complete (this may take a minute or so).
8. When the model iteration has been trained, review the *Precision*, *Recall*, and *AP* performance metrics - these measure the prediction accuracy of the classification model, and should all be high.
## Test the model
Before publishing this iteration of the model for applications to use, you should test it.
1. Above the performance metrics, click **Quick Test**.
2. In the **Image URL** box, type `https://aka.ms/apple-image` and click ➔
3. View the predictions returned by your model - the probability score for *apple* should be the highest, like this:
<p style='text-align:center'><img src='./images/test-apple.jpg' alt='An image with a class prediction of apple'/></p>
4. Close the **Quick Test** window.
## Publish and consume the image classification model
Now you're ready to publish your trained model and use it from a client application.
9. Click **🗸 Publish** to publish the trained model with the following settings:
- **Model name**: groceries
- **Prediction Resource**: *The prediction resource you created previously*.
10. After publishing, click the *settings* (⚙) icon at the top right of the **Performance** page to view the project settings. Then, under **General** (on the left), copy the **Project Id** and paste it into the code cell below (replacing **YOUR_PROJECT_ID**).
<p style='text-align:center'><img src='./images/cv_project_settings.jpg' alt='Project ID in project settings'/></p>
> _**Note**: If you used a **Cognitive Services** resource instead of creating a **Custom Vision** resource at the beginning of this exercise, you can copy its key and endpoint from the right side of the project settings, paste it into the code cell below, and run it to see the results. Otherwise, continue completing the steps below to get the key and endpoint for your Custom Vision prediction resource._
11. At the top left of the **Project Settings** page, click the *Projects Gallery* (👁) icon to return to the Custom Vision portal home page, where your project is now listed.
12. On the Custom Vision portal home page, at the top right, click the *settings* (⚙) icon to view the settings for your Custom Vision service. Then, under **Resources**, expand your *prediction* resource (<u>not</u> the training resource) and copy its **Key** and **Endpoint** values to the code cell below, replacing **YOUR_KEY** and **YOUR_ENDPOINT**.
<p style='text-align:center'><img src='./images/cv_settings.jpg' alt='Prediction resource key and endpoint in custom vision settings'/></p>
13. Run the code cell below to set the variables to your project ID, key, and endpoint values.
```
project_id = 'YOUR_PROJECT_ID'
cv_key = 'YOUR_KEY'
cv_endpoint = 'YOUR_ENDPOINT'
model_name = 'groceries' # this must match the model name you set when publishing your model iteration (it's case-sensitive)!
print('Ready to predict using model {} in project {}'.format(model_name, project_id))
```
Client applications can use the details above to connect to and use your custom vision classification model.
Run the following code cell to classify a selection of test images using your published model.
> **Note**: Don't worry too much about the details of the code. It uses the Custom Vision SDK for Python to get a class prediction for each image in the /data/image-classification/test-fruit folder.
```
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
import matplotlib.pyplot as plt
from PIL import Image
import os
%matplotlib inline
# Get the test images from the data/vision/test folder
test_folder = os.path.join('data', 'image-classification', 'test-fruit')
test_images = os.listdir(test_folder)
# Create an instance of the prediction service
custom_vision_client = CustomVisionPredictionClient(cv_key, endpoint=cv_endpoint)
# Create a figure to display the results
fig = plt.figure(figsize=(16, 8))
# Get the images and show the predicted classes for each one
print('Classifying images in {} ...'.format(test_folder))
for i in range(len(test_images)):
# Open the image, and use the custom vision model to classify it
image_contents = open(os.path.join(test_folder, test_images[i]), "rb")
classification = custom_vision_client.classify_image(project_id, model_name, image_contents.read())
# The results include a prediction for each tag, in descending order of probability - get the first one
prediction = classification.predictions[0].tag_name
# Display the image with its predicted class
img = Image.open(os.path.join(test_folder, test_images[i]))
    a = fig.add_subplot(len(test_images)//3, 3, i+1)
a.axis('off')
imgplot = plt.imshow(img)
a.set_title(prediction)
plt.show()
```
Hopefully, your image classification model has correctly identified the groceries in the images.
## Learn more
The Custom Vision service offers more capabilities than we've explored in this exercise. For example, you can also use the Custom Vision service to create *object detection* models, which not only classify objects in images, but also identify *bounding boxes* that show the location of the object in the image.
To learn more about the Custom Vision cognitive service, view the [Custom Vision documentation](https://docs.microsoft.com/azure/cognitive-services/custom-vision-service/home).
## Deploy a simple S3 dispersed storage archive solution
#### Requirements
In order to be able to deploy this example deployment you will have to have the following components activated
- the 3Bot SDK, in the form of a local container with the SDK, or a grid-based SDK container. Getting started instructions are [here](https://github.com/Threefoldfoundation/info_projectX/tree/development/doc/jumpscale_SDK)
- if you use a locally installed container with the 3Bot SDK, you need to have the wireguard software installed. Instructions on how to get this installed on your platform can be found [here](https://www.wireguard.com/install/)
- capacity reservations are not free, so you will need to have some ThreeFold Tokens (TFT) to play around with. Instructions on how to get tokens can be found [here](https://github.com/Threefoldfoundation/info_projectX/blob/development/doc/jumpscale_SDK_information/payment/FreeTFT_testtoken.md)
After following these install instructions you should end up with a local, working TF Grid SDK. You can work with / connect to the installed SDK as described [here](https://github.com/Threefoldfoundation/info_projectX/blob/development/doc/jumpscale_SDK/SDK_getting_started.md)
### Overview
To design a simple S3 archive solution we need to follow a few simple steps:
- create (or identify and use) an overlay network that spans all of the nodes needed in the solution
- identify which nodes are involved in the archive for storage and which nodes are running the storage software
- create reservations on the storage nodes for low level storage. Create and deploy zero-DB's
- collect information of how to access and use the low level storage devices to be passed on to the S3 storage software
- design the architecture, data and parity disk design
- deploy the S3 software in a container
#### Create an overlay network or identify a previously deployed overlay network
Each overlay network is private and contains private IP addresses. Each overlay network is deployed in such a way that it has no direct connection to the public (IPv4 or IPv6) network. In order to work with such a network, a tunnel needs to be created between the overlay network on the grid and your local network. You can find instructions on how to do that [here](https://github.com/Threefoldfoundation/info_projectX/blob/development/doc/jumpscale_SDK_examples/network/overlay_network.md)
#### Set up the capacity environment to find, reserve and configure
Make sure that your SDK points to the mainnet explorer for deploying this capacity example, and make sure you have an identity loaded. The example code uses the default identity. Multiple identities can be stored in the TF Grid SDK. To check which identities are available to you, type `j.tools.threebot.me` in the kosmos shell.
```
from Jumpscale import j
import time
j.clients.explorer.default_addr_set('explorer.grid.tf')
# Which identities are available in your SDK
j.tools.threebot.me
# Make sure I have an identity (set the default one for mainnet or testnet)
me = j.tools.threebot.me.default
# Load the zero-os sal and create an empty reservation
zos = j.sal.zosv2
r = zos.reservation_create()
```
#### Setup your overlay network (skip this step if you have a network setup and available)
An overlay network creates a private peer-to-peer network over selected nodes. In this notebook it is assumed you have created one by following this [notebook](https://github.com/Threefoldfoundation/info_projectX/blob/development/code/jupyter/SDK_examples/network/overlay_network.ipynb)
#### Design the S3 simple storage solution
You have created a network in the network creation [notebook](https://github.com/Threefoldfoundation/info_projectX/blob/development/code/jupyter/SDK_examples/network/overlay_network.ipynb) with the following details:
```
demo_ip_range="172.20.0.0/16"
demo_port=8030
demo_network_name="demo_network_name_01"
```
When you executed the reservation it also provided you with data on the order number, node ID, and private network range on each node. All the nodes in the network are connected peer-to-peer with a wireguard tunnel. On these nodes we can now create a storage solution. For this solution we will use some of these nodes as raw storage provider nodes and others as storage application nodes. Using the output of the network reservation notebook, the high-level design of the storage solution is:
| Nr. | Location | Node ID | IPv4 network | Function |
|--------|---|---|---|---|
| 1 | Salzburg | 9kcLeTuseybGHGWw2YXvdu4kk2jZzyZCaCHV9t6Axqqx | 172.20.15.0/24 | Storage software container, 10GB raw |
| 2 | Salzburg | 3h4TKp11bNWjb2UemgrVwayuPnYcs2M1bccXvi3jPR2Y | 172.20.16.0/24 | 10GB raw |
| 3 | Salzburg | FUq4Sz7CdafZYV2qJmTe3Rs4U4fxtJFcnV6mPNgGbmRg | 172.20.17.0/24 | 10GB raw |
| 4 | Vienna | 9LmpYPBhnrL9VrboNmycJoGfGDjuaMNGsGQKeqrUMSii | 172.20.28.0/24 | 10GB raw |
| 5 | Vienna | 3FPB4fPoxw8WMHsqdLHamfXAdUrcRwdZY7hxsFQt3odL | 172.20.29.0/24 | 10GB raw |
| 6 | Vienna | CrgLXq3w2Pavr7XrVA7HweH6LJvLWnKPwUbttcNNgJX7 | 172.20.30.0/24 | 10GB raw |
#### Reserve and deploy the low level ZeroDB storage nodes
First let's deploy the low-level storage capacity managers (zero-DB, more info [here](https://github.com/Threefoldtech/0-DB)). In the next piece of code we do the following:
- create some empty reservation and result structures
- select and set the node that will run the S3 software container
- select and load the nodes in a list to push them into the zero-DB reservation structure
```
# load the zero-os sal
zos = j.sal.zosv2
day=24*60*60
hour=60*60
# Node: 5 ID: 9kcLeTuseybGHGWw2YXvdu4kk2jZzyZCaCHV9t6Axqqx IPv4 address: 172.20.15.0/24
minio_node_id = '9kcLeTuseybGHGWw2YXvdu4kk2jZzyZCaCHV9t6Axqqx'
minio_node_ip = '172.20.15.16'
# ----------------------------------------------------------------------------------
reservation_network = zos.reservation_create()
reservation_zdbs = zos.reservation_create()
reservation_storage = zos.reservation_create()
rid_network=0
rid_zdbs=0
rid_storage=0
password = "supersecret"
# ----------------------------------------------------------------------------------
# Select and create a reservation for nodes to deploy a ZDB
# first find the node where to reserve 0-DB namespaces. Select all the salzburg nodes
# ----------------------------------------------------------------------------------
nodes_salzburg = zos.nodes_finder.nodes_search(farm_id=12775) # (IPv6 nodes)
nodes_vienna_1 = zos.nodes_finder.nodes_search(farm_id=82872) # (IPv6 nodes)
# ----------------------------------------------------------------------------------
# Definition of functional nodes
# ----------------------------------------------------------------------------------
nodes_all = nodes_salzburg[5:8] + nodes_vienna_1[5:8]
# ----------------------------------------------------------------------------------
# Create ZDB reservation for the selected nodes
# ----------------------------------------------------------------------------------
for node in nodes_all:
zos.zdb.create(
reservation=reservation_zdbs,
node_id=node.node_id,
size=10,
mode='seq',
password='supersecret',
disk_type="SSD",
public=False)
```
#### Prepare and deploy the S3 software container
The nodes that will run the storage solution need some persistent storage. This creates a reservation for a volume on the same node the software runs on and attaches it as a volume to the container that will run the storage software. For the reservation duration, set a period of time that allows for experimenting; in this case it is set to one day.
```
# Storage solution reservation time
nr_of_hours=24
# ----------------------------------------------------------------------------------
# Attach persistent storage to container - for storing metadata
# ----------------------------------------------------------------------------------
volume = zos.volume.create(reservation_storage,minio_node_id,size=10,type='SSD')
volume_rid = zos.reservation_register(reservation_storage, j.data.time.epoch+(nr_of_hours*hour), identity=me)
results = zos.reservation_result(volume_rid)
# ----------------------------------------------------------------------------------
# Actuate the reservation for the ZDBs. The IP addresses are going to be self-assigned.
# ----------------------------------------------------------------------------------
expiration = j.data.time.epoch + (nr_of_hours*hour)
# register the reservation
rid_zdb = zos.reservation_register(reservation_zdbs, expiration, identity=me)
time.sleep(5)
results = zos.reservation_result(rid_zdb)
```
With the low-level zero-DB reservations done and the results stored in the `results` variable, these storage managers will get an IPv4 address assigned from the local `/24` node network. We need to store those addresses in `namespace_config` to pass them to the container running the storage software.
```
# ----------------------------------------------------------------------------------
# Read the IP address of the 0-DB namespaces after they are deployed
# we will need these IPs when creating the minio container
# ----------------------------------------------------------------------------------
namespace_config = []
for result in results:
data = result.data_json
cfg = f"{data['Namespace']}:{password}@[{data['IPs']}]:{data['Port']}"
namespace_config.append(cfg)
# All IP's for the zdb's are now known and stored in the namespace_config structure.
print(namespace_config)
```
```
['9012-4:supersecret@[2a04:7700:1003:1:54f0:edff:fe87:2c48]:9900', '9012-1:supersecret@[2a02:16a8:1000:0:5c2f:ddff:fe5a:1a70]:9900', '9012-2:supersecret@[2a02:16a8:1000:0:1083:59ff:fe38:ce71]:9900', '9012-7:supersecret@[2003:d6:2f32:8500:dc78:d6ff:fe04:7368]:9900', '9012-3:supersecret@[2a02:16a8:1000:0:fc7c:4aff:fec8:baf]:9900', '9012-5:supersecret@[2a04:7700:1003:1:acc0:2ff:fed3:1692]:9900', '9012-6:supersecret@[2a04:7700:1003:1:ac9d:f3ff:fe6a:47a9]:9900']
```
The last step is to design the redundancy policy for the storage solution. We have 6 low-level devices available (over 6 nodes, in 2 different data centers and cities), so we could build any of the following configurations:
| Option | data storage devices | parity storage devices | total devices | overhead |
|--------|---|---|---|---|
| 1 | 3 | 3 | 6 | 50% |
| 2 | 4 | 2 | 6 | 33% |
| 3 | 5 | 1 | 6 | 16% |
In this example the real efficiency of such a solution is not achieved; in a real-life deployment we would do something like this:
| Option | data storage devices | parity storage devices | total devices | overhead |
|--------|---|---|---|---|
| 4 | 16 | 4 | 20 | 20% |
In that case it is highly unlikely that 4 distributed devices will fail at the same time, which makes this a very robust storage solution.
Here we choose to deploy scenario 2 with 4 data disks and 2 parity disks.
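As a quick sanity check of the overhead column (this loop is only an illustration, not part of the SDK), the overhead is simply the share of devices spent on parity:
```
# Illustration only: overhead = parity devices / total devices
for data_disks, parity_disks in [(3, 3), (4, 2), (5, 1), (16, 4)]:
    total = data_disks + parity_disks
    print(f"{data_disks} data + {parity_disks} parity = {total} devices, "
          f"overhead {parity_disks / total:.1%}")
```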
```
# ----------------------------------------------------------------------------------
# With the low level disk managers done and the IP addresses discovered we can now build
# the reservation for the min.io S3 interface.
# ----------------------------------------------------------------------------------
reservation_minio = zos.reservation_create()
# Make sure to adjust the node_id and network name to the appropriate values when copy / pasting :-)
minio_container=zos.container.create(reservation=reservation_minio,
node_id=minio_node_id,
network_name=u_networkname,
ip_address=minio_node_ip,
                                     flist='https://hub.grid.tf/azmy.3bot/minio.flist',
interactive=False,
entrypoint='/bin/entrypoint',
cpu=2,
memory=2048,
env={
"SHARDS":','.join(namespace_config),
"DATA":"4",
"PARITY":"2",
"ACCESS_KEY":"minio",
"SECRET_KEY":"passwordpassword",
})
```
With the definition of the S3 container done we now need to attached persistent storage on a volume to store metadata.
```
# ----------------------------------------------------------------------------------
# Attach persistent storage to container - for storing metadata
# ----------------------------------------------------------------------------------
zos.volume.attach_existing(
container=minio_container,
volume_id=f'{volume_rid}-{volume.workload_id}',
mount_point='/data')
```
Last but not least, execute the reservation for the storage manager.
```
# ----------------------------------------------------------------------------------
# Write reservation for min.io container in BCDB - end user interface
# ----------------------------------------------------------------------------------
expiration = j.data.time.epoch + (nr_of_hours*hour)
# register the reservation
rid = zos.reservation_register(reservation_minio, expiration, identity=me)
time.sleep(5)
results = zos.reservation_result(rid)
```
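As a final sanity check, one could print the raw result payloads once they come back. This is a minimal sketch that assumes the min.io reservation results expose the same `data_json` attribute as the 0-DB results used earlier in this notebook.
```python
# Sketch: inspect the raw result payloads of the min.io reservation
# (assumes each result exposes `data_json`, like the 0-DB results above).
for result in results:
    print(result.data_json)
```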
|
github_jupyter
|
```
%pylab inline
import numpy as np
import pandas as pd
import scipy.stats
from matplotlib.backends.backend_pdf import PdfPages
import sys
sys.path.append("../errortools/")
import errortools
```
# Fitting and predicting
```
ndim = 3
fit_intercept = True
ndata = 100
p_true = [2, 0, -2, 0]
np.random.seed(42)
X = np.random.uniform(low=-1, high=1, size=ndim*ndata).reshape(ndata, ndim)
p = scipy.stats.logistic.cdf(np.dot(np.concatenate((X, np.ones((X.shape[0],1), dtype=float)), axis=1), p_true))
y = (p > np.random.uniform(size=ndata)).astype(int)
fig, ax = plt.subplots(1, 3, figsize=(15,4))
ax[0].plot(X[y==0,0], X[y==0,1], 'o', color='orange', alpha=0.2, markersize=5)
ax[0].plot(X[y==1,0], X[y==1,1], 'o', color='green', alpha=0.2, markersize=5)
ax[0].set_xlabel("x0")
ax[0].set_ylabel("x1")
ax[1].plot(X[y==0,0], X[y==0,2], 'o', color='orange', alpha=0.2, markersize=5)
ax[1].plot(X[y==1,0], X[y==1,2], 'o', color='green', alpha=0.2, markersize=5)
ax[1].set_xlabel("x0")
ax[1].set_ylabel("x2")
ax[2].plot(X[y==0,1], X[y==0,2], 'o', color='orange', alpha=0.2, markersize=5)
ax[2].plot(X[y==1,1], X[y==1,2], 'o', color='green', alpha=0.2, markersize=5)
ax[2].set_xlabel("x1")
ax[2].set_ylabel("x2");
model = errortools.LogisticRegression(fit_intercept=True)
model.fit(X,y)
fig, ax = plt.subplots(1, 3, figsize=(20,5))
nstddvs = 1
p = model.parameters
cvr_mtx = model.cvr_mtx
prc_mtx = np.linalg.inv(cvr_mtx)
u = np.linspace(-2, 2, 100).reshape(-1,1)
a = np.zeros((100,1), dtype=float)
x = np.concatenate((u, a, a), axis=1)
f = model.predict(x)
el1, eu1 = model.estimate_errors(x, nstddvs)
es = model.estimate_errors_sampling(x, 100)
el = model.estimate_errors_linear(x, 1)
g = scipy.stats.logistic.cdf(np.dot(np.concatenate((x,np.ones((x.shape[0],1))),axis=1), p_true))
ax[0].plot(u, g, '-', color='black', alpha=1, label="true curve")
ax[0].plot(u, f, '-', color='red', label="fitted curve")
ax[0].fill_between(x=u.ravel(), y1=f-el1, y2=f+eu1, alpha=0.3, color='green', label="error")
ax[0].fill_between(x=u.ravel(), y1=f-nstddvs*es, y2=f+nstddvs*es, alpha=0.3, color='orange', label="sampled error")
ax[0].fill_between(x=u.ravel(), y1=f-nstddvs*el, y2=f+nstddvs*el, alpha=0.3, color='blue', label="linear error")
ax[0].set_xlabel("x0")
ax[0].set_ylabel("logistic prob")
ax[0].legend()
x = np.concatenate((a, u, a), axis=1)
f = model.predict(x)
el1, eu1 = model.estimate_errors(x, nstddvs)
es = model.estimate_errors_sampling(x, 100)
el = model.estimate_errors_linear(x, 1)
g = scipy.stats.logistic.cdf(np.dot(np.concatenate((x,np.ones((x.shape[0],1))),axis=1), p_true))
ax[1].plot(u, g, '-', color='black', alpha=1, label="true curve")
ax[1].plot(u, f, '-', color='red', label="fitted curve")
ax[1].fill_between(x=u.ravel(), y1=f-el1, y2=f+eu1, alpha=0.3, color='green', label="error")
ax[1].fill_between(x=u.ravel(), y1=f-nstddvs*es, y2=f+nstddvs*es, alpha=0.3, color='orange', label="sampled error")
ax[1].fill_between(x=u.ravel(), y1=f-nstddvs*el, y2=f+nstddvs*el, alpha=0.3, color='blue', label="linear error")
ax[1].set_xlabel("x1")
ax[1].set_ylabel("logistic prob")
ax[1].legend()
x = np.concatenate((a, a, u), axis=1)
f = model.predict(x)
el1, eu1 = model.estimate_errors(x, nstddvs)
es = model.estimate_errors_sampling(x, 100)
el = model.estimate_errors_linear(x, 1)
g = scipy.stats.logistic.cdf(np.dot(np.concatenate((x,np.ones((x.shape[0],1))),axis=1), p_true))
ax[2].plot(u, g, '-', color='black', alpha=1, label="true curve")
ax[2].plot(u, f, '-', color='red', label="fitted curve")
ax[2].fill_between(x=u.ravel(), y1=f-el1, y2=f+eu1, alpha=0.3, color='green', label="error")
ax[2].fill_between(x=u.ravel(), y1=f-nstddvs*es, y2=f+nstddvs*es, alpha=0.3, color='orange', label="sampled error")
ax[2].fill_between(x=u.ravel(), y1=f-nstddvs*el, y2=f+nstddvs*el, alpha=0.3, color='blue', label="linear error")
ax[2].set_xlabel("x2")
ax[2].set_ylabel("logistic prob")
ax[2].legend();
```
# Create report (2 ways)
```
features = ['x1', 'x2', 'x3', 'bias']
with PdfPages('Report.pdf') as pdf:
errortools.errortools.report_correlation_matrix(model, features, pdf)
errortools.errortools.report_parameter_error(model, features, pdf)
errortools.errortools.report_loss_versus_approximation(model, X, y, 0, 0, features, pdf)
errortools.report_error_indivial_pred(model, X[0], 'x1', features, 0, 20, 100, pdf)
errortools.report_error_indivial_pred(model, X[0], 'x2', features, 0, 20, 100, pdf)
errortools.report_model_positive_ratio(model, X, y, 1000, 10, pdf)
errortools.report_error_test_samples(model, X, pdf)
pdf = errortools.errortools.report_correlation_matrix(model, features=features)
pdf = errortools.errortools.report_parameter_error(model, features, pdf)
pdf = errortools.errortools.report_loss_versus_approximation(model, X, y, 0, 0, features, pdf)
pdf = errortools.report_error_indivial_pred(model, X[0], 'x1', features, 0, 20, 100, pdf)
pdf = errortools.report_error_indivial_pred(model, X[0], 'x2', features, 0, 20, 100, pdf)
pdf = errortools.report_model_positive_ratio(model, X, y, 1000, 10, pdf)
pdf = errortools.report_error_test_samples(model, X, pdf)
pdf.close()
```
|
github_jupyter
|
```
import this
print("this is my first program. ")
len("fazlullah")
a = 10
a
type(a)
b = 45.5
type(b)
c = "fazlullah"
type(c)
d = 5+6j
type(d)
g = True
type(g)
*a = 67
_a = 88
type(a)
a = 34
type(_a)
a, b, c, d, e = 124,"fazlullah",6+8j,False,88.2
a
b
c
a = "sudh"
a+str(4)
True + True
True - False
1 + True
a = input()
a
a = input()
int(a)+8
a
pwd
c
c.conjugate()
c.imag
c.real
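# strings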
s = "sudh"
s[1]
s[2]
s[3]
s[4]
s[100]
s[-1]
a = "my name is sudh"
a[:10]
b = "ineuron"
b[:3]
b[:300]
b[300]
b[-1]
b[-100]
b[-1:-4]
b[0:4]
a = 'kumar'
a[0:300]
a[0:300:1]
a[0:300:2]
a[0:300:3]
a[0:100:-1]
a[-1:-4]
a[-1:-4:-1]
a[-1:-10:-1]
a[0:-10:-1]
a[::]
a[-2:]
a[-2:-1]
a[::-1]
a[-1::-1]
a = "I am working with ineuron"
a
a[::-1]
a[-5:5]
a[-5:5:-1]
a[-2:-10:-1]
"sudh"*3
"sudh" + " kumar"
a
len(a)
a.find('a')
a.find('i')
a.find('ia')
a.find('in')
a.count('i')
a
a.count('x')
l = a.split()
l[0]
l[1]
l[2]
l[0:3]
a.split('w')
a.split('wo')
a.upper()
s = "sUdh"
s.swapcase()
s.title()
s.capitalize()
b = "sudh"
c = "ineuron"
b.join(c)
" ".join("sudh")
for i in reversed("sudh"):
print(i)
s = " sudh "
s[::-1]
s.rstrip()
s.lstrip()
s.strip()
s = "sudh"
s.replace("u", "xyz")
s.replace("t","xyz")
"sudh\tkumar".expandtabs()
s.center(40,'t')
s.isupper()
s = "Sudh"
s.isupper()
s = "SUDH"
s.isupper()
s.islower()
s.isspace()
s = " sudh"
s.isspace()
s = " "
s.isspace()
s = "sudh"
s.isdigit()
s = "456321"
s.isdigit()
s = "sudh"
s.endswith('h')
s.endswith('x')
s.startswith('s')
s.istitle()
s.encode()
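# lists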
l = ["sudh", "kumar",3456,4+9j, True, 354.25]
type(l)
l[0]
l[-1]
l[-5]
l[0:4]
l[::-1]
l[-1:6]
l[0]
l[0][1]
l[3].real
l1 = ["sudh", "kumar",4587]
l2 = ["xyz","pqr",456.25]
l1+l2
l1 + ["sudh"]
l1*4
l1*2
l1
l3 = l1[0].replace("sudh","Faiz")
l3
l3
l1[0] = "Faiz"
l1
l4 = l[1].replace('k','s')
l4
l1
len(l1)
32547 in l1
l2
l2.append("sudh")
l2
l2.pop()
l2.pop(2)
l2
l2.append(345687)
l2.insert(1,"faiz")
l2
l2.insert(3,[325,'bukhari',"kumar"])
l2
l2[::-1]
l2.reverse()
l2
l1
l2
l2[1][2]
l2
l2.count('xyz')
l2.append("munger")
l2.append([3,4,54,6])
l2
l1
l1.extend(['faiz',3548,2.25,True])
l1
#3:03/4
#Tuples
t = (1,2,3,4,5)
type(t)
t1 = ("sudh",345,45+6j, 45.50, True)
l = ["sudh",345,45+6j, 45.50, True]
type(t1)
type(l)
t2 = ()
type(t2)
t1
l
l[0:2]
t1[0:2]
t1[::-1]
t1[-1]
t1
t1[0::2]
l
l1 = [4,5,6,7]
l1
l[0] = "kumar"
l
t1
t1[0] = "xyz"
t1
t2 = (34,56,56.5,654)
t1+t2
l+l1
t1
t1*2
t1.count("sudh")
t1.index("sudh")
t = (45,456,23.5,("sudh",4,5,6),("sudh"))
t
t1 = ([1,2,30],("sudh",456,23.5,45),"sudh")
t1
t1[0][1] = "faiz"
t1[0][1]
t1
t1[0] = "faiz"
list(t1)
l
tuple(l)
# set
l = [1,2,3,4,5,6,5,5,55,6,4,7,8,6,4,5,55,5,5,5]
set(l)
s = {}
type(s)
s1 = {1,2,3,4}
type(s1)
s2 = {1,11,2,3,1,1,2,3,4,4,4,5,6,6,6,2,0,1,1,4,2,2,1,1}
s2
s2[0]
list(s2)
s2
s2.add(1234)
s2
s2.add("faiz")
s2
s2.add([1,2,3,4])
{[3,4,5],3,45,56,4}
{(3,4,5),3,45,56,4}
s = {(3, 4, 5),(3, 4, 5), 3, 4, 45, 56, 3, 4, 45, 56}
s
s.remove(4)
s
s.discard(45)
s
s.discard(45)
s
s.remove(4)
# a set itself is mutable, but it can only hold immutable (hashable) elements
{"Faiz","faiz"}
{"sudh","Sudh"}
s = {1,2,3,4,4,5,1,2,1,2,3,2,4,"faiz","faiz"}
s
#dictionary
d = {}
type(d)
d = {1,5}
type(d)
d = {4:"sudh"}
d1 = {"key1":4554,"key2":"sudh",45:[3,4,5,6,8]}
d1
d1["key1"]
d1[45]
d = {3:["sudh",'faiz',4,5,6,4]}
d[3]
d = {_4:["sudh",'faiz',4,5,6,4]}
d = {.4:["sudh",'faiz',4,5,6,4]}
d = {"key":("sudh",'faiz',4,5,6,4)}
d = {"key":{"sudh",'faiz',4,5,6,4}}
d1 = {"key1":[2,3,4,5],"key2":"sudh","key1":45}
d1["key1"]
d1
d1 = {"key1":[2,3,4,5],"key2":"sudh","kumar":45}
d
d = {"name":"sudhanshu","mo_no":9873300865,"mail_id":"[email protected]","key1":[4,5,6,7],"key2":(3,4,5,6),
"key3":{4,7,8,5,78,5,4,5},"key4":{1:5,5:6}}
d
d["key3"]
type(d["key3"])
d["key4"]
d["key4"][5]
d.keys()
d.values()
d.items()
type(d.items())
d = {"key1":"sudh","key2":[1,2,3,4]}
d
d["key3"] = "kumar"
d
d[4] = [2,5,4,8,6]
d
d["key1"] = "Fazlullah"
d
del d["key1"]
d
del d
d
d1 = {"key1":"sudh","key2":[4,5,67,8]}
d1[[1,2,3]] = "ineuron"
d1
d1[(1,2,3)] = "ineuron"
d1
d1.get("key1")
d1 = {"key1":"ineuron","key":"FSDS"}
d2 = {"key2":456,"key3":[1,2,3,4,5]}
d1.update(d2)
d1
d2
d1+d2
t1 = ("faiz",1,1+5j,True)
t1.index(True)
set(t1)
d1
key = ("name","mobile_no","email_id")
value = "sudh"
d = d1.fromkeys(key,value)
d
#3:20/5
```
|
github_jupyter
|
# Deploy a Trained MXNet Model
In this notebook, we walk through the process of deploying a trained model to a SageMaker endpoint. If you recently ran [the notebook for training](get_started_mnist_deploy.ipynb) with the `%store` magic, the `model_data` can be restored. Otherwise, we retrieve the model artifact from a public S3 bucket.
```
# setups
import os
import json
import boto3
import sagemaker
from sagemaker.mxnet import MXNetModel
from sagemaker import get_execution_role, Session
sess = Session()
role = get_execution_role()
%store -r mx_mnist_model_data
try:
mx_mnist_model_data
except NameError:
import json
# copy a pretrained model from a public bucket to your default bucket
with open("code/config.json", "r") as f:
CONFIG = json.load(f)
bucket = CONFIG["public_bucket"]
s3 = boto3.client("s3")
key = "datasets/image/MNIST/model/mxnet-training-2020-11-21-01-38-01-009/model.tar.gz"
target = os.path.join("/tmp", "model.tar.gz")
s3.download_file(bucket, key, target)
# upload to default bucket
mx_mnist_model_data = sess.upload_data(
path=os.path.join("/tmp", "model.tar.gz"),
bucket=sess.default_bucket(),
key_prefix="model/mxnet",
)
print(mx_mnist_model_data)
```
## MXNet Model Object
The `MXNetModel` class allows you to define an environment for making inference using your
model artifact. Like the `MXNet` class we discussed [in this notebook for training an MXNet model](get_started_mnist_train.ipynb), it is a high-level API used to set up a Docker image for your model hosting service.
Once it is properly configured, it can be used to create a SageMaker
Endpoint on an EC2 instance. The SageMaker endpoint is a containerized environment that uses your trained model
to make inference on incoming data via RESTful API calls.
Some common parameters used to initiate the `MXNetModel` class are:
- entry_point: A user-defined Python file to be used by the inference container as the handler of incoming requests
- source_dir: The directory of the `entry_point`
- role: An IAM role to make AWS service requests
- model_data: the S3 bucket URI of the compressed model artifact. It can be a path to a local file if the endpoint is to be deployed on the SageMaker instance you are using to run this notebook (local mode)
- framework_version: version of the MXNet package to be used
- py_version: python version to be used
We elaborate on the `entry_point` below.
```
model = MXNetModel(
entry_point="inference.py",
source_dir="code",
role=role,
model_data=mx_mnist_model_data,
framework_version="1.7.0",
py_version="py3",
)
```
### Entry Point for the Inference Image
The model artifact pointed to by `model_data` is pulled by the `MXNetModel` and it is decompressed and saved in the Docker image it defines. Its contents become the regular model checkpoint files that you would produce outside SageMaker. This means that, in order to use your trained model for serving, you need to tell the `MXNetModel` class how to recover an MXNet model from the static checkpoint.
Also, since the deployed endpoint interacts via RESTful API calls, you need to tell it how to parse an incoming request for your model.
These two instructions need to be defined as two functions in the Python file pointed to by `entry_point`.
By convention, we name this entry point file `inference.py` and we put it in the `code` directory.
To tell the inference image how to load the model checkpoint, you need to implement a function called
`model_fn`. This function takes one positional argument
- `model_dir`: the directory of the static model checkpoints in the inference image.
The return of `model_fn` is an MXNet model. In this example, the `model_fn`
looks like:
```python
def model_fn(model_dir):
"""Load the gluon model. Called once when hosting service starts.
:param: model_dir The directory where model files are stored.
:return: a model (in this case a Gluon network)
"""
net = gluon.SymbolBlock.imports(
symbol_file=os.path.join(model_dir, 'compiled-symbol.json'),
input_names=['data'],
param_file=os.path.join(model_dir, 'compiled-0000.params'))
return net
```
Next, you need to tell the hosting service how to handle the incoming data. This includes:
* How to parse the incoming request
* How to use the trained model to make inference
* How to return the prediction to the caller of the service
You do it by implementing a function
called `transform_fn`. This function takes 4 positional arguments:
- `net`: the return from `model_fn`
- `data`: the payload of the incoming request
- `content_type`: the content type of the incoming request
- `accept_type`: the content type of the response
In this example, the `transform_fn` looks like:
```python
def transform_fn(net, data, input_content_type, output_content_type):
assert input_content_type=='application/json'
assert output_content_type=='application/json'
# parsed should be a 1d array of length 728
parsed = json.loads(data)
parsed = parsed['inputs']
# convert to numpy array
arr = np.array(parsed).reshape(-1, 1, 28, 28)
# convert to mxnet ndarray
nda = mx.nd.array(arr)
output = net(nda)
prediction = mx.nd.argmax(output, axis=1)
response_body = json.dumps(prediction.asnumpy().tolist())
return response_body, output_content_type
```
The `content_type` is used by the function to parse the `data`. In the example above, the function requires the content type of the payload to be a JSON string, and it parses the JSON string into a Python dictionary with `json.loads`. Moreover, it assumes the parsed dictionary contains a key `inputs` that maps to the input data to be consumed by the model. It also assumes the input data is a flattened 1D array representation that can be reshaped into a numpy array of shape (-1, 1, 28, 28), since the input images of an MXNet model follow the NCHW convention. It further assumes the input data is already normalized and can be readily consumed by the neural network.
After the inference, the function uses `accept_type` to encode the prediction into the content type of the response. In this example, the function requires the caller of the service to accept a JSON string. The return of `transform_fn` is always a tuple of the encoded response body and the content type to be accepted by the caller.
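To make this request/response contract concrete, below is a minimal sketch of calling such an endpoint through the low-level `sagemaker-runtime` API rather than the `Predictor` object used later in this notebook. The endpoint name is a placeholder, and the payload layout simply mirrors the `inputs` key assumed by the `transform_fn` above.
```python
import json
import boto3

runtime = boto3.client("sagemaker-runtime")

# Placeholder endpoint name -- substitute the name of your deployed endpoint.
payload = json.dumps({"inputs": [0.0] * 784})  # one flattened 28x28 "image"
response = runtime.invoke_endpoint(
    EndpointName="my-mxnet-mnist-endpoint",
    ContentType="application/json",  # matches input_content_type in transform_fn
    Accept="application/json",       # matches output_content_type in transform_fn
    Body=payload,
)
prediction = json.loads(response["Body"].read())
print(prediction)
```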
## Execute the inference container
Once the `MXNetModel` class is initiated, we can call its `deploy` method to run the container for the hosting service. Some common parameters needed to call the `deploy` method are:
- initial_instance_count: the number of SageMaker instances to be used to run the hosting service.
- instance_type: the type of SageMaker instance to run the hosting service. Set it to `local` if you want to run the hosting service on the local SageMaker instance. Local mode is typically used for debugging.
- serializer: A Python callable used to serialize (encode) the request data.
- deserializer: A Python callable used to deserialize (decode) the response data.
Commonly used serializers and deserializers are implemented in the `sagemaker.serializers` and `sagemaker.deserializers` submodules of the SageMaker Python SDK.
Since in the `transform_fn` we declared that the incoming requests are JSON-encoded, we need to use a JSON serializer to encode the request data into a JSON string. Also, since we declared the return content type to be a JSON string, we need to use a JSON deserializer to parse the response (in this case, into an integer representing the predicted hand-written digit).
<span style="color:red"> Note: local mode is not supported in SageMaker Studio </span>
```
from sagemaker.serializers import JSONSerializer
from sagemaker.deserializers import JSONDeserializer
# set local_mode to False if you want to deploy on a remote
# SageMaker instance
local_mode = False
if local_mode:
instance_type = "local"
else:
instance_type = "ml.c4.xlarge"
predictor = model.deploy(
initial_instance_count=1,
instance_type=instance_type,
serializer=JSONSerializer(),
deserializer=JSONDeserializer(),
)
```
The `predictor` we get above can be used to make prediction requests against a SageMaker endpoint. For more
information, check [the api reference for SageMaker Predictor](
https://sagemaker.readthedocs.io/en/stable/api/inference/predictors.html#sagemaker.predictor.Predictor)
Now, let's test the endpoint with some dummy data.
```
import random
dummy_data = {"inputs": [random.random() for _ in range(784)]}
```
In `transform_fn`, we declared that the parsed data is a python dictionary with a key `inputs` and its value should
be a 1D array of length 784. Hence, the definition of `dummy_data`.
```
res = predictor.predict(dummy_data)
print("Predicted digit:", *map(int, res))
```
If the input data does not look exactly like `dummy_data`, the endpoint will raise an exception. This is because
of the stringent way we defined the `transform_fn`. Let's test the following example.
```
dummy_data = [random.random() for _ in range(784)]
```
When the `dummy_data` is parsed in `transform_fn`, it does not have an `inputs` field, so `transform_fn` will crash.
```
# uncomment the following line to make inference on incorrectly formatted input data
# res = predictor.predict(dummy_data)
```
Now, let's use real MNIST test data to test the endpoint. We use helper functions defined in `code.utils` to download the MNIST data set and normalize the input data.
```
import random
import boto3
import matplotlib.pyplot as plt
import os
import numpy as np
import gzip
import json
%matplotlib inline
# Download MNIST test set from a public bucket
with open("code/config.json", "rb") as f:
CONFIG = json.load(f)
fname = "t10k-images-idx3-ubyte.gz"
bucket = CONFIG["public_bucket"]
key = "datasets/image/MNIST/" + fname
target = os.path.join("/tmp", fname)
s3 = boto3.client("s3")
if not os.path.exists(target):
s3.download_file(bucket, key, target)
# parse to numpy
with gzip.open(target, "rb") as f:
images = np.frombuffer(f.read(), np.uint8, offset=16).reshape(-1, 28, 28)
# randomly sample 16 images to inspect
mask = random.sample(range(images.shape[0]), 16)
samples = images[mask]
# plot the images
fig, axs = plt.subplots(nrows=1, ncols=16, figsize=(16, 1))
for i, splt in enumerate(axs):
splt.imshow(samples[i])
```
First, let us use the model to infer the samples one-by-one. This is the typical use case
for an online application.
```
# convert to float and normalize the input
def normalize(x, axis):
eps = np.finfo(float).eps
mean = np.mean(x, axis=axis, keepdims=True)
# avoid division by zero
std = np.std(x, axis=axis, keepdims=True) + eps
return (x - mean) / std
samples = normalize(samples.astype(np.float32), axis=(1, 2)) # mean 0; std 1
res = []
for img in samples:
data = {"inputs": img.flatten().tolist()}
res.append(predictor.predict(data)[0])
print("Predictions: ", *map(int, res))
```
Since in `transform_fn` the parsed numpy array can take on any value for its batch dimension, we can send the entire `samples` array at once and let the model do a batch inference.
```
data = {"inputs": samples.tolist()}
res = predictor.predict(data)
print("Predictions: ", *map(int, res))
```
## Test and debug the entry point before deployment
When deploying a model to a SageMaker endpoint, it is a good practice to test the entry
point. The following snippet shows you how you can test and debug the `model_fn` and
`transform_fn` you implemented in the entry point for the inference image.
```
!pygmentize code/test_inference.py
```
The `test` function simulates how the inference container works. It pulls the model artifact and loads the model into memory by calling `model_fn` and passing it `model_dir`. When it receives a request, it calls `transform_fn` and passes it the loaded model, the payload of the request, the request content type, and the response content type.
Implementing such a test function helps you debug the entry point before putting it into production. If `test` runs correctly, then you can be confident that if the incoming data and its content type are what they are supposed to be, the endpoint is going to work as expected.
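For illustration, a minimal test harness along these lines could look like the sketch below. This is not the actual content of `code/test_inference.py`; the model directory and the zero-valued payload are placeholders, and the script is assumed to sit next to `inference.py` so the import resolves.
```python
# Hypothetical local test for the entry point (placeholders throughout).
import json
import numpy as np
from inference import model_fn, transform_fn

def test(model_dir="/tmp/model"):
    # load the model the same way the hosting container would
    net = model_fn(model_dir)
    # build a fake request: one flattened 28x28 image of zeros
    payload = json.dumps({"inputs": np.zeros(784).tolist()})
    body, content_type = transform_fn(net, payload, "application/json", "application/json")
    print(content_type, body)

if __name__ == "__main__":
    test()
```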
## (Optional) Clean up
If you do not plan to use the endpoint, you should delete it to free up some compute resources. If you use local mode, you will need to manually delete the Docker container bound to port 8080 (the port that listens for incoming requests).
```
import os
if not local_mode:
predictor.delete_endpoint()
else:
# detach the inference container from port 8080 (in local mode)
os.system("docker container ls | grep 8080 | awk '{print $1}' | xargs docker container rm -f")
```
|
github_jupyter
|
```
%matplotlib inline
```
PyTorch 1.0 Distributed Trainer with Amazon AWS
===============================================
**Author**: `Nathan Inkawhich <https://github.com/inkawhich>`_
**Edited by**: `Teng Li <https://github.com/teng-li>`_
In this tutorial we will show how to setup, code, and run a PyTorch 1.0
distributed trainer across two multi-gpu Amazon AWS nodes. We will start
with describing the AWS setup, then the PyTorch environment
configuration, and finally the code for the distributed trainer.
Hopefully you will find that there is actually very little code change
required to extend your current training code to a distributed
application, and most of the work is in the one-time environment setup.
Amazon AWS Setup
----------------
In this tutorial we will run distributed training across two multi-gpu
nodes. In this section we will first cover how to create the nodes, then how to set up the security group so the nodes can communicate with each other.
Creating the Nodes
~~~~~~~~~~~~~~~~~~
In Amazon AWS, there are seven steps to creating an instance. To get
started, login and select **Launch Instance**.
**Step 1: Choose an Amazon Machine Image (AMI)** - Here we will select
the ``Deep Learning AMI (Ubuntu) Version 14.0``. As described, this
instance comes with many of the most popular deep learning frameworks
installed and is preconfigured with CUDA, cuDNN, and NCCL. It is a very
good starting point for this tutorial.
**Step 2: Choose an Instance Type** - Now, select the GPU compute unit
called ``p2.8xlarge``. Notice, each of these instances has a different
cost but this instance provides 8 NVIDIA Tesla K80 GPUs per node, and
provides a good architecture for multi-gpu distributed training.
**Step 3: Configure Instance Details** - The only setting to change here
is increasing the *Number of instances* to 2. All other configurations
may be left at default.
**Step 4: Add Storage** - Notice, by default these nodes do not come
with a lot of storage (only 75 GB). For this tutorial, since we are only
using the STL-10 dataset, this is plenty of storage. But, if you want to
train on a larger dataset such as ImageNet, you will have to add much
more storage just to fit the dataset and any trained models you wish to
save.
**Step 5: Add Tags** - Nothing to be done here, just move on.
**Step 6: Configure Security Group** - This is a critical step in the
configuration process. By default two nodes in the same security group
would not be able to communicate in the distributed training setting.
Here, we want to create a **new** security group for the two nodes to be
in. However, we cannot finish configuring in this step. For now, just
remember your new security group name (e.g. launch-wizard-12) then move
on to Step 7.
**Step 7: Review Instance Launch** - Here, review the instance then
launch it. By default, this will automatically start initializing the
two instances. You can monitor the initialization progress from the
dashboard.
Configure Security Group
~~~~~~~~~~~~~~~~~~~~~~~~
Recall that we were not able to properly configure the security group
when creating the instances. Once you have launched the instance, select
the *Network & Security > Security Groups* tab in the EC2 dashboard.
This will bring up a list of security groups you have access to. Select
the new security group you created in Step 6 (i.e. launch-wizard-12),
which will bring up tabs called *Description, Inbound, Outbound, and
Tags*. First, select the *Inbound* tab and *Edit* to add a rule to allow
"All Traffic" from "Sources" in the launch-wizard-12 security group.
Then select the *Outbound* tab and do the exact same thing. Now, we have
effectively allowed all Inbound and Outbound traffic of all types
between nodes in the launch-wizard-12 security group.
Necessary Information
~~~~~~~~~~~~~~~~~~~~~
Before continuing, we must find and remember the IP addresses of both
nodes. In the EC2 dashboard find your running instances. For both
instances, write down the *IPv4 Public IP* and the *Private IPs*. For
the remainder of the document, we will refer to these as the
**node0-publicIP**, **node0-privateIP**, **node1-publicIP**, and
**node1-privateIP**. The public IPs are the addresses we will use to SSH
in, and the private IPs will be used for inter-node communication.
Environment Setup
-----------------
The next critical step is the setup of each node. Unfortunately, we
cannot configure both nodes at the same time, so this process must be
done on each node separately. However, this is a one time setup, so once
you have the nodes configured properly you will not have to reconfigure
for future distributed training projects.
The first step, once logged onto the node, is to create a new conda
environment with python 3.6 and numpy. Once created activate the
environment.
::
$ conda create -n nightly_pt python=3.6 numpy
$ source activate nightly_pt
Next, we will install a nightly build of Cuda 9.0 enabled PyTorch with
pip in the conda environment.
::
$ pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cu90/torch_nightly.html
We must also install torchvision so we can use the torchvision model and
dataset. At this time, we must build torchvision from source as the pip
installation will by default install an old version of PyTorch on top of
the nightly build we just installed.
::
$ cd
$ git clone https://github.com/pytorch/vision.git
$ cd vision
$ python setup.py install
And finally, **VERY IMPORTANT** step is to set the network interface
name for the NCCL socket. This is set with the environment variable
``NCCL_SOCKET_IFNAME``. To get the correct name, run the ``ifconfig``
command on the node and look at the interface name that corresponds to
the node's *privateIP* (e.g. ens3). Then set the environment variable as
::
$ export NCCL_SOCKET_IFNAME=ens3
Remember, do this on both nodes. You may also consider adding the
NCCL\_SOCKET\_IFNAME setting to your *.bashrc*. An important observation
is that we did not set up a shared filesystem between the nodes.
Therefore, each node will have to have a copy of the code and a copy of
the datasets. For more information about setting up a shared network
filesystem between nodes, see
`here <https://aws.amazon.com/blogs/aws/amazon-elastic-file-system-shared-file-storage-for-amazon-ec2/>`__.
Distributed Training Code
-------------------------
With the instances running and the environments setup we can now get
into the training code. Most of the code here has been taken from the
`PyTorch ImageNet
Example <https://github.com/pytorch/examples/tree/master/imagenet>`__
which also supports distributed training. This code provides a good
starting point for a custom trainer as it has much of the boilerplate
training loop, validation loop, and accuracy tracking functionality.
However, you will notice that the argument parsing and other
non-essential functions have been stripped out for simplicity.
In this example we will use
`torchvision.models.resnet18 <https://pytorch.org/docs/stable/torchvision/models.html#torchvision.models.resnet18>`__
model and will train it on the
`torchvision.datasets.STL10 <https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.STL10>`__
dataset. To accommodate the dimensionality mismatch of STL-10 with Resnet18, we will resize each image to 224x224 with a transform. Notice, the choice of model and dataset is orthogonal to the distributed training code; you may use any dataset and model you wish and the process is the same. Let's get started by first handling the imports and talking about some helper functions. Then we will define the train and
test functions, which have been largely taken from the ImageNet Example.
At the end, we will build the main part of the code which handles the
distributed training setup. And finally, we will discuss how to actually
run the code.
Imports
~~~~~~~
The important distributed training specific imports here are
`torch.nn.parallel <https://pytorch.org/docs/stable/nn.html#torch.nn.parallel.DistributedDataParallel>`__,
`torch.distributed <https://pytorch.org/docs/stable/distributed.html>`__,
`torch.utils.data.distributed <https://pytorch.org/docs/stable/data.html#torch.utils.data.distributed.DistributedSampler>`__,
and
`torch.multiprocessing <https://pytorch.org/docs/stable/multiprocessing.html>`__.
It is also important to set the multiprocessing start method to *spawn*
or *forkserver* (only supported in Python 3),
as the default is *fork* which may cause deadlocks when using multiple
worker processes for dataloading.
```
import time
import sys
import torch
if __name__ == '__main__':
torch.multiprocessing.set_start_method('spawn')
import torch.nn as nn
import torch.nn.parallel
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from torch.multiprocessing import Pool, Process
```
Helper Functions
~~~~~~~~~~~~~~~~
We must also define some helper functions and classes that will make
training easier. The ``AverageMeter`` class tracks training statistics
like accuracy and iteration count. The ``accuracy`` function computes
and returns the top-k accuracy of the model so we can track learning
progress. Both are provided for training convenience but neither are
distributed training specific.
```
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
```
Train Functions
~~~~~~~~~~~~~~~
To simplify the main loop, it is best to separate a training epoch step
into a function called ``train``. This function trains the input model
for one epoch of the *train\_loader*. The only distributed training
artifact in this function is setting the
`non\_blocking <https://pytorch.org/docs/stable/notes/cuda.html#use-pinned-memory-buffers>`__
attributes of the data and label tensors to ``True`` before the forward
pass. This allows asynchronous GPU copies of the data meaning transfers
can be overlapped with computation. This function also outputs training
statistics along the way so we can track progress throughout the epoch.
The other function to define here is ``adjust_learning_rate``, which
decays the initial learning rate at a fixed schedule. This is another
boilerplate trainer function that is useful to train accurate models.
```
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# Create non_blocking tensors for distributed training
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradients in a backward pass
optimizer.zero_grad()
loss.backward()
# Call step of optimizer to update model params
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 10 == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def adjust_learning_rate(initial_lr, optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = initial_lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
```
Validation Function
~~~~~~~~~~~~~~~~~~~
To track generalization performance and simplify the main loop further
we can also extract the validation step into a function called
``validate``. This function runs a full validation step of the input
model on the input validation dataloader and returns the top-1 accuracy
of the model on the validation set. Again, you will notice the only
distributed training feature here is setting ``non_blocking=True`` for
the training data and labels before they are passed to the model.
```
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 100 == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
```
Inputs
~~~~~~
With the helper functions out of the way, now we have reached the
interesting part. Here is where we will define the inputs for the run.
Some of the inputs are standard model training inputs such as batch size
and number of training epochs, and some are specific to our distributed
training task. The required inputs are:
- **batch\_size** - batch size for *each* process in the distributed
training group. Total batch size across distributed model is
batch\_size\*world\_size
- **workers** - number of worker processes used with the dataloaders in
each process
- **num\_epochs** - total number of epochs to train for
- **starting\_lr** - starting learning rate for training
- **world\_size** - number of processes in the distributed training
environment
- **dist\_backend** - backend to use for distributed training
communication (i.e. NCCL, Gloo, MPI, etc.). In this tutorial, since
we are using several multi-gpu nodes, NCCL is suggested.
- **dist\_url** - URL to specify the initialization method of the
process group. This may contain the IP address and port of the rank0
process or be a non-existent file on a shared file system. Here,
since we do not have a shared file system this will incorporate the
**node0-privateIP** and the port on node0 to use.
```
print("Collect Inputs...")
# Batch Size for training and testing
batch_size = 32
# Number of additional worker processes for dataloading
workers = 2
# Number of epochs to train for
num_epochs = 2
# Starting Learning Rate
starting_lr = 0.1
# Number of distributed processes
world_size = 4
# Distributed backend type
dist_backend = 'nccl'
# Url used to setup distributed training
dist_url = "tcp://172.31.22.234:23456"
```
Initialize process group
~~~~~~~~~~~~~~~~~~~~~~~~
One of the most important parts of distributed training in PyTorch is to
properly setup the process group, which is the **first** step in
initializing the ``torch.distributed`` package. To do this, we will use
the ``torch.distributed.init_process_group`` function which takes
several inputs. First, a *backend* input which specifies the backend to
use (i.e. NCCL, Gloo, MPI, etc.). An *init\_method* input which is
either a url containing the address and port of the rank0 machine or a
path to a non-existent file on the shared file system. Note, to use the
file init\_method, all machines must have access to the file, similarly
for the url method, all machines must be able to communicate on the
network so make sure to configure any firewalls and network settings to
accommodate this. The *init\_process\_group* function also takes *rank* and
*world\_size* arguments which specify the rank of this process when run
and the number of processes in the collective, respectively.
The *init\_method* input can also be "env://". In this case, the address
and port of the rank0 machine will be read from the following two
environment variables respectively: MASTER_ADDR, MASTER_PORT. If *rank*
and *world\_size* arguments are not specified in the *init\_process\_group*
function, they both can be read from the following two environment
variables respectively as well: RANK, WORLD_SIZE.
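For example, a minimal sketch of the "env://" variant might look like the following; the address, port, rank and world size values are placeholders that would normally be exported in the shell before launching each process.
```python
# Sketch of env:// initialization (placeholder values; normally exported in the shell).
import os
import torch.distributed as dist

os.environ["MASTER_ADDR"] = "172.31.22.234"  # node0-privateIP
os.environ["MASTER_PORT"] = "23456"
os.environ["RANK"] = "0"                     # unique global rank of this process
os.environ["WORLD_SIZE"] = "4"

dist.init_process_group(backend="nccl", init_method="env://")
```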
Another important step, especially when each node has multiple gpus is
to set the *local\_rank* of this process. For example, if you have two
nodes, each with 8 GPUs and you wish to train with all of them then
$world\_size=16$ and each node will have a process with local rank
0-7. This local\_rank is used to set the device (i.e. which GPU to use)
for the process and later used to set the device when creating a
distributed data parallel model. It is also recommended to use NCCL
backend in this hypothetical environment as NCCL is preferred for
multi-gpu nodes.
```
print("Initialize Process Group...")
# Initialize Process Group
# v1 - init with url
dist.init_process_group(backend=dist_backend, init_method=dist_url, rank=int(sys.argv[1]), world_size=world_size)
# v2 - init with file
# dist.init_process_group(backend="nccl", init_method="file:///home/ubuntu/pt-distributed-tutorial/trainfile", rank=int(sys.argv[1]), world_size=world_size)
# v3 - init with environment variables
# dist.init_process_group(backend="nccl", init_method="env://", rank=int(sys.argv[1]), world_size=world_size)
# Establish Local Rank and set device on this node
local_rank = int(sys.argv[2])
dp_device_ids = [local_rank]
torch.cuda.set_device(local_rank)
```
Initialize Model
~~~~~~~~~~~~~~~~
The next major step is to initialize the model to be trained. Here, we
will use a resnet18 model from ``torchvision.models`` but any model may
be used. First, we initialize the model and place it in GPU memory.
Next, we make the model ``DistributedDataParallel``, which handles the
distribution of the data to and from the model and is critical for
distributed training. The ``DistributedDataParallel`` module also
handles the averaging of gradients across the world, so we do not have
to explicitly average the gradients in the training step.
It is important to note that this is a blocking function, meaning
program execution will wait at this function until *world\_size*
processes have joined the process group. Also, notice we pass our device
ids list as a parameter which contains the local rank (i.e. GPU) we are
using. Finally, we specify the loss function and optimizer to train the
model with.
```
print("Initialize Model...")
# Construct Model
model = models.resnet18(pretrained=False).cuda()
# Make model DistributedDataParallel
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=dp_device_ids, output_device=local_rank)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), starting_lr, momentum=0.9, weight_decay=1e-4)
```
Initialize Dataloaders
~~~~~~~~~~~~~~~~~~~~~~
The last step in preparation for the training is to specify which
dataset to use. Here we use the `STL-10
dataset <https://cs.stanford.edu/~acoates/stl10/>`__ from
`torchvision.datasets.STL10 <https://pytorch.org/docs/stable/torchvision/datasets.html#torchvision.datasets.STL10>`__.
The STL10 dataset is a 10 class dataset of 96x96px color images. For use
with our model, we resize the images to 224x224px in the transform. One
distributed training specific item in this section is the use of the
``DistributedSampler`` for the training set, which is designed to be
used in conjunction with ``DistributedDataParallel`` models. This object
handles the partitioning of the dataset across the distributed
environment so that not all models are training on the same subset of
data, which would be counterproductive. Finally, we create the
``DataLoader``'s which are responsible for feeding the data to the
processes.
The STL-10 dataset will automatically download on the nodes if they are
not present. If you wish to use your own dataset you should download the
data, write your own dataset handler, and construct a dataloader for
your dataset here.
```
print("Initialize Dataloaders...")
# Define the transform for the data. Notice, we must resize to 224x224 with this dataset and model.
transform = transforms.Compose(
[transforms.Resize(224),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Initialize Datasets. STL10 will automatically download if not present
trainset = datasets.STL10(root='./data', split='train', download=True, transform=transform)
valset = datasets.STL10(root='./data', split='test', download=True, transform=transform)
# Create DistributedSampler to handle distributing the dataset across nodes when training
# This can only be called after torch.distributed.init_process_group is called
train_sampler = torch.utils.data.distributed.DistributedSampler(trainset)
# Create the Dataloaders to feed data to the training and validation steps
train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=(train_sampler is None), num_workers=workers, pin_memory=False, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(valset, batch_size=batch_size, shuffle=False, num_workers=workers, pin_memory=False)
```
Training Loop
~~~~~~~~~~~~~
The last step is to define the training loop. We have already done most
of the work for setting up the distributed training so this is not
distributed training specific. The only detail is setting the current
epoch count in the ``DistributedSampler``, as the sampler shuffles the
data going to each process deterministically based on epoch. After
updating the sampler, the loop runs a full training epoch, runs a full
validation step then prints the performance of the current model against
the best performing model so far. After training for num\_epochs, the
loop exits and the tutorial is complete. Notice, since this is an
exercise we are not saving models but one may wish to keep track of the
best performing model then save it at the end of training (see
`here <https://github.com/pytorch/examples/blob/master/imagenet/main.py#L184>`__).
```
best_prec1 = 0
for epoch in range(num_epochs):
# Set epoch count for DistributedSampler
train_sampler.set_epoch(epoch)
# Adjust learning rate according to schedule
adjust_learning_rate(starting_lr, optimizer, epoch)
# train for one epoch
print("\nBegin Training Epoch {}".format(epoch+1))
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
print("Begin Validation @ Epoch {}".format(epoch+1))
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint if desired
# is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
print("Epoch Summary: ")
print("\tEpoch Accuracy: {}".format(prec1))
print("\tBest Accuracy: {}".format(best_prec1))
```
Running the Code
----------------
Unlike most of the other PyTorch tutorials, this code may not be run
directly out of this notebook. To run, download the .py version of this
file (or convert it using
`this <https://gist.github.com/chsasank/7218ca16f8d022e02a9c0deb94a310fe>`__)
and upload a copy to both nodes. The astute reader would have noticed
that we hardcoded the **node0-privateIP** and $world\_size=4$ but
input the *rank* and *local\_rank* inputs as arg[1] and arg[2] command
line arguments, respectively. Once uploaded, open two ssh terminals into
each node.
- On the first terminal for node0, run ``$ python main.py 0 0``
- On the second terminal for node0 run ``$ python main.py 1 1``
- On the first terminal for node1, run ``$ python main.py 2 0``
- On the second terminal for node1 run ``$ python main.py 3 1``
The programs will start and wait after printing "Initialize Model..."
for all four processes to join the process group. Notice the first
argument is not repeated as this is the unique global rank of the
process. The second argument is repeated as that is the local rank of
the process running on the node. If you run ``nvidia-smi`` on each node,
you will see two processes on each node, one running on GPU0 and one on
GPU1.
We have now completed the distributed training example! Hopefully you
can see how you would use this tutorial to help train your own models on
your own datasets, even if you are not using the exact same distributed
envrionment. If you are using AWS, don't forget to **SHUT DOWN YOUR
NODES** if you are not using them or you may find an uncomfortably large
bill at the end of the month.
**Where to go next**
- Check out the `launcher
utility <https://pytorch.org/docs/stable/distributed.html#launch-utility>`__
for a different way of kicking off the run
- Check out the `torch.multiprocessing.spawn
utility <https://pytorch.org/docs/master/multiprocessing.html#spawning-subprocesses>`__
for another easy way of kicking off multiple distributed processes.
`PyTorch ImageNet Example <https://github.com/pytorch/examples/tree/master/imagenet>`__
has it implemented and can demonstrate how to use it.
- If possible, setup a NFS so you only need one copy of the dataset
|
github_jupyter
|
```
# Copyright 2021 Google LLC
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
# Notebook authors: Kevin P. Murphy ([email protected])
# and Mahmoud Soliman ([email protected])
# This notebook reproduces figures for chapter 1 from the book
# "Probabilistic Machine Learning: An Introduction"
# by Kevin Murphy (MIT Press, 2021).
# Book pdf is available from http://probml.ai
```
<a href="https://opensource.org/licenses/MIT" target="_parent"><img src="https://img.shields.io/github/license/probml/pyprobml"/></a>
<a href="https://colab.research.google.com/github/probml/pml-book/blob/main/pml1/figure_notebooks/chapter1_introduction_figures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Figure 1.1:<a name='1.1'></a> <a name='iris'></a>
Three types of Iris flowers: Setosa, Versicolor and Virginica. Used with kind permission of Dennis Kramb and SIGNA
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.1_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.1_B.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.1_C.png" width="256"/>
## Figure 1.2:<a name='1.2'></a> <a name='cat'></a>
Illustration of the image classification problem. From https://cs231n.github.io/ . Used with kind permission of Andrej Karpathy
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.2.png" width="256"/>
## Figure 1.3:<a name='1.3'></a> <a name='irisPairs'></a>
Visualization of the Iris data as a pairwise scatter plot. On the diagonal we plot the marginal distribution of each feature for each class. The off-diagonals contain scatterplots of all possible pairs of features.
Figure(s) generated by [iris_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_plot.py)
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
try_deimport()
%run -n iris_plot.py
```
## Figure 1.4:<a name='1.4'></a> <a name='dtreeIrisDepth2'></a>
Example of a decision tree of depth 2 applied to the Iris data, using just the petal length and petal width features. Leaf nodes are color coded according to the predicted class. The number of training samples that pass from the root to a node is shown inside each box; we show how many values of each class fall into this node. This vector of counts can be normalized to get a distribution over class labels for each node. We can then pick the majority class. Adapted from Figures 6.1 and 6.2 of <a href='#Geron2019'>[Aur19]</a> .
To reproduce this figure, click the open in colab button: <a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/master/notebooks/iris_dtree.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.4_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.4_B.png" width="256"/>
## Figure 1.5:<a name='1.5'></a> <a name='linreg'></a>
(a) Linear regression on some 1d data. (b) The vertical lines denote the residuals between the observed output value for each input (blue circle) and its predicted value (red cross). The goal of least squares regression is to pick a line that minimizes the sum of squared residuals.
Figure(s) generated by [linreg_residuals_plot.py](https://github.com/probml/pyprobml/blob/master/scripts/linreg_residuals_plot.py)
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
try_deimport()
%run -n linreg_residuals_plot.py
```
## Figure 1.6:<a name='1.6'></a> <a name='polyfit2d'></a>
Linear and polynomial regression applied to 2d data. Vertical axis is temperature, horizontal axes are location within a room. Data was collected by some remote sensing motes at Intel's lab in Berkeley, CA (data courtesy of Romain Thibaux). (a) The fitted plane has the form $ f ( \bm x ) = w_0 + w_1 x_1 + w_2 x_2$. (b) Temperature data is fitted with a quadratic of the form $ f ( \bm x ) = w_0 + w_1 x_1 + w_2 x_2 + w_3 x_1^2 + w_4 x_2^2$.
Figure(s) generated by [linreg_2d_surface_demo.py](https://github.com/probml/pyprobml/blob/master/scripts/linreg_2d_surface_demo.py)
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
try_deimport()
%run -n linreg_2d_surface_demo.py
```
## Figure 1.7:<a name='1.7'></a> <a name='linregPoly'></a>
(a-c) Polynomials of degrees 2, 14 and 20 fit to 21 datapoints (the same data as in Figure 1.5). (d) MSE vs degree.
Figure(s) generated by [linreg_poly_vs_degree.py](https://github.com/probml/pyprobml/blob/master/scripts/linreg_poly_vs_degree.py)
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
try_deimport()
%run -n linreg_poly_vs_degree.py
```
## Figure 1.8:<a name='1.8'></a> <a name='eqn:irisClustering'></a>
(a) A scatterplot of the petal features from the iris dataset. (b) The result of unsupervised clustering using $K=3$.
Figure(s) generated by [iris_kmeans.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_kmeans.py)
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
try_deimport()
%run -n iris_kmeans.py
```
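If you just want the gist of the clustering step without running the script, here is a minimal sketch (assuming scikit-learn is installed) that clusters the two petal features of the iris dataset with $K=3$, ignoring the class labels:
```
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris

X_petal = load_iris().data[:, 2:4]       # petal length and petal width only

kmeans = KMeans(n_clusters=3, n_init=10, random_state=0).fit(X_petal)
print("cluster sizes:", [int((kmeans.labels_ == k).sum()) for k in range(3)])
print("cluster centers (petal length, petal width):")
print(kmeans.cluster_centers_.round(2))
```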
## Figure 1.9:<a name='1.9'></a> <a name='pcaDemo'></a>
(a) Scatterplot of iris data (first 3 features). Points are color coded by class. (b) We fit a 2d linear subspace to the 3d data using PCA. The class labels are ignored. Red dots are the original data, black dots are points generated from the model using $ \bm x = \mathbf W \bm z + \bm \mu $, where $ \bm z $ are latent points on the underlying inferred 2d linear manifold.
Figure(s) generated by [iris_pca.py](https://github.com/probml/pyprobml/blob/master/scripts/iris_pca.py)
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
try_deimport()
%run -n iris_pca.py
```
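The reconstruction $ \bm x = \mathbf W \bm z + \bm \mu $ in the caption can be checked directly with scikit-learn's PCA. The sketch below (assuming scikit-learn is installed) mirrors the figure by fitting a 2d subspace to the first three iris features and reconstructing the points:
```
import numpy as np
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA

X = load_iris().data[:, :3]              # first 3 features, class labels ignored

pca = PCA(n_components=2).fit(X)
Z = pca.transform(X)                     # latent points z on the 2d subspace
W, mu = pca.components_.T, pca.mean_     # W is 3x2, mu is the data mean

X_recon = Z @ W.T + mu                   # x = W z + mu
print("mean reconstruction error:", np.mean(np.linalg.norm(X - X_recon, axis=1)))
print("matches pca.inverse_transform:", np.allclose(X_recon, pca.inverse_transform(Z)))
```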
## Figure 1.10:<a name='1.10'></a> <a name='humanoid'></a>
Examples of some control problems. (a) Space Invaders Atari game. From https://gym.openai.com/envs/SpaceInvaders-v0/ . (b) Controlling a humanoid robot in the MuJoCo simulator so it walks as fast as possible without falling over. From https://gym.openai.com/envs/Humanoid-v2/
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.10_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.10_B.png" width="256"/>
## Figure 1.11:<a name='1.11'></a> <a name='cake'></a>
The three types of machine learning visualized as layers of a chocolate cake. This figure (originally from https://bit.ly/2m65Vs1 ) was used in a talk by Yann LeCun at NIPS'16, and is used with his kind permission
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.11.png" width="256"/>
## Figure 1.12:<a name='1.12'></a> <a name='emnist'></a>
(a) Visualization of the MNIST dataset. Each image is $28 \times 28$. There are 60k training examples and 10k test examples. We show the first 25 images from the training set.
Figure(s) generated by [mnist_viz_tf.py](https://github.com/probml/pyprobml/blob/master/scripts/mnist_viz_tf.py) [emnist_viz_pytorch.py](https://github.com/probml/pyprobml/blob/master/scripts/emnist_viz_pytorch.py)
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
try_deimport()
%run -n mnist_viz_tf.py
try_deimport()
%run -n emnist_viz_pytorch.py
```
## Figure 1.13:<a name='1.13'></a> <a name='CIFAR'></a>
(a) Visualization of the Fashion-MNIST dataset <a href='#fashion'>[XRV17]</a> . The dataset has the same size as MNIST, but is harder to classify. There are 10 classes: T-shirt/top, Trouser, Pullover, Dress, Coat, Sandal, Shirt, Sneaker, Bag, Ankle-boot. We show the first 25 images from the training set.
Figure(s) generated by [fashion_viz_tf.py](https://github.com/probml/pyprobml/blob/master/scripts/fashion_viz_tf.py) [cifar_viz_tf.py](https://github.com/probml/pyprobml/blob/master/scripts/cifar_viz_tf.py)
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
try_deimport()
%run -n fashion_viz_tf.py
try_deimport()
%run -n cifar_viz_tf.py
```
## Figure 1.14:<a name='1.14'></a> <a name='imagenetError'></a>
(a) Sample images from the **ImageNet** dataset <a href='#ILSVRC15'>[Rus+15]</a>. This subset consists of 1.3M color training images, each of which is $256 \times 256$ pixels in size. There are 1000 possible labels, one per image, and the task is to minimize the top-5 error rate, i.e., to ensure the correct label is within the 5 most probable predictions. Below each image we show the true label, and a distribution over the top 5 predicted labels. If the true label is in the top 5, its probability bar is colored red. Predictions are generated by a convolutional neural network (CNN) called "AlexNet" (see the section on AlexNet). From Figure 4 of <a href='#Krizhevsky12'>[KSH12]</a>. Used with kind permission of Alex Krizhevsky. (b) Misclassification rate (top 5) on the ImageNet competition over time. Used with kind permission of Andrej Karpathy
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.14_A.png" width="256"/>
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.14_B.png" width="256"/>
## Figure 1.15:<a name='1.15'></a> <a name='termDoc'></a>
Example of a term-document matrix, where raw counts have been replaced by their TF-IDF values (see the section on TF-IDF). Darker cells are larger values. From https://bit.ly/2kByLQI . Used with kind permission of Christoph Carl Kling
```
#@title Click me to run setup { display-mode: "form" }
try:
if PYPROBML_SETUP_ALREADY_RUN:
print('skipping setup')
except:
PYPROBML_SETUP_ALREADY_RUN = True
print('running setup...')
!git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null
%cd -q /pyprobml/scripts
%reload_ext autoreload
%autoreload 2
!pip install superimport deimport -qqq
import superimport
def try_deimport():
try:
from deimport.deimport import deimport
deimport(superimport)
except Exception as e:
print(e)
print('finished!')
```
<img src="https://raw.githubusercontent.com/probml/pml-book/main/pml1/figures/images/Figure_1.15.png" width="256"/>
## References:
<a name='Geron2019'>[Aur19]</a> A. Géron. "Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques for Building Intelligent Systems (2nd edition)". (2019).
<a name='Krizhevsky12'>[KSH12]</a> A. Krizhevsky, I. Sutskever and G. Hinton. "Imagenet classification with deep convolutional neural networks". (2012).
<a name='ILSVRC15'>[Rus+15]</a> O. Russakovsky, J. Deng, H. Su, J. Krause, S. Satheesh, S. Ma, Z. Huang, A. Karpathy, A. Khosla, M. Bernstein, A. Berg and L. Fei-Fei. "ImageNet Large Scale Visual Recognition Challenge". In: IJCV (2015).
<a name='fashion'>[XRV17]</a> H. Xiao, K. Rasul and R. Vollgraf. "Fashion-MNIST: a Novel Image Dataset for Benchmarking Machine Learning Algorithms". abs/1708.07747 (2017). arXiv: 1708.07747
|
github_jupyter
|
```
import xarray as xr
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from scipy.io import loadmat
#where to find the data
adir= 'F:/data/fluxsat/WS_SST_Correlation/'
#read in the data
ds1=xr.open_dataset(adir+'Corr_High_redone.nc')
ds1.close()
ds2=xr.open_dataset(adir+'Corr_Full.nc') # Full: correlation using unfiltered daily data
ds2.close()
tem = loadmat(adir+'fluxDifferences.mat')
ds_err = xr.Dataset({'err': (['lat', 'lon'], tem['combinedSD'].transpose())},
coords={'lon': (['lon'], tem['longitude'][:,0]),
'lat': (['lat'], tem['latitude'][:,0])})
#scientific colormaps
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
cm_data = np.loadtxt("C:/Users/gentemann/Google Drive/d_drive/ScientificColourMaps6/vik/vik.txt")
vik_map = LinearSegmentedColormap.from_list("vik", cm_data)
cm_data = np.loadtxt("C:/Users/gentemann/Google Drive/d_drive/ScientificColourMaps6/roma/roma.txt")
roma_map = LinearSegmentedColormap.from_list("roma", cm_data)
roma_map2 = LinearSegmentedColormap.from_list("roma", cm_data[-1::-1])
tem=xr.concat([ds2.sel(lon=slice(20,360)),ds2.sel(lon=slice(0,20))],dim='lon')
fig = plt.figure(figsize=(12, 4))
ax = plt.axes(projection=ccrs.Mollweide(central_longitude=-160))
ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-1,vmax=1,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6)
cax.set_label('Correlation Coefficient')
axt = plt.axes((.3, .8, .01, .01))
axt.axis('off')
axt.text(0,1.2,'a)',fontsize=16)
fig.savefig(adir+'no_filter_wh.png')
tem=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
fig = plt.figure(figsize=(12, 4))
ax = plt.axes(projection=ccrs.Mollweide(central_longitude=-160))
ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-1,vmax=1,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6)
cax.set_label('Correlation Coefficient High Pass')
axt = plt.axes((.3, .8, .01, .01))
axt.axis('off')
axt.text(0,1.2,'b)',fontsize=16)
fig.savefig(adir+'high_pass_wh.png')
tem1=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
tem=xr.concat([ds_err.sel(lon=slice(20,360)),ds_err.sel(lon=slice(0,20))],dim='lon')
fig = plt.figure(figsize=(12, 4))
ax = plt.axes(projection=ccrs.Mollweide(central_longitude=-160))
ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem1.lon,tem1.lat,tem1.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.err,vmin=0,vmax=30,cmap=roma_map2,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6)
cax.set_label('Standard deviation (W m$^{-2}$)')
axt = plt.axes((.3, .8, .01, .01))
axt.axis('off')
axt.text(0,1.2,'b)',fontsize=16)
fig.savefig(adir+'err.png')
vv=.75
tem=xr.concat([ds2.sel(lon=slice(20,360)),ds2.sel(lon=slice(0,20))],dim='lon')
fig = plt.figure(figsize=(15, 8))
ax = plt.subplot(211,projection=ccrs.Mollweide(central_longitude=-160))
ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-vv,vmax=vv,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Correlation Coefficient')
axt = plt.axes((.4, .8, .01, .01))
axt.axis('off')
axt.text(0,1.2,'a)',fontsize=16)
tem=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
ax = plt.subplot(212,projection=ccrs.Mollweide(central_longitude=-160))
ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-vv,vmax=vv,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Correlation Coefficient High Pass')
axt = plt.axes((.4, .4, .01, .01))
axt.axis('off')
axt.text(0,1.2,'b)',fontsize=16)
fig.savefig(adir+'both.png')
vv=.75
tem=xr.concat([ds2.sel(lon=slice(20,360)),ds2.sel(lon=slice(0,20))],dim='lon')
fig = plt.figure(figsize=(15, 12))
ax = plt.subplot(311,projection=ccrs.Mollweide(central_longitude=-160))
ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-vv,vmax=vv,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Correlation Coefficient')
axt = plt.axes((.4, .8, .01, .01))
axt.axis('off')
axt.text(0,1.2,'a)',fontsize=16)
tem=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
ax = plt.subplot(312,projection=ccrs.Mollweide(central_longitude=-160))
ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-vv,vmax=vv,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Correlation Coefficient \n High Pass')
axt = plt.axes((.4, .53, .01, .01))
axt.axis('off')
axt.text(0,1.2,'b)',fontsize=16)
tem1=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
tem=xr.concat([ds_err.sel(lon=slice(20,360)),ds_err.sel(lon=slice(0,20))],dim='lon')
ax = plt.subplot(313,projection=ccrs.Mollweide(central_longitude=-160))
ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem1.lon,tem1.lat,tem1.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.err,vmin=0,vmax=30,cmap=roma_map2,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Standard deviation (W m$^{-2}$)')
axt = plt.axes((.4, .26, .01, .01))
axt.axis('off')
axt.text(0,1.2,'c)',fontsize=16)
fig.savefig(adir+'ALL.png')
vv=.75
tem=xr.concat([ds2.sel(lon=slice(20,360)),ds2.sel(lon=slice(0,20))],dim='lon')
fig = plt.figure(figsize=(15, 12))
ax = plt.subplot(311,projection=ccrs.Mollweide(central_longitude=-160))
#ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-vv,vmax=vv,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Correlation Coefficient')
axt = plt.axes((.4, .8, .01, .01))
axt.axis('off')
axt.text(0,1.2,'a)',fontsize=16)
tem=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
ax = plt.subplot(312,projection=ccrs.Mollweide(central_longitude=-160))
#ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-vv,vmax=vv,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Correlation Coefficient \n High Pass')
axt = plt.axes((.4, .53, .01, .01))
axt.axis('off')
axt.text(0,1.2,'b)',fontsize=16)
tem1=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
tem=xr.concat([ds_err.sel(lon=slice(20,360)),ds_err.sel(lon=slice(0,20))],dim='lon')
ax = plt.subplot(313,projection=ccrs.Mollweide(central_longitude=-160))
#ax.stock_img()
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem1.lon,tem1.lat,tem1.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.err,vmin=0,vmax=30,cmap=roma_map2,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Standard deviation (W m$^{-2}$)')
axt = plt.axes((.4, .26, .01, .01))
axt.axis('off')
axt.text(0,1.2,'c)',fontsize=16)
fig.savefig(adir+'ALL_whiteland.png')
import cartopy.feature as cfeature
vv=.75
tem=xr.concat([ds2.sel(lon=slice(20,360)),ds2.sel(lon=slice(0,20))],dim='lon')
fig = plt.figure(figsize=(15, 12))
ax = plt.subplot(311,projection=ccrs.Mollweide(central_longitude=-160))
#ax.stock_img()
ax.add_feature(cfeature.LAND,facecolor='grey')
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-vv,vmax=vv,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Correlation Coefficient')
axt = plt.axes((.4, .8, .01, .01))
axt.axis('off')
axt.text(0,1.2,'a)',fontsize=16)
tem=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
ax = plt.subplot(312,projection=ccrs.Mollweide(central_longitude=-160))
#ax.stock_img()
ax.add_feature(cfeature.LAND,facecolor='grey')
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem.lon,tem.lat,tem.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.corrH,vmin=-vv,vmax=vv,cmap=vik_map,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Correlation Coefficient \n High Pass')
axt = plt.axes((.4, .53, .01, .01))
axt.axis('off')
axt.text(0,1.2,'b)',fontsize=16)
tem1=xr.concat([ds1.sel(lon=slice(20,360)),ds1.sel(lon=slice(0,20))],dim='lon')
tem=xr.concat([ds_err.sel(lon=slice(20,360)),ds_err.sel(lon=slice(0,20))],dim='lon')
ax = plt.subplot(313,projection=ccrs.Mollweide(central_longitude=-160))
#ax.stock_img()
ax.add_feature(cfeature.LAND,facecolor='grey')
ax.coastlines(resolution='50m', color='black', linewidth=1)
ax0=ax.pcolormesh(tem1.lon,tem1.lat,tem1.mask,vmin=0,vmax=10,cmap='binary',transform=ccrs.PlateCarree())
ax1=ax.pcolormesh(tem.lon,tem.lat,tem.err,vmin=0,vmax=30,cmap=roma_map2,transform=ccrs.PlateCarree())
cax=plt.colorbar(ax1,ax=ax, shrink=.6, pad=0.01)
cax.set_label('Standard deviation (W m$^{-2}$)')
axt = plt.axes((.4, .26, .01, .01))
axt.axis('off')
axt.text(0,1.2,'c)',fontsize=16)
fig.savefig(adir+'ALL_greyland.png')
```
|
github_jupyter
|
# Copy Task Plots
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from glob import glob
import json
import os
import sys
sys.path.append(os.path.abspath(os.getcwd() + "./../"))
%matplotlib inline
```
## Load training history
To generate the models and training history used in this notebook, run the following commands:
```
mkdir ./notebooks/copy
./train.py --seed 1 --task copy --checkpoint-interval 500 --checkpoint-path ./notebooks/copy
./train.py --seed 10 --task copy --checkpoint-interval 500 --checkpoint-path ./notebooks/copy
./train.py --seed 100 --task copy --checkpoint-interval 500 --checkpoint-path ./notebooks/copy
./train.py --seed 1000 --task copy --checkpoint-interval 500 --checkpoint-path ./notebooks/copy
```
```
batch_num = 40000
files = glob("./copy/*-{}.json".format(batch_num))
files
# Read the metrics from the .json files
history = [json.loads(open(fname, "rt").read()) for fname in files]
training = np.array([(x['cost'], x['loss'], x['seq_lengths']) for x in history])
print("Training history (seed x metric x sequence) =", training.shape)
# Average every dv values across each (seed, metric)
dv = 1000
training = training.reshape(len(files), 3, -1, dv).mean(axis=3)
print(training.shape)
# Average the seeds
training_mean = training.mean(axis=0)
training_std = training.std(axis=0)
print(training_mean.shape)
print(training_std.shape)
fig = plt.figure(figsize=(12, 5))
# X axis is normalized to thousands
x = np.arange(dv / 1000, (batch_num / 1000) + (dv / 1000), dv / 1000)
# Plot the cost
# plt.plot(x, training_mean[0], 'o-', linewidth=2, label='Cost')
plt.errorbar(x, training_mean[0], yerr=training_std[0], fmt='o-', elinewidth=2, linewidth=2, label='Cost')
plt.grid()
plt.yticks(np.arange(0, training_mean[0][0]+5, 5))
plt.ylabel('Cost per sequence (bits)')
plt.xlabel('Sequence (thousands)')
plt.title('Training Convergence', fontsize=16)
ax = plt.axes([.57, .55, .25, .25], facecolor=(0.97, 0.97, 0.97))
plt.title("BCELoss")
plt.plot(x, training_mean[1], 'r-', label='BCE Loss')
plt.yticks(np.arange(0, training_mean[1][0]+0.2, 0.2))
plt.grid()
plt.show()
loss = history[3]['loss']
cost = history[3]['cost']
seq_lengths = history[3]['seq_lengths']
unique_sls = set(seq_lengths)
all_metric = list(zip(range(1, batch_num+1), seq_lengths, loss, cost))
fig = plt.figure(figsize=(12, 5))
plt.ylabel('Cost per sequence (bits)')
plt.xlabel('Iteration (thousands)')
plt.title('Training Convergence (Per Sequence Length)', fontsize=16)
for sl in unique_sls:
sl_metrics = [i for i in all_metric if i[1] == sl]
x = [i[0] for i in sl_metrics]
y = [i[3] for i in sl_metrics]
num_pts = len(x) // 50
total_pts = num_pts * 50
x_mean = [i.mean()/1000 for i in np.split(np.array(x)[:total_pts], num_pts)]
y_mean = [i.mean() for i in np.split(np.array(y)[:total_pts], num_pts)]
plt.plot(x_mean, y_mean, label='Seq-{}'.format(sl))
plt.yticks(np.arange(0, 80, 5))
plt.legend(loc=0)
plt.show()
```
# Evaluate
```
import torch
from IPython.display import Image as IPythonImage
from PIL import Image, ImageDraw, ImageFont
import io
from tasks.copytask import dataloader
from train import evaluate
from tasks.copytask import CopyTaskModelTraining
model = CopyTaskModelTraining()
model.net.load_state_dict(torch.load("./copy/copy-task-10-batch-40000.model"))
seq_len = 60
_, x, y = next(iter(dataloader(1, 1, 8, seq_len, seq_len)))
result = evaluate(model.net, model.criterion, x, y)
y_out = result['y_out']
def cmap(value):
pixval = value * 255
low = 64
high = 240
factor = (255 - low - (255-high)) / 255
return int(low + pixval * factor)
def draw_sequence(y, u=12):
seq_len = y.size(0)
seq_width = y.size(2)
inset = u // 8
pad = u // 2
width = seq_len * u + 2 * pad
height = seq_width * u + 2 * pad
im = Image.new('L', (width, height))
draw = ImageDraw.ImageDraw(im)
draw.rectangle([0, 0, width, height], fill=250)
for i in range(seq_len):
for j in range(seq_width):
val = 1 - y[i, 0, j].data[0]
draw.rectangle([pad + i*u + inset,
pad + j*u + inset,
pad + (i+1)*u - inset,
pad + (j+1)*u - inset], fill=cmap(val))
return im
def im_to_png_bytes(im):
png = io.BytesIO()
im.save(png, 'PNG')
return bytes(png.getbuffer())
def im_vconcat(im1, im2, pad=8):
assert im1.size == im2.size
w, h = im1.size
width = w
height = h * 2 + pad
im = Image.new('L', (width, height), color=255)
im.paste(im1, (0, 0))
im.paste(im2, (0, h+pad))
return im
def make_eval_plot(y, y_out, u=12):
im_y = draw_sequence(y, u)
im_y_out = draw_sequence(y_out, u)
im = im_vconcat(im_y, im_y_out, u//2)
w, h = im.size
pad_w = u * 7
im2 = Image.new('L', (w+pad_w, h), color=255)
im2.paste(im, (pad_w, 0))
# Add text
font = ImageFont.truetype("./fonts/PT_Sans-Web-Regular.ttf", 13)
draw = ImageDraw.ImageDraw(im2)
draw.text((u,4*u), "Targets", font=font)
draw.text((u,13*u), "Outputs", font=font)
return im2
im = make_eval_plot(y, y_out, u=8)
IPythonImage(im_to_png_bytes(im))
```
## Create an animated GIF
Let's see how the prediction looks at each checkpoint that we saved.
```
seq_len = 80
_, x, y = next(iter(dataloader(1, 1, 8, seq_len, seq_len)))
frames = []
font = ImageFont.truetype("./fonts/PT_Sans-Web-Regular.ttf", 13)
for batch_num in range(500, 10500, 500):
model = CopyTaskModelTraining()
model.net.load_state_dict(torch.load("./copy/copy-task-10-batch-{}.model".format(batch_num)))
result = evaluate(model.net, model.criterion, x, y)
y_out = result['y_out']
frame = make_eval_plot(y, y_out, u=10)
w, h = frame.size
frame_seq = Image.new('L', (w, h+40), color=255)
frame_seq.paste(frame, (0, 40))
draw = ImageDraw.ImageDraw(frame_seq)
draw.text((10, 10), "Sequence Num: {} (Cost: {})".format(batch_num, result['cost']), font=font)
frames += [frame_seq]
im = frames[0]
im.save("./copy-train-80.gif", save_all=True, append_images=frames[1:], loop=0, duration=1000)
im = frames[0]
im.save("./copy-train-80-fast.gif", save_all=True, append_images=frames[1:], loop=0, duration=100)
```
|
github_jupyter
|
# Imports & Installations
```
!pip install pyforest
!pip install plotnine
!pip install transformers
!pip install psycopg2-binary
!pip uninstall -y tensorflow-datasets
!pip install lit_nlp tfds-nightly transformers==4.1.1
# Automatic library importer (doesn't quite import everything yet)
from pyforest import *
# Expands Dataframe to view entire pandas dataframe
pd.options.display.max_colwidth = 750
# For tracking the duration of executed code cells
from time import time
# To connect to Blue Witness Labeler's DB
import psycopg2
# For visualizations
from plotnine import *
from plotnine.data import mpg
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# For BERT model
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler
from transformers import BertTokenizer, BertForSequenceClassification, AdamW
from transformers import get_linear_schedule_with_warmup
from tensorflow.keras.preprocessing.sequence import pad_sequences
```
# Reading in our Tweets
```
def get_df(db_url) -> pd.DataFrame:
'''
Connects to our Blue Witness Data Labeler and retrieves manually labelled text before converting them all into a pandas dataframe.
Parameters
----------
db_url: psycopg2 database
Returns
-------
    df: pandas dataframe
        Contains thousands of texts with appropriate police (non-)violence labels
'''
conn = psycopg2.connect(db_url)
curs = conn.cursor()
curs.execute("SELECT * FROM training;")
cols = [k[0] for k in curs.description]
rows = curs.fetchall()
df = pd.DataFrame(rows, columns=cols)
curs.close()
conn.close()
return df
# ALWAYS REMEMBER TO REMOVE THE PostgreSQL URL ASSIGNED TO THIS VARIABLE WHEN COMMITTING TO OUR REPO
db_url = ""
data_labeler_df = get_df(db_url)
data_labeler_df
def rank_wrangle():
'''
Loads in both synthetic tweets generated from GPT-2 and authentic tweets scraped and manually labelled from Twitter.
Combines both sets of tweets together into a single dataframe.
Drops any null values and duplicates.
rank2_syn.txt, rank3_syn.txt, and rank4_syn.txt can be found in notebooks/labs37_notebooks/synthetic_tweets
Parameters
----------
None
Returns
-------
df: pandas dataframe
Contains fully concatenated dataframe
'''
# Supplying our dataframes with proper labels
column_headers = ['tweets', 'labels']
# Reading in our three police force rank datasets
synthetic_tweets_cop_shot = pd.read_csv("/content/cop_shot_syn.txt", sep = '/', names=column_headers)
synthetic_tweets_run_over = pd.read_csv("/content/run_over_syn.txt", sep = '/', names=column_headers)
synthetic_tweets_rank2 = pd.read_csv("/content/rank2_syn.txt", sep = '/', names=column_headers)
synthetic_tweets_rank3 = pd.read_csv("/content/rank3_syn.txt", sep = '/', names=column_headers)
synthetic_tweets_rank4 = pd.read_csv("/content/rank4_syn.txt", sep = '/', names=column_headers)
# Concatenating all of our datasets into one
compiled = pd.concat([data_labeler_df, synthetic_tweets_cop_shot, synthetic_tweets_run_over, synthetic_tweets_rank2, synthetic_tweets_rank3, synthetic_tweets_rank4])
# Dropping unnecessary column
compiled.drop('id', axis=1, inplace=True)
# Discarding generated duplicates from GPT-2 while keeping the original Tweets
compiled.drop_duplicates(subset='tweets', keep='first', inplace=True)
# Dropping any possible NaNs
if compiled.isnull().values.any():
compiled.dropna(how='any', inplace=True)
return compiled
# Applying our function above to view the contents of our dataframe
force_ranks = rank_wrangle()
force_ranks
```
# Visualizations
```
%matplotlib inline
(ggplot(force_ranks) # defining what dataframe to use
+ aes(x='labels') # defining what variable/column to use
+ geom_bar(size=20) # defining the type of plot to use and its size
+ labs(title='Number of Tweets Reporting Police Violence per Force Rank', x='Force Rank', y='Number of Tweets')
)
# Creating custom donut chart with Plotly
labels = ['0 - No Police Presence', '5 - Lethal Force (Guns & Explosives)', '1 - Non-violent Police Presence', '3 - Blunt Force Trauma (Batons & Shields)', '4 - Chemical & Electric Weapons (Tasers & Pepper Spray)', '2 - Open Handed (Arm Holds & Pushing)']
values = force_ranks.labels.value_counts()
bw_colors = ['rgb(138, 138, 144)', 'rgb(34, 53, 101)', 'rgb(37, 212, 247)', 'rgb(59, 88, 181)', 'rgb(56, 75, 126)',
'rgb(99, 133, 242)']
# Using 'pull' on Rank 5 to accentuate the frequency of the most excessive use of force by police
# 'hole' determines the size of the donut chart
fig = go.Figure(data=[go.Pie(labels=labels,
values=values, pull=[0, 0.2, 0, 0, 0, 0],
hole=.3,
name='Blue Witness',
marker_colors=bw_colors)])
# Displaying our donut chart
fig.update(layout_title_text='Percentage of Tweets Reporting Police Violence per Force Rank')
fig = go.Figure(fig)
fig.show()
```
# Preparing Data for BERT
Splitting dataframe into training and testing sets before converting to parquet for later reference/resource.
```
def parquet_and_split():
'''
Splits our data into a format amicable to NLP modeling.
Saves our original dataframe as well as the two split dataframes into parquet files for later reference/use.
-----
Parameters
------
None
Returns
-------
df: pandas dataframes
Contains two split dataframes ready to be fit to and tested against a model
'''
# Splitting dataframe into training and testing sets for modeling
# 20% of our data will be reserved for testing
training, testing = train_test_split(force_ranks, test_size=0.2)
# Sanity Check
if force_ranks.shape[0] == training.shape[0] + testing.shape[0]:
print("Sanity Check - Succesful!")
else:
print("Sanity Check - Unsuccessful!")
# Converting dataframes to parquet format for later reference
# Using parquet as our new dataset storage format as they cannot be edited like CSVs can. They are immutable.
# For viewing in vscode, install the parquet-viewer extension: https://marketplace.visualstudio.com/items?itemName=dvirtz.parquet-viewer
training.to_parquet('synthetic_training.parquet')
testing.to_parquet('synthetic_testing.parquet')
force_ranks.to_parquet('synthetic_complete.parquet')
return training, testing
training, testing = parquet_and_split()
```
# BERT
## Training our NLP Multi-Class Classification Model
```
def bert_trainer(df, output_dir: str, epochs: int):
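    '''
    Fine-tunes a bert-base-uncased sequence classifier with six labels on the
    'tweets' and 'labels' columns of df for the given number of epochs (on GPU
    when available), then saves the trained model and tokenizer to output_dir.
    '''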
start = time()
max_len = 280
if torch.cuda.is_available():
print("CUDA Active")
device = torch.device("cuda")
else:
print("CPU Active")
device = torch.device("cpu")
sentences = df["tweets"].values
labels = df["labels"].values
tokenizer = BertTokenizer.from_pretrained(
'bert-base-uncased',
do_lower_case=True,
)
inputs = [
tokenizer.encode(sent, add_special_tokens=True) for sent in sentences
]
inputs_ids = pad_sequences(
inputs,
maxlen=max_len,
dtype="long",
value=0,
truncating="post",
padding="post",
)
attention_masks = [
[int(token_id != 0) for token_id in sent] for sent in inputs_ids
]
train_inputs = torch.tensor(inputs_ids)
train_labels = torch.tensor(labels)
train_masks = torch.tensor(attention_masks)
batch_size = 32
train_data = TensorDataset(train_inputs, train_masks, train_labels)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(
train_data,
sampler=train_sampler,
batch_size=batch_size,
)
model = BertForSequenceClassification.from_pretrained(
'bert-base-uncased',
num_labels=6,
output_attentions=False,
output_hidden_states=False,
)
if torch.cuda.is_available():
model.cuda()
optimizer = AdamW(model.parameters(), lr=2e-5, eps=1e-8)
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps,
)
loss_values = []
print('\nTraining...')
for epoch_i in range(1, epochs + 1):
print(f"\nEpoch: {epoch_i}")
total_loss = 0
model.train()
for step, batch in enumerate(train_dataloader):
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
model.zero_grad()
outputs = model(
b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels,
)
loss = outputs[0]
total_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_loss / len(train_dataloader)
loss_values.append(avg_train_loss)
print(f"Average Loss: {avg_train_loss}")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print(f"\nSaving model to {output_dir}")
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
end = time()
total_run_time_in_hours = (((end - start)/60)/60)
rounded_total_run_time_in_hours = np.round(total_run_time_in_hours, decimals=2)
print(f"Finished training in {rounded_total_run_time_in_hours} hours!")
!nvidia-smi
# If running on Colab, the best GPU to have in use is the NVIDIA Tesla P100
from google.colab import drive
drive.mount('/content/drive')
# Colab notebook may crash the first time this code cell is run.
# Running this cell again after runtime restart shouldn't produce any more issues.
bert_trainer(training, 'saved_model', epochs=50)
```
## Making Predictions
```
class FrankenBert:
"""
Implements BertForSequenceClassification and BertTokenizer
    for multi-class (force rank) classification from a saved model
"""
def __init__(self, path: str):
"""
If there's a GPU available, tell PyTorch to use the GPU.
Loads model and tokenizer from saved model directory (path)
"""
if torch.cuda.is_available():
self.device = torch.device('cuda')
else:
self.device = torch.device('cpu')
self.model = BertForSequenceClassification.from_pretrained(path)
self.tokenizer = BertTokenizer.from_pretrained(path)
self.model.to(self.device)
def predict(self, text: str):
"""
        Makes a multi-class force-rank prediction based on the saved model
"""
inputs = self.tokenizer(
text,
padding=True,
truncation=True,
max_length=280,
return_tensors='pt',
).to(self.device)
output = self.model(**inputs)
prediction = output[0].softmax(1)
tensors = prediction.detach().cpu().numpy()
result = np.argmax(tensors)
confidence = tensors[0][result]
return f"Rank: {result}, {100 * confidence:.2f}%"
model = FrankenBert('saved_model')
model.predict("Mickey Mouse is in the house")
model.predict("Cops gave me a speeding ticket for walking too fast")
model.predict("Officer Kelly was shot and killed")
model.predict("A Texas Department of Public Safety (DPS) trooper ran over and killed a man who was in the road near the State Capitol early Thursday morning, according to the Austin Police Department (APD). The crash happened at around 3:45 a.m. Thursday just west of the Texas State Capitol building. The trooper was heading northbound on Colorado Street and as he was turning left on 13th Street, the trooper hit the pedestrian. DPS said the crash happened while the trooper was patrolling the area.")
model.predict("Cop ran me over with his SUV")
model.predict("Cops hit her with a baton")
model.predict("Cops sprayed my mom with pepper spray")
model.predict("Cops shot rubber bullets at the crowd")
model.predict("Police used tear gas on a pedestrian for no reason")
model.predict("Cops killed that woman")
model.predict("Yesterday I saw a policeman hit a poor person behind my house. I wonder whats going on")
model.predict("Man ran up to me and pepper sprayed me. I've called the cops, but they have not gotten themselves involved yet.")
```
## Saving Trained Model
```
from google.colab import drive
drive.mount('/content/gdrive')
#path that contains folder you want to copy
%cd /content/gdrive/MyDrive/ColabNotebooks/Labs/saved_model
# copy local folder to folder on Google Drive
%cp -av /content/saved_model saved_model
```
|
github_jupyter
|
### 1. Gradient Descent Tips
*Recap*: the update rule for $\theta$ at iteration $t$:
<center>$\theta_{t+1} := \theta_t - \alpha \nabla_{\theta} f(\theta_t)$</center>
Where:
- $\alpha$: the learning rate.
- $\nabla_{\theta} f(\theta_t)$: the gradient of the function at $\theta_t$.
Choosing the value of $\alpha$ (the learning rate) is very important: it determines whether the optimization can converge to the global minimum of $f(\theta)$. Gradient Descent works much more effectively when a suitable learning rate is chosen.
Some typical cases of learning-rate choices are shown below (you can experiment with the values [here](https://developers.google.com/machine-learning/crash-course/fitter/graph)):
**Learning rate too large** - Gradient Descent cannot converge to the minimum.
<img src="images/image-3.gif" style="width:50%;height:50%;">
**Learning rate too small** - Gradient Descent can still converge to the minimum in this problem, but it takes 81 iterations to get there. In problems with many local minima, a learning rate that is too small can also leave the optimization stuck at a local minimum, never reaching the optimal value.
<img src="images/image-5.png" style="width:50%;height:50%;">
**Moderate learning rate:** if a learning rate that is too small makes convergence slow, try increasing it. In this problem, a learning rate of 1.0 converges in 6 iterations.
<img src="images/image-4.png" style="width:50%;height:50%;">
**Optimal learning rate:** in practice it is very hard to find the truly optimal learning rate. Finding a value reasonably close to the optimum is enough to make the problem converge noticeably faster.
<img src="images/image-6.png" style="width:50%;height:50%;">
**Summary:**
- If the learning rate is too small: convergence takes too long, and the optimization may get stuck at a local minimum.
- If the learning rate is too large: the optimization cannot converge. (A small numerical sketch of these cases follows below.)
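To make this concrete, here is a minimal Python sketch (not part of the original lecture material) that runs Gradient Descent on the toy function $f(\theta) = \theta^2$ with three different learning rates; the exact values are only illustrative:
```
def gradient_descent(alpha, theta0=5.0, iters=20):
    """Minimize f(theta) = theta**2 with a fixed learning rate alpha."""
    theta = theta0
    for _ in range(iters):
        grad = 2 * theta              # f'(theta) = 2*theta
        theta = theta - alpha * grad  # the update rule above
    return theta

for alpha in [0.01, 0.1, 1.1]:        # too small, reasonable, too large
    print(f"alpha = {alpha:4.2f} -> theta after 20 steps = {gradient_descent(alpha):12.4f}")
```
With alpha = 0.01 the iterate is still far from the minimum at theta = 0 after 20 steps, with alpha = 0.1 it is close, and with alpha = 1.1 the updates overshoot and diverge.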
### A Few Practical Tips
- Before starting, normalize your data to the range [-1, 1] or [0, 1]; this helps the optimization converge faster.
- Start with a small learning rate and gradually increase it if it does not seem appropriate.
- For problems with a lot of data, use Mini-batch Gradient Descent (covered in an upcoming lesson).
- Use Momentum with Gradient Descent (also covered in an upcoming lesson).
### 2. Normal Equation
The Normal Equation is a method for finding the solution of the Linear Regression problem without any iterations and without choosing a learning rate. It also does not require feature scaling.
The derivation behind this closed-form solution can be found at:
https://eli.thegreenplace.net/2014/derivation-of-the-normal-equation-for-linear-regression
And our most important formula:
<center> $\theta = (X^T X)^{-1} X^T y$ </center>
A comparison between the Normal Equation and Gradient Descent:
<table>
<tr>
<td> Gradient Descent </td>
<td> Normal Equation </td>
</tr>
<tr>
<td> Requires choosing a learning rate </td>
<td> No learning rate to choose </td>
</tr>
<tr>
<td> Requires many iterations </td>
<td> No iterations required </td>
</tr>
<tr>
<td> Computational cost: $O(kn^2)$ </td>
<td> Computational cost: $O(n^3)$, since a matrix inverse must be computed </td>
</tr>
<tr>
<td> Works well with large datasets </td>
<td> Very slow on large datasets </td>
</tr>
</table>
Because the Normal Equation costs $O(n^3)$ to compute, Gradient Descent should be preferred when the problem is large (roughly n > 10,000).
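Here is a minimal NumPy sketch of the normal equation on synthetic data (illustration only; the sizes and coefficients are made up), compared against `np.linalg.lstsq`, which solves the same least-squares problem in a more numerically stable way:
```
import numpy as np

rng = np.random.default_rng(0)
m, n = 200, 3                                                  # 200 examples, 3 features
X = np.column_stack([np.ones(m), rng.normal(size=(m, n))])     # add a bias column
true_theta = np.array([4.0, 2.0, -1.0, 0.5])
y = X @ true_theta + rng.normal(scale=0.1, size=m)

# Normal equation: theta = (X^T X)^{-1} X^T y
theta_normal_eq = np.linalg.inv(X.T @ X) @ X.T @ y

# Preferred in practice: solve the least-squares problem directly
theta_lstsq, *_ = np.linalg.lstsq(X, y, rcond=None)

print("normal equation:", np.round(theta_normal_eq, 3))
print("lstsq          :", np.round(theta_lstsq, 3))
```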
### References
[1] [CS229 - Machine Learning Course](http://cs229.stanford.edu)
|
github_jupyter
|
# Lecture 8: p-hacking and Multiple Comparisons
[J. Nathan Matias](https://github.com/natematias)
[SOC412](https://natematias.com/courses/soc412/), February 2019
In Lecture 8, we discussed Stephanie Lee's story about [Brian Wansink](https://www.buzzfeednews.com/article/stephaniemlee/brian-wansink-cornell-p-hacking#.btypwrDwe5), a food researcher who was found guilty of multiple kinds of research misconduct, including "p-hacking," where researchers keep looking for an answer until they find one. In this lecture, we will discuss what p-hacking is and what researchers can do to protect against it in our own work.
This example uses the [DeclareDesign](http://declaredesign.org/) library, which supports the simulation and evaluation of experiment designs. We will be using DeclareDesign to help with designing experiments in this class.
What can you do in your research to protect yourself against the risk of p-hacking or against reductions in the credibility of your research if people accuse you of p-hacking?
* Conduct a **power analysis** to choose a sample size that is large enough to observe the effect you're looking for (see below)
* If you have multiple statistical tests in each experiment, [adjust your analysis for multiple comparisons](https://egap.org/methods-guides/10-things-you-need-know-about-multiple-comparisons).
* [Pre-register](https://cos.io/prereg/) your study, being clear about whether your research is exploratory or confirmatory, and committing in advance to the statistical tests you're using to analyze the results
* Use cross-validation with training and holdout samples to take an exploratory + confirmatory approach (requires a much larger sample size, typically greater than 2x)
# Load Libraries
```
options("scipen"=9, "digits"=4)
library(dplyr)
library(MASS)
library(ggplot2)
library(rlang)
library(corrplot)
library(Hmisc)
library(tidyverse)
library(viridis)
library(fabricatr)
library(DeclareDesign)
## Installed DeclareDesign 0.13 using the following command:
# install.packages("DeclareDesign", dependencies = TRUE,
# repos = c("http://R.declaredesign.org", "https://cloud.r-project.org"))
options(repr.plot.width=7, repr.plot.height=4)
set.seed(03456920)
sessionInfo()
```
# What is a p-value?
A p-value (which can be calculated differently for different kinds of statistical tests) is the probability, assuming the null hypothesis is true, of observing a result at least as extreme as the one actually observed. When testing differences in means, we are usually testing the null hypothesis of no difference between the two distributions. In that case, the p-value is the probability of observing a difference between the sample means at least as extreme as the one observed.
You can think of the p-value as the probability represented by the area under the following t distribution of all of the possible outcomes for a given difference between means if the null hypothesis is true:

### Illustrating The Null Hypothesis
In the following case, I simulate 1,000 pairs of normal samples (200 observations each) with exactly the same mean and standard deviation, and then plot the differences between the sample means:
```
### GENERATE n.samples simulations at n.sample.size observations
### using normal distributions at the specified means
### and record the difference in means and the p value of the observations
#
# `@diff.df: the dataframe to pass in
# `@n.sample.size: the sample sizes to draw from a normal distribution
generate.n.samples <- function(diff.df, n.sample.size = 500){
for(i in seq(nrow(diff.df))){
row = diff.df[i,]
a.dist = rnorm(n.sample.size, mean = row$a.mean, sd = row$a.sd)
b.dist = rnorm(n.sample.size, mean = row$b.mean, sd = row$a.sd)
t <- t.test(a.dist, b.dist)
diff.df[i,]$p.value <- t$p.value
diff.df[i,]$mean.diff <- mean(b.dist) - mean(a.dist)
}
diff.df
}
#expand.grid
n.samples = 1000
null.hypothesis.df = data.frame(a.mean = 1, a.sd = 1,
b.mean = 1, b.sd = 1,
id=seq(n.samples),
mean.diff = NA,
p.value = NA)
null.hypothesis.df <- generate.n.samples(null.hypothesis.df, 200)
ggplot(null.hypothesis.df, aes(mean.diff)) +
geom_histogram(binwidth=0.01) +
xlim(-1.2,1.2) +
ggtitle("Simulated Differences in means under the null hypothesis")
ggplot(null.hypothesis.df, aes(mean.diff, p.value, color=factor(p.value < 0.05))) +
geom_point() +
geom_hline(yintercept = 0.05) +
ggtitle("Simulated p values under the null hypothesis")
print("How often is the p-value < 0.05?")
summary(null.hypothesis.df$p.value > 0.05)
```
### Illustrating A Difference in Means (first with a small sample size)
```
#expand.grid
small.sample.diff.df = data.frame(a.mean = 1, a.sd = 1,
b.mean = 1.2, b.sd = 1,
id=seq(n.samples),
mean.diff = NA,
p.value = NA)
small.sample.diff.df <- generate.n.samples(small.sample.diff.df, 20)
ggplot(small.sample.diff.df, aes(mean.diff)) +
geom_histogram(binwidth=0.01) +
xlim(-1.2,1.2) +
ggtitle("Simulated Differences in means under the a diff in means of 1 (n=20)")
ggplot(small.sample.diff.df, aes(mean.diff, p.value, color=factor(p.value < 0.05))) +
geom_point() +
geom_hline(yintercept = 0.05) +
ggtitle("Simulated p values under a diff in means of 0.2 (n = 20)")
print("How often is the p-value < 0.05?")
summary(small.sample.diff.df$p.value > 0.05)
print("How often is the p-value < 0.05? when the estimate is < 0 (false positive)?")
nrow(subset(small.sample.diff.df, mean.diff<0 &p.value < 0.05))
print("How often is the p-value >= 0.05 when the estimate is 0.2 or greater (false negative)?")
print(sprintf("%1.2f precent",
nrow(subset(small.sample.diff.df, mean.diff>=0.2 &p.value >= 0.05)) /
nrow(small.sample.diff.df)*100))
print("What is the smallest positive, statistically-significant result?")
sprintf("%1.2f, which is greater than the true difference of 0.2",
min(subset(small.sample.diff.df, mean.diff>0 & p.value < 0.05)$mean.diff))
print("If we only published statistically-significant results, what we would we think the true effect would be?")
sprintf("%1.2f, which is greater than the true difference of 0.2",
mean(subset(small.sample.diff.df, p.value < 0.05)$mean.diff))
print("If we published all experiment results, what we would we think the true effect would be?")
sprintf("%1.2f, which is very close to the true difference of 0.2",
mean(small.sample.diff.df$mean.diff))
```
### Illustrating A Difference in Means (with a larger sample size)
```
#expand.grid
larger.sample.diff.df = data.frame(a.mean = 1, a.sd = 1,
b.mean = 1.2, b.sd = 1,
id=seq(n.samples),
mean.diff = NA,
p.value = NA)
larger.sample.diff.df <- generate.n.samples(larger.sample.diff.df, 200)
ggplot(larger.sample.diff.df, aes(mean.diff)) +
geom_histogram(binwidth=0.01) +
xlim(-1.2,1.2) +
ggtitle("Simulated Differences in means under the a diff in means of 1 (n=200)")
ggplot(larger.sample.diff.df, aes(mean.diff, p.value, color=factor(p.value < 0.05))) +
geom_point() +
geom_hline(yintercept = 0.05) +
ggtitle("Simulated p values under a diff in means of 0.2 (n = 200)")
print("If we only published statistically-significant results, what we would we think the true effect would be?")
sprintf("%1.2f, which is greater than the true difference of 0.2",
mean(subset(larger.sample.diff.df, p.value < 0.05)$mean.diff))
print("How often is the p-value < 0.05?")
sprintf("%1.2f percent",
nrow(subset(larger.sample.diff.df,p.value < 0.05)) / nrow(larger.sample.diff.df)*100)
```
### Illustrating a Difference in Means (with an adequately large sample size)
```
adequate.sample.diff.df = data.frame(a.mean = 1, a.sd = 1,
b.mean = 1.2, b.sd = 1,
id=seq(n.samples),
mean.diff = NA,
p.value = NA)
adequate.sample.diff.df <- generate.n.samples(larger.sample.diff.df, 400)
ggplot(adequate.sample.diff.df, aes(mean.diff, p.value, color=factor(p.value < 0.05))) +
geom_point() +
geom_hline(yintercept = 0.05) +
ggtitle("Simulated p values under a diff in means of 0.2 (n = 400)")
print("How often is the p-value < 0.05?")
sprintf("%1.2f percent",
nrow(subset(adequate.sample.diff.df,p.value < 0.05)) / nrow(adequate.sample.diff.df)*100)
print("If we only published statistically-significant results, what we would we think the true effect would be?")
sprintf("%1.2f, which is greater than the true difference of 0.2",
mean(subset(adequate.sample.diff.df, p.value < 0.05)$mean.diff))
```
# The Problem of Multiple Comparisons
In the above example, I demonstrated that across 1,000 simulations under the null hypothesis and a decision rule of p < 0.05, roughly 5% of the results are statistically significant. This is similarly true for a single experiment with multiple outcome variables.
```
## Generate n normally distributed outcome variables with no difference on average
#
#` @num.samples: sample size for the dataframe
#` @num.columns: how many outcome variables to observe
#` @common.mean: the mean of the outcomes
#` @common.sd: the standard deviation of the outcomes
generate.n.outcomes.null <- function( num.samples, num.columns, common.mean, common.sd){
df <- data.frame(id = seq(num.samples))
for(i in seq(num.columns)){
df[paste('row.',i,sep="")] <- rnorm(num.samples, mean=common.mean, sd=common.sd)
}
df
}
```
### With 10 outcome variables, if we look for correlations between every pair of outcomes, we expect to see roughly 5% false positives on average under the null hypothesis.
```
set.seed(487)
## generate the data
null.10.obs <- generate.n.outcomes.null(100, 10, 1, 3)
null.10.obs$id <- NULL
null.correlations <- cor(null.10.obs, method="pearson")
null.pvalues <- cor.mtest(null.10.obs, conf.level = 0.95, method="pearson")$p
corrplot(cor(null.10.obs, method="pearson"), sig.level = 0.05, p.mat = null.pvalues)
```
### With multiple comparisons, increasing the sample size does not make the problem go away. Here, we use a sample of 10000 instead of 100
```
null.10.obs.large <- generate.n.outcomes.null(10000, 10, 1, 3)
null.10.obs.large$id <- NULL
null.correlations <- cor(null.10.obs.large, method="pearson")
null.pvalues <- cor.mtest(null.10.obs.large, conf.level = 0.95, method="pearson")$p
corrplot(cor(null.10.obs.large, method="pearson"), sig.level = 0.05, p.mat = null.pvalues)
```
# Power Analysis
A power analysis is a process for deciding what sample size to use based on the chance of observing the minimum effect you are looking for in your study. This power analysis uses [DeclareDesign](http://declaredesign.org/). Another option is the [egap Power Analysis page.](https://egap.org/content/power-analysis-simulations-r)
(we will discuss this in further detail in a subsequent class)
```
mean.a <- 0
effect.b <- 0.1
sample.size <- 500
design <-
declare_population(
N = sample.size
) +
declare_potential_outcomes(
YA_Z_0 = rnorm(n=N, mean = mean.a, sd=1),
YA_Z_1 = rnorm(n=N, mean = mean.a + effect.b, sd=1)
) +
declare_assignment(num_arms = 2,
conditions = (c("0", "1"))) +
declare_estimand(ate_YA_1_0 = effect.b) +
declare_reveal(outcome_variables = c("YA")) +
declare_estimator(YA ~ Z, estimand="ate_YA_1_0")
design
diagnose_design(design, sims=500, bootstrap_sims=500)
```
|
github_jupyter
|
# Measles Incidence in Altair
This is an example of reproducing the Wall Street Journal's famous [Measles Incidence Plot](http://graphics.wsj.com/infectious-diseases-and-vaccines/#b02g20t20w15) in Python using [Altair](http://github.com/ellisonbg/altair/).
## The Data
We'll start by downloading the data. Fortunately, others have made the data available in an easily digestible form; a github search revealed the dataset in CSV format here:
```
import pandas as pd
url = 'https://raw.githubusercontent.com/blmoore/blogR/master/data/measles_incidence.csv'
data = pd.read_csv(url, skiprows=2, na_values='-')
data.head()
```
## Data Munging with Pandas
This data needs to be cleaned-up a bit; we can do this with the Pandas library.
We first need to aggregate the incidence data by year:
```
annual = data.drop('WEEK', axis=1).groupby('YEAR').sum()
annual.head()
```
Next, because Altair is built to handle data where each row corresponds to a single sample, we will stack the data, re-labeling the columns for clarity:
```
measles = annual.reset_index()
measles = measles.melt('YEAR', var_name='state', value_name='incidence')
measles.head()
```
## Initial Visualization
Now we can use Altair's syntax for generating a heat map:
```
import altair as alt
alt.Chart(measles).mark_rect().encode(
x='YEAR:O',
y='state:N',
color='incidence'
).properties(
width=600,
height=400
)
```
## Adjusting Aesthetics
All operative components of the visualization appear above; we now just have to adjust the aesthetic features to reproduce the original plot.
Altair allows a wide range of flexibility for such adjustments, including size and color of markings, axis labels and titles, and more.
Here is the data visualized again with a number of these adjustments:
```
# Define a custom colormap using Hex codes & HTML color names
colormap = alt.Scale(domain=[0, 100, 200, 300, 1000, 3000],
range=['#F0F8FF', 'cornflowerblue', 'mediumseagreen', '#FFEE00', 'darkorange', 'firebrick'],
type='sqrt')
alt.Chart(measles).mark_rect().encode(
alt.X('YEAR:O', axis=alt.Axis(title=None, ticks=False)),
alt.Y('state:N', axis=alt.Axis(title=None, ticks=False)),
alt.Color('incidence:Q', sort='ascending', scale=colormap, legend=None)
).properties(
width=800,
height=500
)
```
The result clearly shows the impact of the measles vaccine introduced in the mid-1960s.
## Layering & Selections
Here is another view of the data, using layering and selections to allow zooming in.
```
hover = alt.selection_single(on='mouseover', nearest=True, fields=['state'], empty='none')
line = alt.Chart().mark_line().encode(
alt.X('YEAR:Q',
scale=alt.Scale(zero=False),
axis=alt.Axis(format='f', title='year')
),
alt.Y('incidence:Q', axis=alt.Axis(title='measles incidence')),
detail='state:N',
opacity=alt.condition(hover, alt.value(1.0), alt.value(0.1))
).properties(
width=800,
height=300
)
point = line.mark_point().encode(
opacity=alt.value(0.0)
).properties(
selection=hover
)
mean = alt.Chart().mark_line().encode(
x=alt.X('YEAR:Q', scale=alt.Scale(zero=False)),
y='mean(incidence):Q',
color=alt.value('black')
)
text = alt.Chart().mark_text(align='right').encode(
x='min(YEAR):Q',
y='mean(incidence):Q',
text='state:N',
detail='state:N',
opacity=alt.condition(hover, alt.value(1.0), alt.value(0.0))
)
alt.layer(point, line, mean, text, data=measles).interactive(bind_y=False)
```
|
github_jupyter
|
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
from __future__ import print_function
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import time
from pydrake.solvers.mathematicalprogram import MathematicalProgram, Solve
from pydrake.solvers.ipopt import IpoptSolver
mp = MathematicalProgram()
xy = mp.NewContinuousVariables(2, "xy")
#def constraint(xy):
# return np.array([xy[0]*xy[0] + 2.0*xy[1]*xy[1]])
#constraint_bounds = (np.array([0.]), np.array([1.]))
#mp.AddConstraint(constraint, constraint_bounds[0], constraint_bounds[1], xy)
def constraint(xy):
theta = 1.0
return np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]]).dot(
np.array([xy[0], xy[1]]))
constraint_bounds = (np.array([-0.5, -0.5]), np.array([0.5, 0.5]))
mp.AddConstraint(constraint, constraint_bounds[0], constraint_bounds[1], xy)
def cost(xy):
return xy[0]*1.0 + xy[1]*1.0
mp.AddCost(cost, xy)
#solver = IpoptSolver()
#result = solver.Solve(mp, None, None)
result = Solve(mp)
xystar = result.GetSolution()
print("Successful: ", result.is_success())
print("Solver: ", result.get_solver_id().name())
print("xystar: ", xystar)
# Demo of pulling costs / constraints from MathematicalProgram
# and evaluating them / getting gradients.
from pydrake.forwarddiff import gradient, jacobian
costs = mp.GetAllCosts()
total_cost_gradient = np.zeros(xystar.shape)
for cost in costs:
print("Cost: ", cost)
print("Eval at xystar: ", cost.evaluator().Eval(xystar))
grad = gradient(cost.evaluator().Eval, xystar)
print("Gradient at xystar: ", grad)
total_cost_gradient += grad
constraints = mp.GetAllConstraints()
total_constraint_gradient = np.zeros(xystar.shape)
for constraint in constraints:
print("Constraint: ", constraint)
val = constraint.evaluator().Eval(xystar)
print("Eval at xystar: ", val)
jac = jacobian(constraint.evaluator().Eval, xystar)
print("Gradient at xystar: ", jac)
total_constraint_gradient -= (val <= constraint_bounds[0] + 1E-6).dot(jac)
total_constraint_gradient += (val >= constraint_bounds[1] - 1E-6).dot(jac)
if np.any(total_cost_gradient):
total_cost_gradient /= np.linalg.norm(total_cost_gradient)
if np.any(total_constraint_gradient):
total_constraint_gradient /= np.linalg.norm(total_constraint_gradient)
print("Total cost grad dir: ", total_cost_gradient)
print("Total constraint grad dir: ", total_constraint_gradient)
# Draw feasible region
x_bounds = [-2., 2.]
y_bounds = [-2., 2.]
n_pts = [200, 300]
X, Y = np.meshgrid(np.linspace(x_bounds[0], x_bounds[1], n_pts[0]),
np.linspace(y_bounds[0], y_bounds[1], n_pts[1]),
indexing="ij")
vals = np.ones(n_pts)
for constraint in mp.GetAllConstraints():
for i in range(n_pts[0]):
for j in range(n_pts[1]):
vals_here = constraint.evaluator().Eval(np.array([X[i, j], Y[i, j]]))
vals[i, j] = (
np.all(vals_here >= constraint.evaluator().lower_bound()) and
np.all(vals_here <= constraint.evaluator().upper_bound())
)
plt.imshow(vals, extent=x_bounds+y_bounds)
arrow_cost = plt.arrow(
xystar[0], xystar[1],
total_cost_gradient[0]/2., total_cost_gradient[1]/2.,
width=0.05, color="g")
arrow_constraint = plt.arrow(
xystar[0], xystar[1],
total_constraint_gradient[0]/2., total_constraint_gradient[1]/2.,
width=0.05, color="r")
plt.legend([arrow_cost, arrow_constraint, ], ["Cost Increase Dir", "Constraint Violation Dir"]);
```
# Using the Prediction Model
## Environment
```
import getpass
import json
import os
import sys
import time
import pandas as pd
from tqdm import tqdm_notebook as tqdm
from seffnet.constants import (
DEFAULT_EMBEDDINGS_PATH, DEFAULT_GRAPH_PATH,
DEFAULT_MAPPING_PATH, DEFAULT_PREDICTIVE_MODEL_PATH,
RESOURCES
)
from seffnet.literature import query_europe_pmc
print(sys.version)
print(time.asctime())
print(getpass.getuser())
```
# Loading the Data
```
from seffnet.default_predictor import predictor
print(f"""Loaded default predictor using paths:
embeddings: {DEFAULT_EMBEDDINGS_PATH}
graph: {DEFAULT_GRAPH_PATH}
model: {DEFAULT_PREDICTIVE_MODEL_PATH}
mapping: {DEFAULT_MAPPING_PATH}
""")
```
# Examples of different kinds of predictions with literature evidence
## side effect - target association
```
r = predictor.find_new_relation(
source_name='EGFR_HUMAN',
target_name='Papulopustular rash',
)
print(json.dumps(r, indent=2))
#PMID: 18165622
r = predictor.find_new_relation(
source_id='9451', # Histamine receptor H1
target_id='331', # Drowsiness
)
print(json.dumps(r, indent=2))
#PMID: 26626077
r = predictor.find_new_relation(
source_id='9325', # SC6A2
target_id='56', # Tachycardia
)
print(json.dumps(r, indent=2))
#PMID: 30952858
r = predictor.find_new_relation(
source_id='8670', # ACES_HUMAN
target_id='309', # Bradycardia
)
print(json.dumps(r, indent=2))
#PMID: 30952858
```
## drug - side effect association
```
r = predictor.find_new_relation(
source_id='3534', # diazepam
target_id='670', # Libido decreased
)
print(json.dumps(r, indent=2))
#PMID: 29888057
r = predictor.find_new_relation(
source_id='1148', # Cytarabine
target_id='1149', # Anaemia megaloblastic
)
print(json.dumps(r, indent=2))
# PMID: 23157436
```
## drug-target association
```
r = predictor.find_new_relation(
source_id='14672', # Sertindole
target_id='9350', # CHRM1 receptor
)
print(json.dumps(r, indent=2))
# PMID: 29942259
```
# Example of predicting relations using node2vec model and embeddings
```
def get_predictions_df(curie, results_type=None):
results = predictor.find_new_relations(
node_curie=curie,
results_type=results_type,
k=50,
)
results_df = pd.DataFrame(results['predictions'])
results_df = results_df[['node_id', 'namespace', 'identifier', 'name', 'lor', 'novel']]
return results['query'], results_df
query, df = get_predictions_df('pubchem.compound:2159', 'phenotype')
print(json.dumps(query, indent=2))
df
query, df = get_predictions_df('pubchem.compound:4585', 'phenotype')
print(json.dumps(query, indent=2))
df
query, df = get_predictions_df('uniprot:P08172', 'phenotype')
print(json.dumps(query, indent=2))
df
query, df = get_predictions_df('uniprot:P08588', 'phenotype')
print(json.dumps(query, indent=2))
df
query, df = get_predictions_df('uniprot:P22303', 'phenotype')
print(json.dumps(query, indent=2))
df
query, df = get_predictions_df('uniprot:Q9UBN7', 'chemical')
print(json.dumps(query, indent=2))
df
query, df = get_predictions_df("umls:C0030567", 'chemical')
print(json.dumps(query, indent=2))
df
results = []
for ind, row in df.iterrows():
pmcid = []
lit = query_europe_pmc(
query_entity=row['name'],
target_entities=[
'umls:C0030567'
],
)
i = 0
for x in lit:
if i > 7:
pmcid.append('... ect')
lit.close()
break
pmcid.append(x['pmcid'])
i+=1
results.append((len(pmcid), pmcid))
df['co-occurance'] = results
df
df.to_csv(os.path.join(RESOURCES, 'parkinsons-chemicals.tsv'), sep='\t')
query, df = get_predictions_df('umls:C0242422', 'chemical')
print(json.dumps(query, indent=2))
df
query, df = get_predictions_df('pubchem.compound:5095', 'phenotype')
print(json.dumps(query, indent=2))
df
#PMID: 29241812
r = predictor.find_new_relation(
source_id='2071', #Amantadine
target_id='2248', #Parkinson's disease
)
print(json.dumps(r, indent=2))
#PMID: 21654146
r = predictor.find_new_relation(
source_id='5346', #Ropinirole
target_id='1348', #Restless legs syndrome
)
print(json.dumps(r, indent=2))
#PMID: 21654146
r = predictor.find_new_relation(
source_id='3627', #Disulfiram
target_id='2318', #Malignant melanoma
)
print(json.dumps(r, indent=2))
#PMID: 21654146
r = predictor.find_new_relation(
source_id='17528', #Brigatinib
target_id='5148', #Colorectal cancer
)
print(json.dumps(r, indent=2))
#PMID: 31410188
r = predictor.find_new_relation(
source_id='6995', #dasatinib
target_id='1179', #Diffuse large B-cell lymphoma
)
print(json.dumps(r, indent=2))
#PMID: 31383760
r = predictor.find_new_relation(
source_id='5265', #ribavirin
target_id='947', #Candida infection
)
print(json.dumps(r, indent=2))
#PMID: 31307986
```
# ex05-Filtering a Query with WHERE
Sometimes you’ll want to restrict the rows returned by a query to those where one or more columns meet certain criteria. This can be done with a WHERE clause. The WHERE clause is an optional clause of the SELECT statement. It appears after the FROM clause, as in the following statement:
>SELECT column_list FROM table_name WHERE search_condition;
```
%load_ext sql
```
### 1. Connect to the given database demo.db3
```
%sql sqlite:///data/demo.db3
```
If you do not remember the tables in the demo database, you can always list them with the following query.
```
%sql SELECT name FROM sqlite_master WHERE type='table'
```
### 2. Retrieving data with WHERE
Take the table of ***rch*** as an example.
#### 2.1 First, check the table columns
```
%sql SELECT * From rch LIMIT 5
```
#### 2.2 Check the number of rows
There should be 8280 rows. This can be checked with the SQLite ***COUNT*** function. We will touch on other SQLite functions over the next few notebooks.
```
%sql SELECT COUNT(*) as nrow From rch
```
#### 2.3 Use WHERE to retrieve data
Let’s say we are interested in records for only the year 1981. Using a WHERE clause is pretty straightforward for a simple criterion like this.
```
%sql SELECT RCH, YR, MO, FLOW_INcms, FLOW_OUTcms From rch WHERE YR=1981
```
#### 2.4 Use *AND* to further filter data
There are 23 RCHs. We are only interested in the 10th RCH. We can add another filter condition with an ***AND*** statement.
```
%sql SELECT RCH, YR, MO, FLOW_INcms, FLOW_OUTcms From rch WHERE YR=1981 AND RCH=10
```
#### 2.5 More combinations of filters
We can also further filter the data with the ***!=*** or ***<>*** operators to get data for all years except 1981.
```
%sql SELECT RCH, YR, MO, FLOW_INcms, FLOW_OUTcms From rch WHERE YR<>1981 and RCH=10 and MO=6
```
We can further filter the data to specific months using an ***OR*** statement. For example, we'd like to check the data in months 3, 6, 9, and 12. Note that we have to wrap the OR conditions in parentheses ***()*** so they are treated as a single condition.
```
%sql SELECT RCH, YR, MO, FLOW_INcms, FLOW_OUTcms From rch WHERE YR>2009 and RCH=10 and (MO=3 or MO=6 or MO=9 or MO=12)
```
Or we can simplify the above filter using the ***IN*** statement.
```
%sql SELECT RCH, YR, MO, FLOW_INcms, FLOW_OUTcms From rch WHERE YR>2009 and RCH=10 and MO in (3, 6, 9, 12)
```
Or select the months that are ***NOT*** in 3, 6, 9, 12:
```
%sql SELECT RCH, YR, MO, FLOW_INcms, FLOW_OUTcms From rch WHERE YR>2009 and RCH=10 and MO NOT IN (3,6,9,12)
```
#### 2.6 Filter with math operators
For example, we could use the modulus operator (%) to filter the MOs.
```
%sql SELECT RCH, YR, MO, FLOW_INcms, FLOW_OUTcms From rch WHERE YR>2009 and RCH=10 and MO % 3 = 0
```
### Summary
In the WHERE clause, we can use combinations of ***NOT, IN, <>, !=, >=, >, <, <=, AND, OR, ()*** and even some math operators (such as %, *, /, +, -) to retrieve the data we want easily and efficiently.
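As an illustrative example (the year threshold, reach numbers, and flow threshold below are arbitrary choices, not part of the original exercises), several of these operators can be combined in a single filter:
```
%sql SELECT RCH, YR, MO, FLOW_INcms, FLOW_OUTcms From rch WHERE YR >= 1985 AND RCH IN (5, 10) AND MO NOT IN (1, 2, 12) AND (FLOW_OUTcms > FLOW_INcms OR FLOW_OUTcms >= 100)
```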
```
!pip3 install qiskit
import qiskit
constant_index_dictionary = {}
constant_index_dictionary['0000'] = [0, 2]
constant_index_dictionary['0001'] = [2, 3]
constant_index_dictionary['0010'] = [0, 1]
constant_index_dictionary['0011'] = [1, 3]
constant_index_dictionary['0100'] = [2, 3]
constant_index_dictionary['0101'] = [1, 2]
constant_index_dictionary['0110'] = [0, 2]
constant_index_dictionary['0111'] = [0, 2]
constant_index_dictionary['1000'] = [0, 3]
constant_index_dictionary['1001'] = [0, 1]
constant_index_dictionary['1010'] = [1, 2]
constant_index_dictionary['1011'] = [0, 3]
constant_index_dictionary['1100'] = [1, 3]
constant_index_dictionary['1101'] = [2, 3]
constant_index_dictionary['1110'] = [1, 3]
constant_index_dictionary['1111'] = [0, 1]
import qiskit
import numpy as np
import time
CLASSICAL_REGISTER_LENGTH = 5
QUANTUM_REGISTER_LENGTH = 5
circuit_building_start_time = time.time()
simulator = qiskit.Aer.get_backend('qasm_simulator')
classical_register = qiskit.ClassicalRegister(CLASSICAL_REGISTER_LENGTH)
quantum_register = qiskit.QuantumRegister(QUANTUM_REGISTER_LENGTH)
circuit = qiskit.QuantumCircuit(quantum_register, classical_register)
circuit_building_end_time = time.time()
AND_gate_auxillary_qubit = QUANTUM_REGISTER_LENGTH - 1 # last qubit as the auxillary qubit
'''
Applies quantum AND operation to specified pair of qubits, stores the operation in AND_gate_auxillary_qubit,
and stores the result in a classical register
@PARAMS:
qubit1: position of the first qubit
qubit2: position of the second qubit
qubit1_one: whether the first qubit is NOT
qubit2_one: whether the second qubit is NOT
classical_register_position: the classical register position to store the measurement of AND_gate_auxillary_qubit
'''
def AND_2_qubit(qubit1, qubit2, qubit1_one, qubit2_one, classical_register_position):
if(qubit1_one):
circuit.x(quantum_register[qubit1])
if(qubit2_one):
circuit.x(quantum_register[qubit2])
circuit.ccx(quantum_register[qubit1], quantum_register[qubit2], quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[classical_register_position])
if(qubit1_one):
circuit.x(quantum_register[qubit1])
if(qubit2_one):
circuit.x(quantum_register[qubit2])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
'''
Applies the AND gate operation on a list of n qubits
@PARAMS:
qubit_list: list of qubits to perform the operation on
qubit_one_list: whether each of those qubits is NOT
@RETURN:
result of the n-qubit AND operation
'''
def AND_n_qubits(qubit_list, qubit_one_list):
length = len(qubit_list)
if(length != len(qubit_one_list)):
print("Incorrect dimensions")
return
classical_register_index = 0 # where to store pairwise AND operation results
# handling odd number of qubits by preprocessing the last qubit
if(length % 2 != 0):
if(qubit_one_list[length - 1] == 1):
circuit.x(quantum_register[qubit_list[length-1]])
circuit.cx(quantum_register[qubit_list[length - 1]], quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[classical_register_index])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
classical_register_index = classical_register_index + 1
if(qubit_one_list[length - 1] == 1):
circuit.x(quantum_register[qubit_list[length-1]])
length = length - 1
for index in range(length - 1, 0, -2):
AND_2_qubit(qubit_list[index], qubit_list[index - 1], qubit_one_list[index], qubit_one_list[index - 1], classical_register_index)
classical_register_index = classical_register_index + 1
job = qiskit.execute(circuit, simulator, shots=1)
result = job.result()
counts = str(result.get_counts())
counts = counts[counts.find('\'') + 1:]
counts = counts[:counts.find('\'')]
output = 1
for index in range(0, classical_register_index, 1):
output = output & int(counts[CLASSICAL_REGISTER_LENGTH - 1 - index])
return output
def controlled_n_qubit_h(qubit_list, qubit_one_list):
output = AND_n_qubits(qubit_list, qubit_one_list)
if(output == 1):
circuit.h(quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[0])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
job = qiskit.execute(circuit, simulator, shots=1)
result = job.result()
counts = str(result.get_counts())
counts = counts[counts.find('\'') + 1:]
counts = counts[:counts.find('\'')]
return int(counts[len(counts) - 1])
return 0
'''
the main circuit for the following truth table:
A, B, C, D = binary representation input state for the robot
P, Q, R, S = binary representation of the output state from the robot
New circuit in register...
'''
def main_circuit(STEPS, initial_state):
signature = ""
state = initial_state
step = 0
while (step < STEPS):
dont_care_list = constant_index_dictionary[state]
input_state = state
state = ""
P = controlled_n_qubit_h([0, 1, 3], [1, 1, 0]) | controlled_n_qubit_h([1, 2], [0, 1]) | controlled_n_qubit_h([0, 2, 3], [0, 0, 1]) | AND_n_qubits([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 3], [1, 1, 1]) | AND_n_qubits([1, 2, 3], [1, 1, 1])
Q = controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 1, 1]) | controlled_n_qubit_h([0, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([1, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 0, 1, 0]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 1, 0, 0]) | AND_n_qubits([0, 1, 3], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 1, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 0, 1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 1, 0])
R = controlled_n_qubit_h([0, 1, 2], [1, 1, 0]) | controlled_n_qubit_h([0, 1, 2], [0, 0, 0]) | controlled_n_qubit_h([0, 1, 3], [0, 1, 0]) | controlled_n_qubit_h([0, 2, 3], [0, 1, 1]) | AND_n_qubits([0, 1], [1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 1])
S = controlled_n_qubit_h([1, 2, 3], [1, 0, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 2], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 0])
state = state + str(P) + str(Q) + str(R) + str(S)
y = int(input_state, 2)^int(state,2)
y = bin(y)[2:].zfill(len(state))
# print("" + str(y) + " is the XOR string")
hamming_distance = len(y.replace('0', ""))
# print(input_state + " " + state + " " + str(hamming_distance))
step = step + hamming_distance
hidden_state = ""
for j in range(len(state)):
if(j in dont_care_list):
hidden_state = hidden_state + "x"
else:
hidden_state = hidden_state + state[j]
# print(state + " " + hidden_state)
signature = signature + hidden_state
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
print("End state: " + str(P) + str(Q) + str(R) + str(S) )
print("Signature: " + signature)
def initialise_starting_state(P, Q, R, S):
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
print("Message: " + str(P) + str(Q) + str(R) + str(S))
def measure_time():
total_time = 0
for i in range(100):
start_time = time.time()
# output = AND_n_qubits([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
output = controlled_n_qubit_h([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
print(str(i) + " " + str(output))
end_time = time.time()
total_time = total_time + (end_time - start_time)
print("Average time: " + str(total_time/100))
start_time = time.time()
initialise_starting_state(1, 0, 1, 1) # message to be signed
STEPS = 20 # security parameter: length of the walk
main_circuit(STEPS, '1011')
# measure_time()
end_time = time.time()
print("Run in time " + str(end_time - start_time))
print(circuit_building_end_time - circuit_building_start_time)
def recipient_initialise_starting_state(P, Q, R, S):
if(P == "1"):
circuit.x(quantum_register[0])
if(Q == "1"):
circuit.x(quantum_register[1])
if(R == "1"):
circuit.x(quantum_register[2])
if(S == "1"):
circuit.x(quantum_register[3])
print("Message: " + str(P) + str(Q) + str(R) + str(S))
def recipient(message, signature, end_state):
STEPS = len(signature)/len(end_state)
STEPS = int(STEPS)
index = 0
recipient_initialise_starting_state(message[0], message[1], message[2], message[3])
state = message
recreated_signature = ""
for _ in range(STEPS):
dont_care_list = constant_index_dictionary[state]
state = ""
P = controlled_n_qubit_h([0, 1, 3], [1, 1, 0]) | controlled_n_qubit_h([1, 2], [0, 1]) | controlled_n_qubit_h([0, 2, 3], [0, 0, 1]) | AND_n_qubits([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 3], [1, 1, 1]) | AND_n_qubits([1, 2, 3], [1, 1, 1])
Q = controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 1, 1]) | controlled_n_qubit_h([0, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([1, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 0, 1, 0]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 1, 0, 0]) | AND_n_qubits([0, 1, 3], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 1, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 0, 1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 1, 0])
R = controlled_n_qubit_h([0, 1, 2], [1, 1, 0]) | controlled_n_qubit_h([0, 1, 2], [0, 0, 0]) | controlled_n_qubit_h([0, 1, 3], [0, 1, 0]) | controlled_n_qubit_h([0, 2, 3], [0, 1, 1]) | AND_n_qubits([0, 1], [1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 1])
S = controlled_n_qubit_h([1, 2, 3], [1, 0, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 2], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 0])
if(signature[index] != "x" and signature[index] == "1"):
P = P | 1
elif(signature[index] != "x"):
P = P & 0
index = index + 1
if(signature[index] != "x" and signature[index] == "1"):
Q = Q | 1
elif(signature[index] != "x"):
Q = Q & 0
index = index + 1
if(signature[index] != "x" and signature[index] == "1"):
R = R | 1
elif(signature[index] != "x"):
R = R & 0
index = index + 1
if(signature[index] != "x" and signature[index] == "1"):
S = S | 1
elif(signature[index] != "x"):
S = S & 0
index = index + 1
state = "" + str(P) + str(Q) + str(R) + str(S)
hidden_state = ""
for j in range(len(state)):
if(j in dont_care_list):
hidden_state = hidden_state + "x"
else:
hidden_state = hidden_state + state[j]
print(state + " " + hidden_state)
recreated_signature = recreated_signature + hidden_state
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
print(recreated_signature)
print(signature)
if(recreated_signature == signature):
print("ACCEPT")
else:
print("REJECT")
start = time.time()
for _ in range(len(circuit.data)):
circuit.data.pop(0)
recipient("1011", "x10x10xxx10x1x1xxx101xx01xx1x01x1x0xx11x0x1xx1x0x0x0xx11", "1111")
for _ in range(len(circuit.data)):
circuit.data.pop(0)
recipient("1011", "x00x10xxx10x1x1xxx101xx01xx1x01x1x0xx11x0x1xx1x0x0x0xx11", "1111")
print(time.time() - start)
```
# Scheme 2
- The don't-care (`x`) positions are not transferred
- More secure
- Requires a one-time additional sharing of a dictionary
- From the two dictionaries, the total number of output states is inferred (in the cells below, 2 + 2 = 4); see the sketch that follows this list
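A minimal sketch of that counting, assuming the `constant_index_dictionary` and `hidden_index_dictionary` defined in the next cell (the helper name is ours, not part of the scheme):
```
# Minimal sketch: verify the 2 + 2 = 4 count mentioned above.
# Assumes the two dictionaries defined in the next cell.
def total_indices_per_state(state, constant_dict, hidden_dict):
    return len(constant_dict[state]) + len(hidden_dict[state])

# e.g. total_indices_per_state('0000', constant_index_dictionary, hidden_index_dictionary) == 4
```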
```
constant_index_dictionary = {}
constant_index_dictionary['0000'] = [0, 2]
constant_index_dictionary['0001'] = [2, 3]
constant_index_dictionary['0010'] = [0, 1]
constant_index_dictionary['0011'] = [1, 3]
constant_index_dictionary['0100'] = [2, 3]
constant_index_dictionary['0101'] = [1, 2]
constant_index_dictionary['0110'] = [0, 2]
constant_index_dictionary['0111'] = [0, 2]
constant_index_dictionary['1000'] = [0, 3]
constant_index_dictionary['1001'] = [0, 1]
constant_index_dictionary['1010'] = [1, 2]
constant_index_dictionary['1011'] = [0, 3]
constant_index_dictionary['1100'] = [1, 3]
constant_index_dictionary['1101'] = [2, 3]
constant_index_dictionary['1110'] = [1, 3]
constant_index_dictionary['1111'] = [0, 1]
# additional dictionary to be shared
hidden_index_dictionary = {}
hidden_index_dictionary['0000'] = [1, 3]
hidden_index_dictionary['0001'] = [0, 1]
hidden_index_dictionary['0010'] = [2, 3]
hidden_index_dictionary['0011'] = [0, 2]
hidden_index_dictionary['0100'] = [0, 1]
hidden_index_dictionary['0101'] = [0, 3]
hidden_index_dictionary['0110'] = [1, 3]
hidden_index_dictionary['0111'] = [1, 3]
hidden_index_dictionary['1000'] = [1, 2]
hidden_index_dictionary['1001'] = [2, 3]
hidden_index_dictionary['1010'] = [0, 3]
hidden_index_dictionary['1011'] = [1, 2]
hidden_index_dictionary['1100'] = [0, 2]
hidden_index_dictionary['1101'] = [0, 1]
hidden_index_dictionary['1110'] = [0, 2]
hidden_index_dictionary['1111'] = [2, 3]
import qiskit
import numpy as np
import time
CLASSICAL_REGISTER_LENGTH = 5
QUANTUM_REGISTER_LENGTH = 5
circuit_building_start_time = time.time()
simulator = qiskit.Aer.get_backend('qasm_simulator')
classical_register = qiskit.ClassicalRegister(CLASSICAL_REGISTER_LENGTH)
quantum_register = qiskit.QuantumRegister(QUANTUM_REGISTER_LENGTH)
circuit = qiskit.QuantumCircuit(quantum_register, classical_register)
circuit_building_end_time = time.time()
AND_gate_auxillary_qubit = QUANTUM_REGISTER_LENGTH - 1 # last qubit as the auxillary qubit
'''
Applies quantum AND operation to specified pair of qubits, stores the operation in AND_gate_auxillary_qubit,
and stores the result in a classical register
@PARAMS:
qubit1: position of the first qubit
qubit2: position of the second qubit
qubit1_one: whether the first qubit is NOT
qubit2_one: whether the second qubit is NOT
classical_register_position: the classical register position to store the measurement of AND_gate_auxillary_qubit
'''
def AND_2_qubit(qubit1, qubit2, qubit1_one, qubit2_one, classical_register_position):
if(qubit1_one):
circuit.x(quantum_register[qubit1])
if(qubit2_one):
circuit.x(quantum_register[qubit2])
circuit.ccx(quantum_register[qubit1], quantum_register[qubit2], quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[classical_register_position])
if(qubit1_one):
circuit.x(quantum_register[qubit1])
if(qubit2_one):
circuit.x(quantum_register[qubit2])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
'''
Applies the AND gate operation on a list of n qubits
@PARAMS:
qubit_list: list of qubits to perform the operation on
qubit_one_list: whether each of those qubits is NOT
@RETURN:
result of the n-qubit AND operation
'''
def AND_n_qubits(qubit_list, qubit_one_list):
length = len(qubit_list)
if(length != len(qubit_one_list)):
print("Incorrect dimensions")
return
classical_register_index = 0 # where to store pairwise AND operation results
# handling odd number of qubits by preprocessing the last qubit
if(length % 2 != 0):
if(qubit_one_list[length - 1] == 1):
circuit.x(quantum_register[qubit_list[length-1]])
circuit.cx(quantum_register[qubit_list[length - 1]], quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[classical_register_index])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
classical_register_index = classical_register_index + 1
if(qubit_one_list[length - 1] == 1):
circuit.x(quantum_register[qubit_list[length-1]])
length = length - 1
for index in range(length - 1, 0, -2):
AND_2_qubit(qubit_list[index], qubit_list[index - 1], qubit_one_list[index], qubit_one_list[index - 1], classical_register_index)
classical_register_index = classical_register_index + 1
job = qiskit.execute(circuit, simulator, shots=1)
result = job.result()
counts = str(result.get_counts())
counts = counts[counts.find('\'') + 1:]
counts = counts[:counts.find('\'')]
output = 1
for index in range(0, classical_register_index, 1):
output = output & int(counts[CLASSICAL_REGISTER_LENGTH - 1 - index])
return output
def controlled_n_qubit_h(qubit_list, qubit_one_list):
output = AND_n_qubits(qubit_list, qubit_one_list)
if(output == 1):
circuit.h(quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[0])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
job = qiskit.execute(circuit, simulator, shots=1)
result = job.result()
counts = str(result.get_counts())
counts = counts[counts.find('\'') + 1:]
counts = counts[:counts.find('\'')]
return int(counts[len(counts) - 1])
return 0
'''
the main circuit for the following truth table:
A, B, C, D = binary representation input state for the robot
P, Q, R, S = binary representation of the output state from the robot
New circuit in register...
'''
def main_circuit(STEPS, initial_state):
signature = ""
state = initial_state
for _ in range(STEPS):
dont_care_list = constant_index_dictionary[state]
state = ""
P = controlled_n_qubit_h([0, 1, 3], [1, 1, 0]) | controlled_n_qubit_h([1, 2], [0, 1]) | controlled_n_qubit_h([0, 2, 3], [0, 0, 1]) | AND_n_qubits([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 3], [1, 1, 1]) | AND_n_qubits([1, 2, 3], [1, 1, 1])
Q = controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 1, 1]) | controlled_n_qubit_h([0, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([1, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 0, 1, 0]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 1, 0, 0]) | AND_n_qubits([0, 1, 3], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 1, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 0, 1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 1, 0])
R = controlled_n_qubit_h([0, 1, 2], [1, 1, 0]) | controlled_n_qubit_h([0, 1, 2], [0, 0, 0]) | controlled_n_qubit_h([0, 1, 3], [0, 1, 0]) | controlled_n_qubit_h([0, 2, 3], [0, 1, 1]) | AND_n_qubits([0, 1], [1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 1])
S = controlled_n_qubit_h([1, 2, 3], [1, 0, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 2], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 0])
state = state + str(P) + str(Q) + str(R) + str(S)
hidden_state = ""
for j in range(len(state)):
if(j in dont_care_list):
pass
else:
hidden_state = hidden_state + state[j]
print(state + " " + hidden_state)
signature = signature + hidden_state
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
print("End state: " + str(P) + str(Q) + str(R) + str(S) )
print("Signature: " + signature)
def initialise_starting_state(P, Q, R, S):
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
print("Message: " + str(P) + str(Q) + str(R) + str(S))
def measure_time():
total_time = 0
for i in range(100):
start_time = time.time()
# output = AND_n_qubits([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
output = controlled_n_qubit_h([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
print(str(i) + " " + str(output))
end_time = time.time()
total_time = total_time + (end_time - start_time)
print("Average time: " + str(total_time/100))
start_time = time.time()
initialise_starting_state(0, 1, 0, 1) # message to be signed
STEPS = 10 # security parameter: length of the walk
main_circuit(STEPS, '0101')
# measure_time()
end_time = time.time()
print("Run in time " + str(end_time - start_time))
print(circuit_building_end_time - circuit_building_start_time)
def recipient_initialise_starting_state(P, Q, R, S):
if(P == "1"):
circuit.x(quantum_register[0])
if(Q == "1"):
circuit.x(quantum_register[1])
if(R == "1"):
circuit.x(quantum_register[2])
if(S == "1"):
circuit.x(quantum_register[3])
print("Message: " + str(P) + str(Q) + str(R) + str(S))
def recipient(message, signature, end_state):
# for every 2 bits, there are 2 additional hidden bits, by definition of the shared data structures
STEPS = (2*len(signature))/len(end_state)
STEPS = int(STEPS)
index = 0
recipient_initialise_starting_state(message[0], message[1], message[2], message[3])
state = message
recreated_signature = ""
recreated_original_signature = ""
for _ in range(STEPS):
dont_care_list = constant_index_dictionary[state]
hidden_index_list = hidden_index_dictionary[state]
# print(state + " " + str(hidden_index_list))
state = ""
P = controlled_n_qubit_h([0, 1, 3], [1, 1, 0]) | controlled_n_qubit_h([1, 2], [0, 1]) | controlled_n_qubit_h([0, 2, 3], [0, 0, 1]) | AND_n_qubits([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 3], [1, 1, 1]) | AND_n_qubits([1, 2, 3], [1, 1, 1])
Q = controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 1, 1]) | controlled_n_qubit_h([0, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([1, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 0, 1, 0]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 1, 0, 0]) | AND_n_qubits([0, 1, 3], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 1, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 0, 1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 1, 0])
R = controlled_n_qubit_h([0, 1, 2], [1, 1, 0]) | controlled_n_qubit_h([0, 1, 2], [0, 0, 0]) | controlled_n_qubit_h([0, 1, 3], [0, 1, 0]) | controlled_n_qubit_h([0, 2, 3], [0, 1, 1]) | AND_n_qubits([0, 1], [1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 1])
S = controlled_n_qubit_h([1, 2, 3], [1, 0, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 2], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 0])
for i in range(len(hidden_index_list)):
temp_index = hidden_index_list[i]
if(temp_index == 0):
if(signature[index] == '1'):
P = P | 1
else:
P = P & 0
elif(temp_index == 1):
if(signature[index] == '1'):
Q = Q | 1
else:
Q = Q & 0
elif(temp_index == 2):
if(signature[index] == '1'):
R = R | 1
else:
R = R & 0
elif(temp_index == 3):
if(signature[index] == '1'):
S = S | 1
else:
S = S & 0
index = index + 1
state = "" + str(P) + str(Q) + str(R) + str(S)
hidden_state = ""
for j in range(len(state)):
if(j in dont_care_list):
# hidden_state = hidden_state + "x"
pass
else:
hidden_state = hidden_state + state[j]
print(state + " " + hidden_state)
recreated_signature = recreated_signature + hidden_state
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
if(recreated_signature == signature and end_state == state):
print("ACCEPT")
else:
print("REJECT")
start = time.time()
# for _ in range(len(circuit.data)):
# circuit.data.pop(0)
# recipient("0101", "10011010111000010011", "1111")
for _ in range(len(circuit.data)):
circuit.data.pop(0)
recipient("0101", "1000110000100000000", "0110")
print(time.time() - start)
```
# k-Path dependent scheme
```
constant_index_dictionary = {}
constant_index_dictionary['0000'] = [0, 2]
constant_index_dictionary['0001'] = [2, 3]
constant_index_dictionary['0010'] = [0, 1]
constant_index_dictionary['0011'] = [1, 3]
constant_index_dictionary['0100'] = [2, 3]
constant_index_dictionary['0101'] = [1, 2]
constant_index_dictionary['0110'] = [0, 2]
constant_index_dictionary['0111'] = [0, 2]
constant_index_dictionary['1000'] = [0, 3]
constant_index_dictionary['1001'] = [0, 1]
constant_index_dictionary['1010'] = [1, 2]
constant_index_dictionary['1011'] = [0, 3]
constant_index_dictionary['1100'] = [1, 3]
constant_index_dictionary['1101'] = [2, 3]
constant_index_dictionary['1110'] = [1, 3]
constant_index_dictionary['1111'] = [0, 1]
import qiskit
import numpy as np
import time
CLASSICAL_REGISTER_LENGTH = 5
QUANTUM_REGISTER_LENGTH = 5
circuit_building_start_time = time.time()
simulator = qiskit.Aer.get_backend('qasm_simulator')
classical_register = qiskit.ClassicalRegister(CLASSICAL_REGISTER_LENGTH)
quantum_register = qiskit.QuantumRegister(QUANTUM_REGISTER_LENGTH)
circuit = qiskit.QuantumCircuit(quantum_register, classical_register)
circuit_building_end_time = time.time()
AND_gate_auxillary_qubit = QUANTUM_REGISTER_LENGTH - 1 # last qubit as the auxillary qubit
'''
Applies quantum AND operation to specified pair of qubits, stores the operation in AND_gate_auxillary_qubit,
and stores the result in a classical register
@PARAMS:
qubit1: position of the first qubit
qubit2: position of the second qubit
qubit1_one: whether the first qubit is NOT
qubit2_one: whether the second qubit is NOT
classical_register_position: the classical register position to store the measurement of AND_gate_auxillary_qubit
'''
def AND_2_qubit(qubit1, qubit2, qubit1_one, qubit2_one, classical_register_position):
if(qubit1_one):
circuit.x(quantum_register[qubit1])
if(qubit2_one):
circuit.x(quantum_register[qubit2])
circuit.ccx(quantum_register[qubit1], quantum_register[qubit2], quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[classical_register_position])
if(qubit1_one):
circuit.x(quantum_register[qubit1])
if(qubit2_one):
circuit.x(quantum_register[qubit2])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
'''
Applies the AND gate operation on a list of n qubits
@PARAMS:
qubit_list: list of qubits to perform the operation on
qubit_one_list: whether each of those qubits is NOT
@RETURN:
result of the n-qubit AND operation
'''
def AND_n_qubits(qubit_list, qubit_one_list):
length = len(qubit_list)
if(length != len(qubit_one_list)):
print("Incorrect dimensions")
return
classical_register_index = 0 # where to store pairwise AND operation results
# handling odd number of qubits by preprocessing the last qubit
if(length % 2 != 0):
if(qubit_one_list[length - 1] == 1):
circuit.x(quantum_register[qubit_list[length-1]])
circuit.cx(quantum_register[qubit_list[length - 1]], quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[classical_register_index])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
classical_register_index = classical_register_index + 1
if(qubit_one_list[length - 1] == 1):
circuit.x(quantum_register[qubit_list[length-1]])
length = length - 1
for index in range(length - 1, 0, -2):
AND_2_qubit(qubit_list[index], qubit_list[index - 1], qubit_one_list[index], qubit_one_list[index - 1], classical_register_index)
classical_register_index = classical_register_index + 1
job = qiskit.execute(circuit, simulator, shots=1)
result = job.result()
counts = str(result.get_counts())
counts = counts[counts.find('\'') + 1:]
counts = counts[:counts.find('\'')]
output = 1
for index in range(0, classical_register_index, 1):
output = output & int(counts[CLASSICAL_REGISTER_LENGTH - 1 - index])
return output
def controlled_n_qubit_h(qubit_list, qubit_one_list):
output = AND_n_qubits(qubit_list, qubit_one_list)
if(output == 1):
circuit.h(quantum_register[AND_gate_auxillary_qubit])
circuit.measure(quantum_register[AND_gate_auxillary_qubit], classical_register[0])
circuit.reset(quantum_register[AND_gate_auxillary_qubit])
job = qiskit.execute(circuit, simulator, shots=1)
result = job.result()
counts = str(result.get_counts())
counts = counts[counts.find('\'') + 1:]
counts = counts[:counts.find('\'')]
return int(counts[len(counts) - 1])
return 0
'''
the main circuit for the following truth table:
A, B, C, D = binary representation input state for the robot
P, Q, R, S = binary representation of the output state from the robot
New circuit in register...
'''
def main_circuit(STEPS, initial_state):
signature = ""
state = initial_state
used_states = []
step = 0
rollback_count = 0
while True:
if(step == STEPS):
break
dont_care_list = constant_index_dictionary[state]
rollback_state = state
if(state not in used_states):
used_states.append(state)
state = ""
P = controlled_n_qubit_h([0, 1, 3], [1, 1, 0]) | controlled_n_qubit_h([1, 2], [0, 1]) | controlled_n_qubit_h([0, 2, 3], [0, 0, 1]) | AND_n_qubits([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 3], [1, 1, 1]) | AND_n_qubits([1, 2, 3], [1, 1, 1])
Q = controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 1, 1]) | controlled_n_qubit_h([0, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([1, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 0, 1, 0]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 1, 0, 0]) | AND_n_qubits([0, 1, 3], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 1, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 0, 1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 1, 0])
R = controlled_n_qubit_h([0, 1, 2], [1, 1, 0]) | controlled_n_qubit_h([0, 1, 2], [0, 0, 0]) | controlled_n_qubit_h([0, 1, 3], [0, 1, 0]) | controlled_n_qubit_h([0, 2, 3], [0, 1, 1]) | AND_n_qubits([0, 1], [1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 1])
S = controlled_n_qubit_h([1, 2, 3], [1, 0, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 2], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 0])
state = state + str(P) + str(Q) + str(R) + str(S)
if(state in used_states):
rollback_count = rollback_count + 1
if(rollback_count == (len(initial_state) + 10)):
print("Aborting.")
return "ABORT"
P = rollback_state[0]
Q = rollback_state[1]
R = rollback_state[2]
S = rollback_state[3]
state = rollback_state
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == '1'):
print("Rollback reset")
circuit.x(quantum_register[0])
if(Q == '1'):
print("Rollback reset")
circuit.x(quantum_register[1])
if(R == '1'):
print("Rollback reset")
circuit.x(quantum_register[2])
if(S == '1'):
print("Rollback reset")
circuit.x(quantum_register[3])
print("Rolling back")
continue
step = step + 1
rollback = 0
hidden_state = ""
for j in range(len(state)):
if(j in dont_care_list):
hidden_state = hidden_state + "x"
else:
hidden_state = hidden_state + state[j]
signature = signature + hidden_state
# print(state + " " + hidden_state)
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
return signature
def initialise_starting_state(P, Q, R, S):
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
print("Message: " + str(P) + str(Q) + str(R) + str(S))
def measure_time():
total_time = 0
for i in range(100):
start_time = time.time()
# output = AND_n_qubits([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
output = controlled_n_qubit_h([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
print(str(i) + " " + str(output))
end_time = time.time()
total_time = total_time + (end_time - start_time)
print("Average time: " + str(total_time/100))
```
Creating a long message of 100 *bits*
```
def create_random_message(NUMBER_OF_BITS):
message = ""
c = qiskit.ClassicalRegister(1)
q = qiskit.QuantumRegister(1)
s = qiskit.Aer.get_backend('qasm_simulator')
for i in range(NUMBER_OF_BITS):
print(i)
random_circuit = qiskit.QuantumCircuit(q, c)
random_circuit.h(q[0])
random_circuit.measure(q[0], c[0])
job = qiskit.execute(random_circuit, s, shots=1)
result = job.result()
counts = str(result.get_counts())
counts = counts[counts.find('\'') + 1:]
counts = counts[:counts.find('\'')]
message = message + counts
print(message)
create_random_message(100)
```
Signing a long message
```
def sign_message(message):
signature = ""
ITER = int(len(message)/4)
start_time = time.time()
STEPS = 5 # security parameter: length of the walk
iter = 0
while True:
if(iter == ITER):
break
state = message[0:4]
initialise_starting_state(int(state[0]), int(state[1]), int(state[2]), int(state[3]))
return_signature = main_circuit(STEPS, state)
if(return_signature == "ABORT"):
print("Rerun")
continue
iter = iter + 1
signature = signature + return_signature
message = message[4:]
end_time = time.time()
print("Run in time " + str(end_time - start_time))
print(signature)
sign_message('1011000001011010110011111011011100111001000010001111011101101100010100100011010010111000110101100011')
print(len('x00x10xxx10x1x1xxx01'))
def recipient_initialise_starting_state(P, Q, R, S):
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == "1"):
circuit.x(quantum_register[0])
if(Q == "1"):
circuit.x(quantum_register[1])
if(R == "1"):
circuit.x(quantum_register[2])
if(S == "1"):
circuit.x(quantum_register[3])
print("Message: " + str(P) + str(Q) + str(R) + str(S))
def recipient(message, signature, end_state):
STEPS = len(signature)/len(end_state)
STEPS = int(STEPS)
index = 0
recipient_initialise_starting_state(message[0], message[1], message[2], message[3])
state = message
recreated_signature = ""
for _ in range(STEPS):
dont_care_list = constant_index_dictionary[state]
state = ""
P = controlled_n_qubit_h([0, 1, 3], [1, 1, 0]) | controlled_n_qubit_h([1, 2], [0, 1]) | controlled_n_qubit_h([0, 2, 3], [0, 0, 1]) | AND_n_qubits([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 3], [1, 1, 1]) | AND_n_qubits([1, 2, 3], [1, 1, 1])
Q = controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 1, 1]) | controlled_n_qubit_h([0, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([1, 2, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 0, 1, 0]) | controlled_n_qubit_h([0, 1, 2, 3], [0, 1, 0, 0]) | AND_n_qubits([0, 1, 3], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 1, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [1, 0, 1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 1, 0])
R = controlled_n_qubit_h([0, 1, 2], [1, 1, 0]) | controlled_n_qubit_h([0, 1, 2], [0, 0, 0]) | controlled_n_qubit_h([0, 1, 3], [0, 1, 0]) | controlled_n_qubit_h([0, 2, 3], [0, 1, 1]) | AND_n_qubits([0, 1], [1, 0]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 1])
S = controlled_n_qubit_h([1, 2, 3], [1, 0, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 1, 1]) | controlled_n_qubit_h([0, 1, 3], [1, 0, 0]) | controlled_n_qubit_h([0, 1, 2], [1, 0, 0]) | controlled_n_qubit_h([1, 2, 3], [0, 0, 0]) | AND_n_qubits([0, 1, 2], [0, 0, 1]) | AND_n_qubits([0, 1, 2, 3], [0, 1, 0, 0])
if(signature[index] != "x" and signature[index] == "1"):
P = P | 1
elif(signature[index] != "x"):
P = P & 0
index = index + 1
if(signature[index] != "x" and signature[index] == "1"):
Q = Q | 1
elif(signature[index] != "x"):
Q = Q & 0
index = index + 1
if(signature[index] != "x" and signature[index] == "1"):
R = R | 1
elif(signature[index] != "x"):
R = R & 0
index = index + 1
if(signature[index] != "x" and signature[index] == "1"):
S = S | 1
elif(signature[index] != "x"):
S = S & 0
index = index + 1
state = "" + str(P) + str(Q) + str(R) + str(S)
hidden_state = ""
for j in range(len(state)):
if(j in dont_care_list):
hidden_state = hidden_state + "x"
else:
hidden_state = hidden_state + state[j]
recreated_signature = recreated_signature + hidden_state
print(state + " " + hidden_state)
for _ in range(len(circuit.data)):
circuit.data.pop(0)
if(P == 1):
circuit.x(quantum_register[0])
if(Q == 1):
circuit.x(quantum_register[1])
if(R == 1):
circuit.x(quantum_register[2])
if(S == 1):
circuit.x(quantum_register[3])
print(recreated_signature)
print(signature)
if(recreated_signature == signature):
print("ACCEPT")
else:
print("REJECT")
return recreated_signature
import time
start = time.time()
for _ in range(len(circuit.data)):
circuit.data.pop(0)
STEPS = int(len('1011000001011010110011111011011100111001000010001111011101101100010100100011010010111000110101100011') / 4)
message = '1011000001011010110011111011011100111001000010001111011101101100010100100011010010111000110101100011'
signature = 'x11xx1x1xx01xx10x0x1x1x01x0x10xxxx10x1x10xx1x1x1xx11x00x00xx0xx0xx11xx00x10x1x0x0x0x1xx1xx00x11x0x0xxx11x11xx1x00x1xx0x1x01x1x0xx01x0xx0xx01x1x1xx00x10x0x0x1xx00x0xx1x101xx0xx0x1x1xx10x1x1x0x1x01x1x0xx1x101xx1xx1xx00x10xx01x1xx1x10x0xx0x0x0xx01xx10x1x1x0x1x00xx0x01xx1x00x11xx1x1xx0x10x0xx1x01x0x10xx1x1xxx00x01x0xx10x1x1xx00x1xx0x10x1xxx11xx0100xx10xxx11x0x0x0x1xxx101x0x1x1xxx0010xx0xx10x1xxx11xx01x10x0xx0x0x0xx0110xxx01x0xx10x0xx0x1xx0000xx10xxx11x0x1xx1x1x0x0xx100x0x10xx0xx10x1xxx100x1xx1x1x1x1'
temp_signature = signature
k = int(len(signature)/len(message))
end_index = k*4
recipient_signature = ""
for _ in range(STEPS):
start_state = message[0:4]
message = message[4:]
mess_signature = signature[0:end_index]
signature = signature[end_index:]
recipient_signature = recipient_signature + recipient(start_state, mess_signature, '0000')
if(recipient_signature == temp_signature):
print("ACCEPT")
else:
print("REJECT")
print(time.time() - start)
# Output-like lines (apparently pasted from earlier runs); commented out so the cell runs:
# 1111 1xx1 1111 1xx1
# 1011 xx11 1011 xx11
# Rolling back
# 1101 x10x 0101 x10x
# 0001 00xx 0010 0xx0
# 1000 10xx 1010 xx10
print(recipient('1011', 'x11xxx01xx10x0x0xx0100xx11xx1x1xxx11x11x', '0000'))
print(recipient('1001', 'xx001x1xxx01xx0011xx1x0x0x1xx1x1xx100xx0', '0000'))
```
# Test web application locally
This notebook pulls some images and tests them against the local web app running inside the Docker container we made previously.
```
import matplotlib.pyplot as plt
import numpy as np
from testing_utilities import *
import requests
%matplotlib inline
%load_ext autoreload
%autoreload 2
docker_login = 'fboylu'
image_name = docker_login + '/kerastf-gpu'
```
Run the Docker container in the background and open port 80. Notice that we are using the nvidia-docker command and not docker.
```
%%bash --bg -s "$image_name"
nvidia-docker run -p 80:80 $1
```
Wait a few seconds for the application to spin up and then check that everything works.
```
!curl 'http://0.0.0.0:80/'
!curl 'http://0.0.0.0:80/version'
```
Pull an image of a Lynx to test our local web app with.
```
IMAGEURL = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg"
plt.imshow(to_img(IMAGEURL))
jsonimg = img_url_to_json(IMAGEURL)
jsonimg[:100]
headers = {'content-type': 'application/json'}
%time r = requests.post('http://0.0.0.0:80/score', data=jsonimg, headers=headers)
print(r)
r.json()
```
Let's try a few more images.
```
images = ('https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg',
'https://upload.wikimedia.org/wikipedia/commons/3/3a/Roadster_2.5_windmills_trimmed.jpg',
'http://www.worldshipsociety.org/wp-content/themes/construct/lib/scripts/timthumb/thumb.php?src=http://www.worldshipsociety.org/wp-content/uploads/2013/04/stock-photo-5495905-cruise-ship.jpg&w=570&h=370&zc=1&q=100',
'http://yourshot.nationalgeographic.com/u/ss/fQYSUbVfts-T7pS2VP2wnKyN8wxywmXtY0-FwsgxpiZv_E9ZfPsNV5B0ER8-bOdruvNfMD5EbP4SznWz4PYn/',
'https://cdn.arstechnica.net/wp-content/uploads/2012/04/bohol_tarsier_wiki-4f88309-intro.jpg',
'http://i.telegraph.co.uk/multimedia/archive/03233/BIRDS-ROBIN_3233998b.jpg')
url = 'http://0.0.0.0:80/score'
results = [requests.post(url, data=img_url_to_json(img), headers=headers) for img in images]
plot_predictions_dict(images, results)
```
Next, let's quickly check the request-response performance of the locally running Docker container.
```
image_data = list(map(img_url_to_json, images)) # Retrieve the images and data
timer_results = list()
for img in image_data:
res=%timeit -r 1 -o -q requests.post(url, data=img, headers=headers)
timer_results.append(res.best)
timer_results
print('Average time taken: {0:4.2f} ms'.format(10**3 * np.mean(timer_results)))
%%bash
docker stop $(docker ps -q)
```
We can now [deploy our web application on AKS](04_DeployOnAKS.ipynb).
# Data analysis with Python, Apache Spark, and PixieDust
***
In this notebook you will:
* analyze customer demographics, such as age, gender, income, and location
* combine that data with sales data to examine trends for product categories, transaction types, and product popularity
* load data from GitHub as well as from a public open data set
* cleanse, shape, and enrich the data, and then visualize the data with the PixieDust library
Don't worry! PixieDust graphs don't require coding.
By the end of the notebook, you will understand how to combine data to gain insights about which customers you might target to increase sales.
This notebook runs on Python 2 with Spark 2.1, and PixieDust 1.1.10.
<a id="toc"></a>
## Table of contents
#### [Setup](#Setup)
[Load data into the notebook](#Load-data-into-the-notebook)
#### [Explore customer demographics](#part1)
[Prepare the customer data set](#Prepare-the-customer-data-set)<br>
[Visualize customer demographics and locations](#Visualize-customer-demographics-and-locations)<br>
[Enrich demographic information with open data](#Enrich-demographic-information-with-open-data)<br>
#### [Summary and next steps](#summary)
## Setup
You need to import libraries and load the customer data into this notebook.
Import the necessary libraries:
```
import pixiedust
import pyspark.sql.functions as func
import pyspark.sql.types as types
import re
import json
import os
import requests
```
**If you get any errors or if a package is out of date:**
* uncomment the lines in the next cell (remove the `#`)
* restart the kernel (from the Kernel menu at the top of the notebook)
* reload the browser page
* run the cell above, and continue with the notebook
```
#!pip install jinja2 --user --upgrade
#!pip install pixiedust --user --upgrade
#!pip install -U --no-deps bokeh
```
### Load data into the notebook
The data file contains both the customer demographic data that you'll analyze in Part 1 and the sales transaction data for Part 2.
With `pixiedust.sampleData()` you can load CSV data from any URL. The cell below loads the data into a Spark DataFrame.
> In case you wondered, this works with Pandas as well: just add `forcePandas = True` to load the data into a Pandas DataFrame. *But do not add this to the cell below, as this notebook uses Spark.*
```
raw_df = pixiedust.sampleData('https://raw.githubusercontent.com/IBM/analyze-customer-data-spark-pixiedust/master/data/customers_orders1_opt.csv')
raw_df
```
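For reference only, here is a sketch of the Pandas variant described in the note above. It is left commented out because this notebook continues with the Spark DataFrame (the variable name `pandas_raw_df` is just an illustration):
```
# For illustration only -- this notebook uses the Spark DataFrame loaded above.
# pandas_raw_df = pixiedust.sampleData(
#     'https://raw.githubusercontent.com/IBM/analyze-customer-data-spark-pixiedust/master/data/customers_orders1_opt.csv',
#     forcePandas=True)
```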
[Back to Table of Contents](#toc)
<a id="part1"></a>
# Explore customer demographics
In this part of the notebook, you will prepare the customer data and then start learning about your customers by creating multiple charts and maps.
## Prepare the customer data set
Create a new Spark DataFrame with only the data you need and then cleanse and enrich the data.
Extract the columns that you are interested in, remove duplicate customers, and add a column for aggregations:
```
# Extract the customer information from the data set
customer_df = raw_df.select("CUST_ID",
"CUSTNAME",
"ADDRESS1",
"ADDRESS2",
"CITY",
"POSTAL_CODE",
"POSTAL_CODE_PLUS4",
"STATE",
"COUNTRY_CODE",
"EMAIL_ADDRESS",
"PHONE_NUMBER",
"AGE",
"GenderCode",
"GENERATION",
"NATIONALITY",
"NATIONAL_ID",
"DRIVER_LICENSE").dropDuplicates()
customer_df.printSchema()
```
Notice that the data type of the AGE column is currently a string. Convert the AGE column to a numeric data type so you can run calculations on customer age.
```
# ---------------------------------------
# Cleanse age (enforce numeric data type)
# ---------------------------------------
def getNumericVal(col):
"""
input: pyspark.sql.types.Column
output: the numeric value represented by col or None
"""
try:
return int(col)
except ValueError:
# age-33
match = re.match('^age\-(\d+)$', col)
if match:
try:
return int(match.group(1))
except ValueError:
return None
return None
toNumericValUDF = func.udf(lambda c: getNumericVal(c), types.IntegerType())
customer_df = customer_df.withColumn("AGE", toNumericValUDF(customer_df["AGE"]))
customer_df
customer_df.show(5)
```
The GenderCode column contains salutations instead of gender values. Derive the gender information for each customer based on the salutation and rename the GenderCode column to GENDER.
```
# ------------------------------
# Derive gender from salutation
# ------------------------------
def deriveGender(col):
""" input: pyspark.sql.types.Column
output: "male", "female" or "unknown"
"""
if col in ['Mr.', 'Master.']:
return 'male'
elif col in ['Mrs.', 'Miss.']:
return 'female'
else:
return 'unknown';
deriveGenderUDF = func.udf(lambda c: deriveGender(c), types.StringType())
customer_df = customer_df.withColumn("GENDER", deriveGenderUDF(customer_df["GenderCode"]))
customer_df.cache()
```
## Explore the customer data set
Instead of exploring the data with `.printSchema()` and `.show()`, you can quickly explore data sets using PixieDust. Invoke the `display()` command and click the table icon to review the schema and preview the data. Customize the options to display only a subset of the fields or rows, or apply a filter (by clicking the funnel icon).
```
display(customer_df)
```
[Back to Table of Contents](#toc)
## Visualize customer demographics and locations
Now you are ready to explore the customer base. Using simple charts, you can quickly see these characteristics:
* Customer demographics (gender and age)
* Customer locations (city, state, and country)
You will create charts with the PixieDust library:
- [View customers by gender in a pie chart](#View-customers-by-gender-in-a-pie-chart)
- [View customers by generation in a bar chart](#View-customers-by-generation-in-a-bar-chart)
- [View customers by age in a histogram chart](#View-customers-by-age-in-a-histogram-chart)
- [View specific information with a filter function](#View-specific-information-with-a-filter-function)
- [View customer density by location with a map](#View-customer-density-by-location-with-a-map)
### View customers by gender in a pie chart
Run the `display()` command and then configure the graph to show the percentages of male and female customers:
1. Run the next cell. The PixieDust interactive widget appears.
1. Click the chart button and choose **Pie Chart**. The chart options tool appears.
1. In the chart options, drag `GENDER` into the **Keys** box.
1. In the **Aggregation** field, choose **COUNT**.
1. Increase the **# of Rows to Display** to a very large number to display all data.
1. Click **OK**. The pie chart appears.
If you want to make further changes, click **Options** to return to the chart options tool.
```
display(customer_df)
```
[Back to Table of Contents](#toc)
### View customers by generation in a bar chart
Look at how many customers you have per "generation."
Run the next cell and configure the graph:
1. Choose **Bar Chart** as the chart type and configure the chart options as instructed below.
2. Put `GENERATION` into the **Keys** box.
3. Set **aggregation** to `COUNT`.
1. Increase the **# of Rows to Display** to a very large number to display all data.
4. Click **OK**
4. Change the **Renderer** at the top right of the chart to explore different visualisations.
4. You can use clustering to group customers, for example by geographic location. To group generations by country, select `COUNTRY_CODE` from the **Cluster by** list from the menu on the left of the chart.
```
display(customer_df)
```
[Back to Table of Contents](#toc)
### View customers by age in a histogram chart
A generation is a broad age range. You can look at a smaller age range with a histogram chart. A histogram is like a bar chart except each bar represents a range of numbers, called a bin. You can customize the size of the age range by adjusting the bin size. The more bins you specify, the smaller the age range.
Run the next cell and configure the graph:
1. Choose **Histogram** as the chart type.
2. Put `AGE` into the **Values** box.
3. Increase the **# of Rows to Display** to a very large number to display all data.
4. Click **OK**.
5. Use the **Bin count** slider to specify the number of bins. Try starting with 40.
```
display(customer_df)
```
[Back to Table of Contents](#toc)
### View specific information with a filter function
You can filter records to restrict analysis by using the [PySpark DataFrame](https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.DataFrame) `filter()` function.
If you want to view the age distribution for a specific generation, uncomment the desired filter condition and run the next cell:
```
# Data subsetting: display age distribution for a specific generation
# (Chart type: histogram, Chart Options > Values: AGE)
# to change the filter condition remove the # sign
condition = "GENERATION = 'Baby_Boomers'"
#condition = "GENERATION = 'Gen_X'"
#condition = "GENERATION = 'Gen_Y'"
#condition = "GENERATION = 'Gen_Z'"
boomers_df = customer_df.filter(condition)
display(boomers_df)
```
PixieDust supports basic filtering to make it easy to analyse data subsets. For example, to view the age distribution for a specific gender, configure the chart as follows:
1. Choose `Histogram` as the chart type.
2. Put `AGE` into the **Values** box and click **OK**.
3. Click the filter button (shaped like a funnel), and choose **GENDER** as the field and `female` as the value.
The filter is only applied to the working data set and does not modify the input `customer_df`.
```
display(customer_df)
```
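The same subset can also be produced programmatically if you prefer a reproducible cell over the interactive filter widget; a minimal sketch using the DataFrame `filter()` function:
```
# Programmatic equivalent of the interactive gender filter
female_df = customer_df.filter("GENDER = 'female'")
display(female_df)
```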
You can also filter by location. For example, the following command creates a new DataFrame that filters for customers from the USA:
```
condition = "COUNTRY_CODE = 'US'"
us_customer_df = customer_df.filter(condition)
```
You can pivot your analysis perspective based on aspects that are of interest to you by choosing different keys and clusters.
Create a bar chart and cluster the data.
Run the next cell and configure the graph:
1. Choose **Bar chart** as the chart type.
2. Put `COUNTRY_CODE` into the **Keys** box.
3. Set Aggregation to **COUNT**.
4. Click **OK**. The chart displays the number of US customers.
5. From the **Cluster By** list, choose **GENDER**. The chart shows the number of customers by gender.
```
display(us_customer_df)
```
Now try to cluster the customers by state.
A bar chart isn't the best way to show geographic location!
[Back to Table of Contents](#toc)
### View customer density by location with a map
Maps are a much better way to view location data than other chart types.
Visualize customer density by US state with a map.
Run the next cell and configure the graph:
1. Choose **Map** as the chart type.
2. Put `STATE` into the **Keys** box.
4. Set Aggregation to **COUNT**.
5. Click **OK**. The map displays the number of US customers.
6. From the **Renderer** list, choose **brunel**.
> PixieDust supports three map renderers: brunel, [mapbox](https://www.mapbox.com/) and Google. Note that the Mapbox renderer and the Google renderer require an API key or access token and supported features vary by renderer.
7. You can explore more about customers in each state by changing the aggregation method, for example look at customer age ranges (avg, minimum, and maximum) by state. Simply Change the aggregation function to `AVG`, `MIN`, or `MAX` and choose `AGE` as value.
```
display(us_customer_df)
```
[Back to Table of Contents](#toc)
## Enrich demographic information with open data
You can easily combine other sources of data with your existing data. There are many publicly available open data sets that can be very helpful. For example, knowing the approximate income level of your customers might help you target your marketing campaigns.
Run the next cell to load [this data set](https://apsportal.ibm.com/exchange/public/entry/view/beb8c30a3f559e58716d983671b70337) from the United States Census Bureau into your notebook. The data set contains US household income statistics compiled at the zip code geography level.
```
# Load median income information for all US ZIP codes from a public source
income_df = pixiedust.sampleData('https://raw.githubusercontent.com/IBM/analyze-customer-data-spark-pixiedust/master/data/x19_income_select.csv')
income_df.printSchema()
```
Now cleanse the income data set to remove the data that you don't need. Create a new DataFrame for this data:
- The zip code, extracted from the GEOID column.
- The column B19049e1, which contains the median household income for 2013.
```
# ------------------------------
# Helper: Extract ZIP code
# ------------------------------
import re
def extractZIPCode(col):
    """ input: string containing a geo code, like '86000US01001'
        output: 5-digit ZIP code string, or None if the geo code doesn't match
    """
    m = re.match(r'^\d+US(\d{5})$', col)
    if m:
        return m.group(1)
    else:
        return None
getZIPCodeUDF = func.udf(lambda c: extractZIPCode(c), types.StringType())
income_df = income_df.select('GEOID', 'B19049e1').withColumnRenamed('B19049e1', 'MEDIAN_INCOME_IN_ZIP').withColumn("ZIP", getZIPCodeUDF(income_df['GEOID']))
income_df
```
Perform a left outer join on the customer data set with the income data set, using the zip code as the join condition. For the complete syntax of joins, go to the <a href="https://spark.apache.org/docs/1.5.2/api/python/pyspark.sql.html#pyspark.sql.DataFrame" target="_blank" rel="noopener noreferrer">pyspark DataFrame documentation</a> and scroll down to the `join` syntax.
```
us_customer_df = us_customer_df.join(income_df, us_customer_df.POSTAL_CODE == income_df.ZIP, 'left_outer').drop('GEOID').drop('ZIP')
display(us_customer_df)
```
Now you can visualize the income distribution of your customers by zip code.
Visualize income distribution for our customers.
Run the next cell and configure the graph:
1. Choose **Histogram** as the chart type.
2. Put `MEDIAN_INCOME_IN_ZIP` into the **Values** box and click **OK**.
The majority of your customers live in zip codes where the median income is around 40,000 USD.
[Back to Table of Contents](#toc)
Copyright © 2017, 2018 IBM. This notebook and its source code are released under the terms of the MIT License.
|
github_jupyter
|
# REINFORCE in lasagne
Just like we did before for q-learning, this time we'll design a lasagne network to learn `CartPole-v0` via policy gradient (REINFORCE).
Most of the code in this notebook is taken from approximate qlearning, so you'll find it more or less familiar and even simpler.
__Frameworks__ - we'll accept this homework in any deep learning framework. For example, it translates to TensorFlow almost line-for-line. However, we recommend you stick to theano/lasagne unless you're certain about your skills in the framework of your choice.
```
%env THEANO_FLAGS = 'floatX=float32'
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
os.environ['DISPLAY'] = ':1'
import gym
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
env = gym.make("CartPole-v0").env
env.reset()
n_actions = env.action_space.n
state_dim = env.observation_space.shape
plt.imshow(env.render("rgb_array"))
```
# Building the network for REINFORCE
For REINFORCE algorithm, we'll need a model that predicts action probabilities given states.
```
import theano
import theano.tensor as T
# create input variables. We'll support multiple states at once
states = T.matrix("states[batch,units]")
actions = T.ivector("action_ids[batch]")
cumulative_rewards = T.vector("G[batch] = r + gamma*r' + gamma^2*r'' + ...")
import lasagne
from lasagne.layers import *
# input layer
l_states = InputLayer((None,)+state_dim, input_var=states)
<Your architecture. Please start with 1-2 layers with 50-200 neurons>
# output layer
# this time we need to predict action probabilities,
# so make sure your nonlinearity forces p>0 and sum_p = 1
l_action_probas = DenseLayer( < ... > ,
num_units= < ... > ,
nonlinearity= < ... > )
```
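For reference, one possible way to fill in the placeholders above is shown below. It is only a sketch (two hidden layers of 100 units with ReLU nonlinearities and a softmax output), not the required answer:
```
# two hidden layers, then a softmax output over the action space
nn = DenseLayer(l_states, num_units=100, nonlinearity=lasagne.nonlinearities.rectify)
nn = DenseLayer(nn, num_units=100, nonlinearity=lasagne.nonlinearities.rectify)
l_action_probas = DenseLayer(nn,
                             num_units=n_actions,
                             nonlinearity=lasagne.nonlinearities.softmax)
```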
#### Predict function
```
# get probabilities of actions
predicted_probas = get_output(l_action_probas)
# predict action probability given state
# if you use float32, set allow_input_downcast=True
predict_proba = <compile a function that takes states and returns predicted_probas >
```
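One way to compile the missing function above (a sketch; `allow_input_downcast=True` lets float64 observations be cast to the network's float32 inputs):
```
# compile: states -> action probabilities
predict_proba = theano.function([states], predicted_probas,
                                allow_input_downcast=True)
```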
#### Loss function and updates
We now need to define objective and update over policy gradient.
Our objective function is
$$ J \approx { 1 \over N } \sum _{s_i,a_i} \pi_\theta (a_i | s_i) \cdot G(s_i,a_i) $$
Following the REINFORCE algorithm, we can define our objective as follows:
$$ \hat J \approx { 1 \over N } \sum _{s_i,a_i} \log \pi_\theta (a_i | s_i) \cdot G(s_i,a_i) $$
When you compute the gradient of that function over the network weights $ \theta $, it becomes exactly the policy gradient.
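Written out, the gradient of the surrogate objective is
$$ \nabla_\theta \hat J \approx { 1 \over N } \sum _{s_i,a_i} \nabla_\theta \log \pi_\theta (a_i | s_i) \cdot G(s_i,a_i), $$
which is the Monte-Carlo estimate of the policy gradient, since the returns $G(s_i,a_i)$ are treated as constants with respect to $\theta$.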
```
# select probabilities for chosen actions, pi(a_i|s_i)
predicted_probas_for_actions = predicted_probas[T.arange(
actions.shape[0]), actions]
# REINFORCE objective function
J = # <policy objective as in the last formula. Please use mean, not sum.>
# all network weights
all_weights = <get all "thetas" aka network weights using lasagne >
# weight updates. maximize J = minimize -J
updates = lasagne.updates.sgd(-J, all_weights, learning_rate=0.01)
train_step = theano.function([states, actions, cumulative_rewards], updates=updates,
allow_input_downcast=True)
```
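One way to fill in the two placeholders above (a sketch, not the only valid answer): take the mean of log-probability times return, and collect the trainable parameters with Lasagne's helper. Note that in the cell above these assignments have to come before the `updates` line.
```
# surrogate objective: mean over the batch of log pi(a_i|s_i) * G(s_i,a_i)
J = T.mean(T.log(predicted_probas_for_actions) * cumulative_rewards)
# all trainable network parameters ("thetas")
all_weights = lasagne.layers.get_all_params(l_action_probas, trainable=True)
```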
### Computing cumulative rewards
```
def get_cumulative_rewards(rewards, # rewards at each step
gamma=0.99 # discount for reward
):
"""
take a list of immediate rewards r(s,a) for the whole session
compute cumulative returns (a.k.a. G(s,a) in Sutton '16)
G_t = r_t + gamma*r_{t+1} + gamma^2*r_{t+2} + ...
The simple way to compute cumulative rewards is to iterate from last to first time tick
and compute G_t = r_t + gamma*G_{t+1} recurrently
You must return an array/list of cumulative rewards with as many elements as in the initial rewards.
"""
<your code here >
return < array of cumulative rewards >
assert len(get_cumulative_rewards(range(100))) == 100
assert np.allclose(get_cumulative_rewards([0, 0, 1, 0, 0, 1, 0], gamma=0.9), [
1.40049, 1.5561, 1.729, 0.81, 0.9, 1.0, 0.0])
assert np.allclose(get_cumulative_rewards(
[0, 0, 1, -2, 3, -4, 0], gamma=0.5), [0.0625, 0.125, 0.25, -1.5, 1.0, -4.0, 0.0])
assert np.allclose(get_cumulative_rewards(
[0, 0, 1, 2, 3, 4, 0], gamma=0), [0, 0, 1, 2, 3, 4, 0])
print("looks good!")
```
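A possible implementation of `get_cumulative_rewards` that passes the checks above (a sketch using a single backward pass over the rewards):
```
def get_cumulative_rewards(rewards, gamma=0.99):
    """Compute G_t = r_t + gamma*G_{t+1} by iterating from the last reward backwards."""
    G = np.zeros(len(rewards), dtype=np.float64)
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        G[t] = running
    return G
```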
### Playing the game
```
def generate_session(t_max=1000):
"""play env with REINFORCE agent and train at the session end"""
# arrays to record session
states, actions, rewards = [], [], []
s = env.reset()
for t in range(t_max):
# action probabilities array aka pi(a|s)
action_probas = predict_proba([s])[0]
a = <sample action with given probabilities >
new_s, r, done, info = env.step(a)
# record session history to train later
states.append(s)
actions.append(a)
rewards.append(r)
s = new_s
if done:
break
cumulative_rewards = get_cumulative_rewards(rewards)
train_step(states, actions, cumulative_rewards)
return sum(rewards)
for i in range(100):
rewards = [generate_session() for _ in range(100)] # generate new sessions
print("mean reward:%.3f" % (np.mean(rewards)))
if np.mean(rewards) > 300:
print("You Win!")
break
```
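The missing sampling step inside `generate_session` can be filled in, for example, with `np.random.choice` (a sketch):
```
# sample an action index according to the predicted probabilities
a = np.random.choice(n_actions, p=action_probas)
```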
### Video
```
# record sessions
import gym.wrappers
env = gym.wrappers.Monitor(gym.make("CartPole-v0"),
directory="videos", force=True)
sessions = [generate_session() for _ in range(100)]
env.close()
# show video
from IPython.display import HTML
import os
video_names = list(
filter(lambda s: s.endswith(".mp4"), os.listdir("./videos/")))
HTML("""
<video width="640" height="480" controls>
<source src="{}" type="video/mp4">
</video>
""".format("./videos/"+video_names[-1])) # this may or may not be _last_ video. Try other indices
```
|
github_jupyter
|
# 📝 Exercise M3.02
The goal is to find the best set of hyperparameters which maximize the
generalization performance on a training set.
Here again we limit the size of the training set to make the computation
run faster. Feel free to increase the `train_size` value if your computer
is powerful enough.
```
import numpy as np
import pandas as pd
adult_census = pd.read_csv("../datasets/adult-census.csv")
target_name = "class"
target = adult_census[target_name]
data = adult_census.drop(columns=[target_name, "education-num"])
from sklearn.model_selection import train_test_split
data_train, data_test, target_train, target_test = train_test_split(
data, target, train_size=0.2, random_state=42)
```
In this exercise, we will progressively define the classification pipeline
and later tune its hyperparameters.
Our pipeline should:
* preprocess the categorical columns using a `OneHotEncoder` and use a
`StandardScaler` to normalize the numerical data.
* use a `LogisticRegression` as a predictive model.
Start by defining the columns and the preprocessing pipelines to be applied
on each group of columns.
```
from sklearn.compose import make_column_selector as selector
# Write your code here.
categorical_selector = selector(dtype_include=object)
numerical_selector = selector(dtype_exclude=object)
categorical_columns = categorical_selector(data)
numerical_columns = numerical_selector(data)
from sklearn.preprocessing import OneHotEncoder, StandardScaler
# Write your code here.
cat_processor = OneHotEncoder(handle_unknown='ignore')
num_processor = StandardScaler()
```
Subsequently, create a `ColumnTransformer` to redirect each group of columns to its preprocessing pipeline.
```
from sklearn.compose import ColumnTransformer
# Write your code here.
preprocessor = ColumnTransformer(
[
('cat_process', cat_processor, categorical_columns),
('num_process', num_processor, numerical_columns)
])
```
Assemble the final pipeline by combining the above preprocessor
with a logistic regression classifier. Force the maximum number of
iterations to `10_000` to ensure that the model will converge.
```
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
# Write your code here.
model = make_pipeline(preprocessor, LogisticRegression(max_iter=10_000))
```
Use `RandomizedSearchCV` with `n_iter=20` to find the best set of
hyperparameters by tuning the following parameters of the `model`:
- the parameter `C` of the `LogisticRegression` with values ranging from
0.001 to 10. You can use a log-uniform distribution
(i.e. `scipy.stats.loguniform`);
- the parameter `with_mean` of the `StandardScaler` with possible values
`True` or `False`;
- the parameter `with_std` of the `StandardScaler` with possible values
`True` or `False`.
Once the computation has completed, print the best combination of parameters
stored in the `best_params_` attribute.
```
model.get_params()
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import loguniform
# Write your code here.
params_dict = {
'columntransformer__num_process__with_mean': [True, False],
'columntransformer__num_process__with_std': [True, False],
'logisticregression__C': loguniform(1e-3, 10)
}
model_random_search = RandomizedSearchCV(model,
param_distributions= params_dict,
n_iter=20, error_score='raise',
n_jobs=-1, verbose=1)
model_random_search.fit(data_train, target_train)
model_random_search.best_params_
```
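As an optional sanity check (not part of the exercise statement), you can score the tuned pipeline on the held-out test set; by default `RandomizedSearchCV` refits the best configuration on the full training data:
```
# Evaluate the refitted best pipeline on the test set
test_accuracy = model_random_search.score(data_test, target_test)
print(f"Test accuracy of the best pipeline: {test_accuracy:.3f}")
```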
|
github_jupyter
|
```
# Description: Plot Figure 3 (Overview of wind, wave and density stratification during the field experiment).
# Author: André Palóczy
# E-mail: [email protected]
# Date: December/2020
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from pandas import Timestamp
from xarray import open_dataset, DataArray
from dewaveADCP.utils import fourfilt
def fitN2(T, z, g=9.8, alpha=2e-4):
    """Fit a straight line to the temperature profile T(z) and estimate the
    buoyancy frequency N from the fitted temperature gradient, assuming a
    linear equation of state with thermal expansion coefficient alpha."""
    fg = ~np.isnan(T)
    p = np.polyfit(z[fg], T[fg], 1)
    Tfit = np.polyval(p, z)
    dTdz = (Tfit[0] - Tfit[-1])/z.ptp()
    N = np.sqrt(g*alpha*np.abs(dTdz)) # [1/s].
    return N, Tfit
plt.close('all')
head = "../../data_reproduce_figs/"
ds = open_dataset(head+"windstress-wave-tide.nc")
twind = ds['twind'].values
twave = ds['twave'].values
ttide = ds['ttide'].values
taux, tauy = ds['taux'].values, ds['tauy'].values
Hs = ds['Hs'].values
Tp = ds['Tp'].values
meandir = ds['wavedir'].values
meandirspread = ds['wavespread'].values
# Low-pass filter wind stress.
dts_wind = 2*60 # 2 min sampling frequency.
Tmax = dts_wind*taux.size*2
Tmin = 60*60*30 # 30 h low-pass filter.
taux = fourfilt(taux, dts_wind, Tmax, Tmin)
tauy = fourfilt(tauy, dts_wind, Tmax, Tmin)
dsSIO = open_dataset(head+"windstress-SIOMiniMETBuoy.nc")
twindSIO = dsSIO['taux']['t']
tauxSIO, tauySIO = dsSIO['taux'].values, dsSIO['tauy'].values
dts_windSIO = 60*60 # 1 h averages.
tauxSIO = fourfilt(tauxSIO, dts_windSIO, Tmax, Tmin)
tauySIO = fourfilt(tauySIO, dts_windSIO, Tmax, Tmin)
tl, tr = Timestamp('2017-09-08'), Timestamp('2017-11-01')
# Plot wind and wave variables.
shp = (4, 2)
fig = plt.figure(figsize=(7, 7))
ax1 = plt.subplot2grid(shp, (0, 0), colspan=2, rowspan=1)
ax2 = plt.subplot2grid(shp, (1, 0), colspan=2, rowspan=1)
ax3 = plt.subplot2grid(shp, (2, 0), colspan=2, rowspan=1)
ax4 = plt.subplot2grid(shp, (3, 0), colspan=1, rowspan=1) # T profiles from different moorings.
ax5 = plt.subplot2grid(shp, (3, 1), colspan=1, rowspan=1) # Top-bottom temperature difference.
ax1.plot(twindSIO, tauxSIO, color='gray', linestyle='--')
ax1.plot(twindSIO, tauySIO, color='k', linestyle='--')
ax1.plot(twind, taux, color='gray', label=r"$\tau_x$")
ax1.plot(twind, tauy, color='k', label=r"$\tau_y$")
ax1.axhline(color='k', linewidth=1)
ax1.set_ylabel('Wind stress [Pa]', fontsize=13)
ax1.legend(frameon=False, loc=(0.9, -0.01), handlelength=0.8)
ax1.set_ylim(-0.1, 0.1)
ax2.plot(twave, Hs, 'r', label=r'$H_s$')
ax2r = ax2.twinx()
ax2r.plot(twave, Tp, 'b', label=r'$T_p$')
ax2.set_ylabel(r'$H_s$ [m]', fontsize=15, color='r')
ax2r.set_ylabel(r'Peak period [s]', fontsize=13, color='b')
ax2r.spines['right'].set_color('b')
ax2r.spines['left'].set_color('r')
ax2.tick_params(axis='y', colors='r')
ax2r.tick_params(axis='y', colors='b')
ax3.fill_between(twave, meandir-meandirspread, meandir+meandirspread, color='k', alpha=0.2)
ax3.plot(twave, meandir, 'k')
ax3.set_ylim(240, 360)
ax3.set_ylabel(r'Wave direction [$^\circ$]', fontsize=12)
ax1.xaxis.set_ticklabels([])
ax2.xaxis.set_ticklabels([])
ax1.set_xlim(tl, tr)
ax2.set_xlim(tl, tr)
ax3.set_xlim(tl, tr)
fig.subplots_adjust(hspace=0.3)
ax2.axes.xaxis.set_tick_params(rotation=10)
ax1.text(0.01, 0.85, r'(a)', fontsize=13, transform=ax1.transAxes)
ax2.text(0.01, 0.85, r'(b)', fontsize=13, transform=ax2.transAxes)
ax3.text(0.01, 0.85, r'(c)', fontsize=13, transform=ax3.transAxes)
bbox = ax2.get_position()
offset = 0.04
ax2.set_position([bbox.x0, bbox.y0 + offset, bbox.x1-bbox.x0, bbox.y1 - bbox.y0])
bbox = ax3.get_position()
offset = 0.08
ax3.set_position([bbox.x0, bbox.y0 + offset, bbox.x1-bbox.x0, bbox.y1 - bbox.y0])
locator = mdates.AutoDateLocator(minticks=12, maxticks=24)
fmts = ['', '%Y', '%Y', '%Y', '%Y', '%Y %H:%M']
formatter = mdates.ConciseDateFormatter(locator, offset_formats=fmts)
ax3.xaxis.set_major_locator(locator)
ax3.xaxis.set_major_formatter(formatter)
# Panel with all T profiles.
wanted_ids = ['OC25M', 'OC25SA', 'OC25SB', 'OC40S', 'OC40N']
col = dict(OC25M='k', OC25SA='r', OC25SB='m', OC40S='b', OC40N='c')
for id in wanted_ids:
ds = open_dataset(head+"Tmean-"+id+".nc")
T, zab = ds["Tmean"].values, ds["z"].values
ax4.plot(T, zab, linestyle='none', marker='o', ms=5, mfc=col[id], mec=col[id], label=id)
ax4.legend(loc='upper left', bbox_to_anchor=(-0.05, 1.02), frameon=False, fontsize=10, labelspacing=0.01, handletextpad=0, borderpad=0, bbox_transform=ax4.transAxes)
ax4.set_xlabel(r'$T$ [$^o$C]', fontsize=13)
ax4.set_ylabel(r'zab [m]', fontsize=13)
# Fit a line to each mooring to estimate N2.
Navg = 0
for id in wanted_ids:
ds = open_dataset(head+"Tmean-"+id+".nc")
T, zab = ds["Tmean"].values, ds["z"].values
N, Tfit = fitN2(T, zab)
txt = r"$%s --> %.2f \times 10^{-2}$ s$^{-1}$"%(id, N*100)
print(txt)
Navg += N
Navg /= len(wanted_ids)
# Time series of top-to-bottom T difference.
Tstrat = open_dataset(head+"Tstrat-OC25M.nc")
Tstrat, tt = Tstrat["Tstrat"].values, Tstrat["t"].values
ax5.plot(tt, Tstrat, 'k')
ax1.yaxis.set_ticks([-0.1, -0.075, -0.05, -0.025, 0, 0.025, 0.05, 0.075, 0.1])
ax2r.yaxis.set_ticks([5, 10, 15, 20, 25])
ax4.yaxis.set_ticks([0, 10, 20, 30, 40])
ax5.yaxis.set_ticks(np.arange(7))
ax5.set_xlim(tl, tr)
ax5.yaxis.tick_right()
ax5.yaxis.set_label_position("right")
ax5.set_ylabel(r'$T$ difference [$^o$C]', fontsize=13)
locator = mdates.AutoDateLocator()
fmts = ['', '%Y', '%Y', '%Y', '%Y', '%Y %H:%M']
formatter = mdates.ConciseDateFormatter(locator, offset_formats=fmts)
ax5.xaxis.set_major_locator(locator)
ax5.xaxis.set_major_formatter(formatter)
ax4.text(0.90, 0.1, '(d)', fontsize=13, transform=ax4.transAxes)
ax5.text(0.02, 0.1, '(e)', fontsize=13, transform=ax5.transAxes)
offsetx = 0.03
offsety = 0.065
bbox = ax4.get_position()
ax4.set_position([bbox.x0 + offsetx, bbox.y0, bbox.x1-bbox.x0, bbox.y1 - bbox.y0 + offsety])
bbox = ax5.get_position()
ax5.set_position([bbox.x0 - offsetx, bbox.y0, bbox.x1-bbox.x0, bbox.y1 - bbox.y0 + offsety])
plt.show()
fig.savefig("fig03.png", dpi=300, bbox_inches='tight')
```
|
github_jupyter
|
## Stage 3: What do I need to install?
Maybe your experience looks like the typical python dependency management (https://xkcd.com/1987/):
<img src=https://imgs.xkcd.com/comics/python_environment.png>
Furthermore, data science packages can have all sorts of additional non-Python dependencies which makes things even more confusing, and we end up spending more time sorting out our dependencies than doing data science. If you take home nothing else out of this tutorial, learn this stage. I promise. It will save you, and everyone who works with you, many days of your life back.
### Reproducibility Issues:
* (NO-ENVIRONMENT-INSTRUCTIONS) Chicken and egg issue with environments. No environment.yml file or the like. (Even if there are some instructions in a notebook).
* (NO-VERSION-PIN) Versions not pinned. E.g. uses a dev branch without a clear indication of when it became released.
* (IMPOSSIBLE-ENVIRONMENT) dependencies are not resolvable due to version clashes. (e.g. need <=0.48 and >=0.49)
* (ARCH-DIFFERENCE) The same code runs differently on different architectures
* (MONOLITHIC-ENVIRONMENT) One environment to rule (or fail) them all.
### Default Better Principles
* **Use (at least) one virtual environment per repo**: And use the same name for the environment as the repo.
* **Generate lock files**: Lock files include every single dependency in your dependency chain. Lock files are necessarily platform specific, so you need one per platform that you support. This way you have a perfect version pin on the environment that you used for that moment in time.
* **Check in your environment creation instructions**: That means an `environment.yml` file for conda, and its matching lock file(s).
## The Easydata way: `make create_environment`
We like `conda` for environment management since it's the least bad option for most data science workflows. There are no perfect ways of doing this. Here are some basics.
### Setting up your environment
### clone the repo
```
git clone https://github.com/acwooding/easydata-tutorial
cd easydata-tutorial
```
### Initial setup
* **YOUR FIRST TASK OF THIS STAGE**: Check if there is a CONDA_EXE environment variable set with the full path to your conda binary; e.g. by doing the following:
```
export | grep CONDA_EXE
```
* **NOTE:** if there is no CONDA_EXE, you will need to find your conda binary and record its location in the CONDA_EXE line of `Makefile.include`
Recent versions of conda have made finding the actual binary harder than it should be. This might work:
```
>>> which conda
~/miniconda3/bin/conda
```
* Create and switch to the virtual environment:
```
make create_environment
conda activate easydata-tutorial
make update_environment
```
Now you're ready to run `jupyter notebook` (or jupyter lab) and explore the notebooks in the `notebooks` directory.
From within jupyter, re-open this notebook and run the cells below.
**Your next Task**: Run the next cell to ensure that the packages got added to the python environment correctly.
```
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import pandas as pd
%matplotlib inline
```
### Updating your conda and pip environments
The `make` commands, `make create_environment` and `make update_environment` are wrappers that allow you to easily manage your conda and pip environments using a file called `environment.yml` file, which lists the packages you want in your python environment.
(If you ever forget which `make` subcommand to run, you can run `make` by itself and it will provide a list of subcommands that are available.)
When adding packages to your python environment, **never do a `pip install` or `conda install` directly**. Always edit `environment.yml` and `make update_environment` instead.
Your `environment.yml` file will look something like this:
```
name: easydata-tutorial
dependencies:
  - pip
  - pip:
    - -e .  # conda >= 4.4 only
    - python-dotenv>=0.5.1
    - nbval
    - nbdime
    - umap-learn
    - gdown
    # Add more pip dependencies here
  - setuptools
  - wheel
  - git>=2.5 # for git worktree template updating
  - sphinx
  - bokeh
  - click
  - colorcet
  - coverage
  - coveralls
  - datashader
  - holoviews
  - matplotlib
  - jupyter
  # Add more conda dependencies here
...
```
Notice you can add conda and pip dependencies separately. For good reproducibility, we recommend you always try and use the conda version of a package if it is available.
Once you're done your edits, run `make update_environment` and voila, your python environment is up to date.
**Git Bonus Task:** To save or share your updated environment, check in your `environment.yml` file using git.
**YOUR NEXT TASK** in the Quest: Updating your python environment to include the `seaborn` package. But first, a quick tip with using `conda` environments in notebooks:
#### Using your conda environment in a jupyter notebook
If you make a new notebook, and your packages don't seem to be available, make sure to select the `easydata-tutorial` kernel from within the notebook. If you are somehow in another kernel, select **Kernel -> Change kernel -> Python[conda env:easydata-tutorial]**. If you don't seem to have that option, make sure that you ran `jupyter notebook` with the `easydata-tutorial` conda environment enabled, and that `which jupyter` points to the correct (`easydata-tutorial`) version of jupyter.
You can see what's in your notebook's conda environment by putting the following in a cell and running it:
```
%conda info
```
Another useful cell to include is the following.
If you want your environment changes to be immediately available in your running notebooks, make sure to run a notebook cell containing:
```
%load_ext autoreload
%autoreload 2
```
If you did your task correctly, the following import will succeed.
```
import seaborn as sns
```
Remember, you should **never** do a `pip install` or `conda install` manually. You want to make sure your environment changes are saved to your data science repo. Instead, edit `environment.yml` and do a `make update_environment`.
Your **NEXT TASK of this stage**: Run `make env_challenge` and follow the instructions if it works.
### BONUS Task: Lockfiles
* Do this if there's time *
Lockfiles are a way of separating the list of "packages I want" from "packages I need to install to make everything work". For reproducibility reasons, we want to keep track of both lists, but not in the same file. This separation is usually handled by something called a "lockfile."
Unlike several other virtual environment managers, conda doesn't have lockfiles. To work around this limitation, Easydata generates a basic lockfile from `environment.yml` whenever you run `make update_environment`.
This lockfile is a file called `environment.{$ARCH}.lock.yml` (e.g. `environment.i386.lock.yml`). This file keeps a record of the exact environment that is currently installed in your conda environment `easydata-tutorial`. If you ever need to reproduce an environment exactly, you can install from the `.lock.yml` file. (Note: These are architecture dependent, so don't expect a mac lockfile to work on linux, and vice versa).
For more instructions on setting up and maintaining your python environment (including how to point your environment at your custom forks and work in progress) see [Setting up and Maintaining your Conda Environment Reproducibly](../reference/easydata/conda-environments.md).
**Your BONUS Task** in the Quest: Take a look at the lockfile, and compare its contents to `environment.yml`. Then ask yourself, "aren't I glad I don't have to maintain this list manually?"
|
github_jupyter
|
WKN strings can be converted to the following formats via the `output_format` parameter:
* `compact`: only the WKN string without any separators or whitespace, like "A0MNRK"
* `standard`: WKN strings with proper whitespace in the proper places. Note that in the case of WKN, the compact format is the same as the standard one.
* `isin`: convert the number to an ISIN, like "DE000A0MNRK9".
Invalid parsing is handled with the `errors` parameter:
* `coerce` (default): invalid parsing will be set to NaN
* `ignore`: invalid parsing will return the input
* `raise`: invalid parsing will raise an exception
The following sections demonstrate the functionality of `clean_de_wkn()` and `validate_de_wkn()`.
### An example dataset containing WKN strings
```
import pandas as pd
import numpy as np
df = pd.DataFrame(
{
"wkn": [
'A0MNRK',
'AOMNRK',
'7542011030',
'7552A10004',
'8019010008',
"hello",
np.nan,
"NULL",
],
"address": [
"123 Pine Ave.",
"main st",
"1234 west main heights 57033",
"apt 1 789 s maple rd manhattan",
"robie house, 789 north main street",
"1111 S Figueroa St, Los Angeles, CA 90015",
"(staples center) 1111 S Figueroa St, Los Angeles",
"hello",
]
}
)
df
```
## 1. Default `clean_de_wkn`
By default, `clean_de_wkn` will clean wkn strings and output them in the standard format with proper separators.
```
from dataprep.clean import clean_de_wkn
clean_de_wkn(df, column = "wkn")
```
## 2. Output formats
This section demonstrates the output parameter.
### `standard` (default)
```
clean_de_wkn(df, column = "wkn", output_format="standard")
```
### `compact`
```
clean_de_wkn(df, column = "wkn", output_format="compact")
```
### `isin`
```
clean_de_wkn(df, column = "wkn", output_format="isin")
```
## 3. `inplace` parameter
This deletes the given column from the returned DataFrame.
A new column containing cleaned WKN strings is added with a title in the format `"{original title}_clean"`.
```
clean_de_wkn(df, column="wkn", inplace=True)
```
## 4. `errors` parameter
### `coerce` (default)
```
clean_de_wkn(df, "wkn", errors="coerce")
```
### `ignore`
```
clean_de_wkn(df, "wkn", errors="ignore")
```
## 5. `validate_de_wkn()`
`validate_de_wkn()` returns `True` when the input is a valid WKN. Otherwise it returns `False`.
The input of `validate_de_wkn()` can be a string, a pandas Series, a Dask Series, a pandas DataFrame or a Dask DataFrame.
When the input is a string or a Series, no column name needs to be specified.
When the input is a DataFrame, specifying a column name is optional: if a column name is given, `validate_de_wkn()` returns the validation result for that column only; otherwise it returns the validation result for the whole DataFrame.
```
from dataprep.clean import validate_de_wkn
print(validate_de_wkn('A0MNRK'))
print(validate_de_wkn('AOMNRK'))
print(validate_de_wkn('7542011030'))
print(validate_de_wkn('7552A10004'))
print(validate_de_wkn('8019010008'))
print(validate_de_wkn("hello"))
print(validate_de_wkn(np.nan))
print(validate_de_wkn("NULL"))
```
### Series
```
validate_de_wkn(df["wkn"])
```
### DataFrame + Specify Column
```
validate_de_wkn(df, column="wkn")
```
### Only DataFrame
```
validate_de_wkn(df)
```
|
github_jupyter
|
```
from google.colab import drive
drive.mount('/content/gdrive')
!git clone https://github.com/NVIDIA/pix2pixHD.git
import os
os.chdir('pix2pixHD/')
# !chmod 755 /content/gdrive/My\ Drive/Images_for_GAN/datasets/download_convert_apples_dataset.sh
# !/content/gdrive/My\ Drive/Images_for_GAN/datasets/download_convert_apples_dataset.sh
!ls
!pip install dominate
import numpy as np
import scipy
import matplotlib
import pandas as pd
import cv2
import matplotlib.pyplot as plt
# import pydmd
#from pydmd import DMD
%matplotlib inline
import scipy.integrate
from matplotlib import animation
from IPython.display import HTML
from pylab import rcParams
rcParams['figure.figsize'] = 8, 5
from PIL import Image
from skimage import io
# Example of RGB image from A
apples_example1 = cv2.imread('/content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/A/20_12_26_22_15_00_Canon_top_all_on.jpg')
apples_example1 = cv2.cvtColor(apples_example1, cv2.COLOR_BGR2RGB)
plt.imshow(apples_example1)
plt.show()
print(type(apples_example1))
print("- Number of Pixels: " + str(apples_example1.size))
print("- Shape/Dimensions: " + str(apples_example1.shape))
# Example of RGB image from B
apples_example2 = cv2.imread('/content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/B/set10_20201226_221732_686_00000_channel7.png')
apples_example2 = cv2.cvtColor(apples_example2, cv2.COLOR_BGR2RGB)
plt.imshow(apples_example2)
plt.show()
print(type(apples_example2))
print("- Number of Pixels: " + str(apples_example2.size))
print("- Shape/Dimensions: " + str(apples_example2.shape))
# Example of RGB image from ./train_A/
apples_example3 = cv2.imread('/content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/train/train_A/20210111_171500.png')
apples_example3 = cv2.cvtColor(apples_example3, cv2.COLOR_BGR2RGB)
plt.imshow(apples_example3)
plt.show()
print(type(apples_example3))
print("- Number of Pixels: " + str(apples_example3.size))
print("- Shape/Dimensions: " + str(apples_example3.shape))
# Example of RGB image from ./train_B/
apples_example4 = cv2.imread('/content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/train/train_B/20210111_134500.png')
apples_example4 = cv2.cvtColor(apples_example4, cv2.COLOR_BGR2RGB)
plt.imshow(apples_example4)
plt.show()
print(type(apples_example4))
print("- Number of Pixels: " + str(apples_example4.size))
print("- Shape/Dimensions: " + str(apples_example4.shape))
#!python train.py --loadSize 512 --fineSize 512 --label_nc 0 --no_instance --name apples_RGB_NIR --dataroot /content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/train --checkpoints_dir /content/gdrive/MyDrive/Images_for_GAN/checkpoints --model Pix2PixHD --save_epoch_freq 5
path_train_A = '/content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/train/train_A/'
print('path_train_A: ', path_train_A)
print('Number of images in path_train_A:', len(os.listdir(path_train_A)))
path_train_B = '/content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/train/train_B/'
print('path_train_B: ', path_train_B)
print('Number of images in path_train_B:', len(os.listdir(path_train_B)))
# !python train.py --name apples_trash --dataroot /content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/Trash --checkpoints_dir /content/gdrive/MyDrive/Images_for_GAN/checkpoints --norm batch --loadSize 512 --fineSize 512 --label_nc 0 --no_instance
!python train.py --name apples_trash_1 --dataroot /content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/train --label_nc 0 --no_instance --loadSize 320 --fineSize 160 --resize_or_crop resize_and_crop
# !python train.py --name apples_trash --dataroot /content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/Trash --checkpoints_dir /content/gdrive/MyDrive/Images_for_GAN/checkpoints --norm batch --loadSize 512 --fineSize 512 --label_nc 0 --no_instance
!python train.py --name apples_train_1 --dataroot /content/gdrive/MyDrive/Images_for_GAN/apples_RGB_NIR/Trash --label_nc 0 --no_instance --loadSize 320 --fineSize 160 --resize_or_crop resize_and_crop
```
|
github_jupyter
|
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Image/texture.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Image/texture.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Image/texture.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Image/texture.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The magic command `%%capture` can be used to hide output from a specific cell.
```
# %%capture
# !pip install earthengine-api
# !pip install geehydro
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once. Uncomment the line `ee.Authenticate()`
if you are running this notebook for the first time or if you are getting an authentication error.
```
# ee.Authenticate()
ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
import math
# Load a high-resolution NAIP image.
image = ee.Image('USDA/NAIP/DOQQ/m_3712213_sw_10_1_20140613')
# Zoom to San Francisco, display.
Map.setCenter(-122.466123, 37.769833, 17)
Map.addLayer(image, {'max': 255}, 'image')
# Get the NIR band.
nir = image.select('N')
# Define a neighborhood with a kernel.
square = ee.Kernel.square(**{'radius': 4})
# Compute entropy and display.
entropy = nir.entropy(square)
Map.addLayer(entropy,
{'min': 1, 'max': 5, 'palette': ['0000CC', 'CC0000']},
'entropy')
# Compute the gray-level co-occurrence matrix (GLCM), get contrast.
glcm = nir.glcmTexture(**{'size': 4})
contrast = glcm.select('N_contrast')
Map.addLayer(contrast,
{'min': 0, 'max': 1500, 'palette': ['0000CC', 'CC0000']},
'contrast')
# Create a list of weights for a 9x9 kernel.
list = [1, 1, 1, 1, 1, 1, 1, 1, 1]
# The center of the kernel is zero.
centerList = [1, 1, 1, 1, 0, 1, 1, 1, 1]
# Assemble a list of lists: the 9x9 kernel weights as a 2-D matrix.
lists = [list, list, list, list, centerList, list, list, list, list]
# Create the kernel from the weights.
# Non-zero weights represent the spatial neighborhood.
kernel = ee.Kernel.fixed(9, 9, lists, -4, -4, False)
# Convert the neighborhood into multiple bands.
neighs = nir.neighborhoodToBands(kernel)
# Compute local Geary's C, a measure of spatial association.
gearys = nir.subtract(neighs).pow(2).reduce(ee.Reducer.sum()) \
.divide(math.pow(9, 2))
Map.addLayer(gearys,
{'min': 20, 'max': 2500, 'palette': ['0000CC', 'CC0000']},
"Geary's C")
```
## Display Earth Engine data layers
```
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
|
github_jupyter
|
## TFMA Notebook example
This notebook describes how to export your model for TFMA and demonstrates the analysis tooling it offers.
Note: Please make sure to follow the instructions in [README.md](https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi/README.md) when running this notebook
## Setup
Import necessary packages.
```
import apache_beam as beam
import os
import preprocess
import shutil
import tensorflow as tf
import tensorflow_data_validation as tfdv
import tensorflow_model_analysis as tfma
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow_transform.beam.tft_beam_io import transform_fn_io
from tensorflow_transform.coders import example_proto_coder
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
from trainer import task
from trainer import taxi
```
Helper functions and some constants for running the notebook locally.
```
BASE_DIR = os.getcwd()
DATA_DIR = os.path.join(BASE_DIR, 'data')
OUTPUT_DIR = os.path.join(BASE_DIR, 'chicago_taxi_output')
# Base dir containing train and eval data
TRAIN_DATA_DIR = os.path.join(DATA_DIR, 'train')
EVAL_DATA_DIR = os.path.join(DATA_DIR, 'eval')
# Base dir where TFT writes training data
TFT_TRAIN_OUTPUT_BASE_DIR = os.path.join(OUTPUT_DIR, 'tft_train')
TFT_TRAIN_FILE_PREFIX = 'train_transformed'
# Base dir where TFT writes eval data
TFT_EVAL_OUTPUT_BASE_DIR = os.path.join(OUTPUT_DIR, 'tft_eval')
TFT_EVAL_FILE_PREFIX = 'eval_transformed'
TF_OUTPUT_BASE_DIR = os.path.join(OUTPUT_DIR, 'tf')
# Base dir where TFMA writes eval data
TFMA_OUTPUT_BASE_DIR = os.path.join(OUTPUT_DIR, 'tfma')
SERVING_MODEL_DIR = 'serving_model_dir'
EVAL_MODEL_DIR = 'eval_model_dir'
def get_tft_train_output_dir(run_id):
return _get_output_dir(TFT_TRAIN_OUTPUT_BASE_DIR, run_id)
def get_tft_eval_output_dir(run_id):
return _get_output_dir(TFT_EVAL_OUTPUT_BASE_DIR, run_id)
def get_tf_output_dir(run_id):
return _get_output_dir(TF_OUTPUT_BASE_DIR, run_id)
def get_tfma_output_dir(run_id):
return _get_output_dir(TFMA_OUTPUT_BASE_DIR, run_id)
def _get_output_dir(base_dir, run_id):
return os.path.join(base_dir, 'run_' + str(run_id))
def get_schema_file():
return os.path.join(OUTPUT_DIR, 'schema.pbtxt')
```
Clean up output directories.
```
shutil.rmtree(TFT_TRAIN_OUTPUT_BASE_DIR, ignore_errors=True)
shutil.rmtree(TFT_EVAL_OUTPUT_BASE_DIR, ignore_errors=True)
shutil.rmtree(TF_OUTPUT_BASE_DIR, ignore_errors=True)
shutil.rmtree(get_schema_file(), ignore_errors=True)
```
## Compute and visualize descriptive data statistics
```
# Compute stats over training data.
train_stats = tfdv.generate_statistics_from_csv(data_location=os.path.join(TRAIN_DATA_DIR, 'data.csv'))
# Visualize training data stats.
tfdv.visualize_statistics(train_stats)
```
## Infer a schema
```
# Infer a schema from the training data stats.
schema = tfdv.infer_schema(statistics=train_stats, infer_feature_shape=False)
tfdv.display_schema(schema=schema)
```
## Check evaluation data for errors
```
# Compute stats over eval data.
eval_stats = tfdv.generate_statistics_from_csv(data_location=os.path.join(EVAL_DATA_DIR, 'data.csv'))
# Compare stats of eval data with training data.
tfdv.visualize_statistics(lhs_statistics=eval_stats, rhs_statistics=train_stats,
lhs_name='EVAL_DATASET', rhs_name='TRAIN_DATASET')
# Check eval data for errors by validating the eval data stats using the previously inferred schema.
anomalies = tfdv.validate_statistics(statistics=eval_stats, schema=schema)
tfdv.display_anomalies(anomalies)
# Update the schema based on the observed anomalies.
# Relax the minimum fraction of values that must come from the domain for feature company.
company = tfdv.get_feature(schema, 'company')
company.distribution_constraints.min_domain_mass = 0.9
# Add new value to the domain of feature payment_type.
payment_type_domain = tfdv.get_domain(schema, 'payment_type')
payment_type_domain.value.append('Prcard')
# Validate eval stats after updating the schema
updated_anomalies = tfdv.validate_statistics(eval_stats, schema)
tfdv.display_anomalies(updated_anomalies)
```
## Freeze the schema
Now that the schema has been reviewed and curated, we will store it in a file to reflect its "frozen" state.
```
file_io.recursive_create_dir(OUTPUT_DIR)
file_io.write_string_to_file(get_schema_file(), text_format.MessageToString(schema))
```
## Preprocess Inputs
transform_data is defined in preprocess.py and uses the tensorflow_transform library to perform preprocessing. The same code is used for both local preprocessing in this notebook and preprocessing in the Cloud (via Dataflow).
```
# Transform eval data
preprocess.transform_data(input_handle=os.path.join(EVAL_DATA_DIR, 'data.csv'),
outfile_prefix=TFT_EVAL_FILE_PREFIX,
working_dir=get_tft_eval_output_dir(0),
schema_file=get_schema_file(),
pipeline_args=['--runner=DirectRunner'])
print('Done')
# Transform training data
preprocess.transform_data(input_handle=os.path.join(TRAIN_DATA_DIR, 'data.csv'),
outfile_prefix=TFT_TRAIN_FILE_PREFIX,
working_dir=get_tft_train_output_dir(0),
schema_file=get_schema_file(),
pipeline_args=['--runner=DirectRunner'])
print('Done')
```
## Compute statistics over transformed data
```
# Compute stats over transformed training data.
TRANSFORMED_TRAIN_DATA = os.path.join(get_tft_train_output_dir(0), TFT_TRAIN_FILE_PREFIX + "*")
transformed_train_stats = tfdv.generate_statistics_from_tfrecord(data_location=TRANSFORMED_TRAIN_DATA)
# Visualize transformed training data stats and compare to raw training data.
# Use 'Feature search' to focus on a feature and see statistics pre- and post-transformation.
tfdv.visualize_statistics(transformed_train_stats, train_stats, lhs_name='TRANSFORMED', rhs_name='RAW')
```
## Prepare the Model
To use TFMA, export the model into an **EvalSavedModel** by calling ``tfma.export.export_eval_savedmodel``.
``tfma.export.export_eval_savedmodel`` is analogous to ``estimator.export_savedmodel`` but exports the evaluation graph as opposed to the training or inference graph. Notice that one of the inputs is ``eval_input_receiver_fn`` which is analogous to ``serving_input_receiver_fn`` for ``estimator.export_savedmodel``. For more details, refer to the documentation for TFMA on Github.
Construct the **EvalSavedModel** after training is completed.
```
def run_experiment(hparams):
"""Run the training and evaluate using the high level API"""
# Train and evaluate the model as usual.
estimator = task.train_and_maybe_evaluate(hparams)
  # Export TFMA's special EvalSavedModel
eval_model_dir = os.path.join(hparams.output_dir, EVAL_MODEL_DIR)
receiver_fn = lambda: eval_input_receiver_fn(hparams.tf_transform_dir)
tfma.export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=eval_model_dir,
eval_input_receiver_fn=receiver_fn)
def eval_input_receiver_fn(working_dir):
# Extract feature spec from the schema.
raw_feature_spec = schema_utils.schema_as_feature_spec(schema).feature_spec
serialized_tf_example = tf.placeholder(
dtype=tf.string, shape=[None], name='input_example_tensor')
# First we deserialize our examples using the raw schema.
features = tf.parse_example(serialized_tf_example, raw_feature_spec)
# Now that we have our raw examples, we must process them through tft
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform(
os.path.join(working_dir, transform_fn_io.TRANSFORM_FN_DIR),
features))
# The key MUST be 'examples'.
receiver_tensors = {'examples': serialized_tf_example}
  # NOTE: Model is driven by transformed features (since training works on the
  # materialized output of TFT), but slicing will happen on raw features.
features.update(transformed_features)
return tfma.export.EvalInputReceiver(
features=features,
receiver_tensors=receiver_tensors,
labels=transformed_features[taxi.transformed_name(taxi.LABEL_KEY)])
print('Done')
```
## Train and export the model for TFMA
```
def run_local_experiment(tft_run_id, tf_run_id, num_layers, first_layer_size, scale_factor):
"""Helper method to train and export the model for TFMA
  The caller specifies the input and output directory by providing run ids. The optional parameters
  allow the user to vary the model for the time series view.
  Args:
    tft_run_id: The run id for the preprocessing. Identifies the folder containing training data.
    tf_run_id: The run id for this training run. Identifies where the exported model will be written to.
    num_layers: The number of hidden layers.
    first_layer_size: The size of the first hidden layer.
    scale_factor: The scale factor between consecutive hidden layers.
"""
hparams = tf.contrib.training.HParams(
# Inputs: are tf-transformed materialized features
train_files=os.path.join(get_tft_train_output_dir(tft_run_id), TFT_TRAIN_FILE_PREFIX + '-00000-of-*'),
eval_files=os.path.join(get_tft_eval_output_dir(tft_run_id), TFT_EVAL_FILE_PREFIX + '-00000-of-*'),
schema_file=get_schema_file(),
# Output: dir for trained model
job_dir=get_tf_output_dir(tf_run_id),
tf_transform_dir=get_tft_train_output_dir(tft_run_id),
# Output: dir for both the serving model and eval_model which will go into tfma
# evaluation
output_dir=get_tf_output_dir(tf_run_id),
train_steps=10000,
eval_steps=5000,
num_layers=num_layers,
first_layer_size=first_layer_size,
scale_factor=scale_factor,
num_epochs=None,
train_batch_size=40,
eval_batch_size=40)
run_experiment(hparams)
print('Done')
run_local_experiment(tft_run_id=0,
tf_run_id=0,
num_layers=4,
first_layer_size=100,
scale_factor=0.7)
print('Done')
```
## Run TFMA to compute metrics
For local analysis, TFMA offers a helper method ``tfma.run_model_analysis``
```
help(tfma.run_model_analysis)
```
#### You can also write your own custom pipeline if you want to perform extra transformations on the data before evaluation.
```
def run_tfma(slice_spec, tf_run_id, tfma_run_id, input_csv, schema_file, add_metrics_callbacks=None):
"""A simple wrapper function that runs tfma locally.
  It does extra transformations on the data and then runs model analysis.
Args:
slice_spec: The slicing spec for how to slice the data.
    tf_run_id: An id to construct the model directories with.
tfma_run_id: An id to construct output directories with.
input_csv: The evaluation data in csv format.
schema_file: The file holding a text-serialized schema for the input data.
    add_metrics_callbacks: Optional list of callbacks for computing extra metrics.
Returns:
An EvalResult that can be used with TFMA visualization functions.
"""
eval_model_base_dir = os.path.join(get_tf_output_dir(tf_run_id), EVAL_MODEL_DIR)
eval_model_dir = os.path.join(eval_model_base_dir, next(os.walk(eval_model_base_dir))[1][0])
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=eval_model_dir,
add_metrics_callbacks=add_metrics_callbacks)
schema = taxi.read_schema(schema_file)
print(eval_model_dir)
display_only_data_location = input_csv
with beam.Pipeline() as pipeline:
csv_coder = taxi.make_csv_coder(schema)
raw_data = (
pipeline
| 'ReadFromText' >> beam.io.ReadFromText(
input_csv,
coder=beam.coders.BytesCoder(),
skip_header_lines=True)
| 'ParseCSV' >> beam.Map(csv_coder.decode))
# Examples must be in clean tf-example format.
coder = taxi.make_proto_coder(schema)
raw_data = (
raw_data
| 'ToSerializedTFExample' >> beam.Map(coder.encode))
_ = (raw_data
| 'ExtractEvaluateAndWriteResults' >>
tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
slice_spec=slice_spec,
output_path=get_tfma_output_dir(tfma_run_id),
display_only_data_location=input_csv))
return tfma.load_eval_result(output_path=get_tfma_output_dir(tfma_run_id))
print('Done')
```
#### You can also compute metrics on slices of your data in TFMA. Slices can be specified using ``tfma.slicer.SingleSliceSpec``.
Below are examples of how slices can be specified.
```
# An empty slice spec means the overall slice, that is, the whole dataset.
OVERALL_SLICE_SPEC = tfma.slicer.SingleSliceSpec()
# Data can be sliced along a feature column
# In this case, data is sliced along feature column trip_start_hour.
FEATURE_COLUMN_SLICE_SPEC = tfma.slicer.SingleSliceSpec(columns=['trip_start_hour'])
# Data can be sliced by crossing feature columns
# In this case, slices are computed for trip_start_day x trip_start_month.
FEATURE_COLUMN_CROSS_SPEC = tfma.slicer.SingleSliceSpec(columns=['trip_start_day', 'trip_start_month'])
# Metrics can be computed for a particular feature value.
# In this case, metrics is computed for all data where trip_start_hour is 12.
FEATURE_VALUE_SPEC = tfma.slicer.SingleSliceSpec(features=[('trip_start_hour', 12)])
# It is also possible to mix column cross and feature value cross.
# In this case, data where trip_start_hour is 12 will be sliced by trip_start_day.
COLUMN_CROSS_VALUE_SPEC = tfma.slicer.SingleSliceSpec(columns=['trip_start_day'], features=[('trip_start_hour', 12)])
ALL_SPECS = [
OVERALL_SLICE_SPEC,
FEATURE_COLUMN_SLICE_SPEC,
FEATURE_COLUMN_CROSS_SPEC,
FEATURE_VALUE_SPEC,
COLUMN_CROSS_VALUE_SPEC
]
```
#### Let's run TFMA!
```
tf.logging.set_verbosity(tf.logging.INFO)
tfma_result_1 = run_tfma(input_csv=os.path.join(EVAL_DATA_DIR, 'data.csv'),
tf_run_id=0,
tfma_run_id=1,
slice_spec=ALL_SPECS,
schema_file=get_schema_file())
print('Done')
```
## Visualization: Slicing Metrics
To see the slices, either use the name of the column (by setting slicing_column) or provide a tfma.slicer.SingleSliceSpec (by setting slicing_spec). If neither is provided, the overall slice will be displayed.
The default visualization is the **slice overview** when the number of slices is small. It shows the value of a metric for each slice, sorted by another metric. It is also possible to set a threshold to filter out slices with smaller weights.
This view also supports a **metrics histogram** as an alternative visualization. It is also the default view when the number of slices is large. The results are divided into buckets, and the number of slices, the total weights, or both can be visualized. Slices with small weights can be filtered out by setting the threshold. Further filtering can be applied by dragging the grey band. To reset the range, double click the band. Filtering can be used to remove outliers in the visualization and the metrics table below.
```
# Show data sliced along feature column trip_start_hour.
tfma.view.render_slicing_metrics(
tfma_result_1, slicing_column='trip_start_hour')
# Show metrics sliced by COLUMN_CROSS_VALUE_SPEC above.
tfma.view.render_slicing_metrics(tfma_result_1, slicing_spec=COLUMN_CROSS_VALUE_SPEC)
# Show overall metrics.
tfma.view.render_slicing_metrics(tfma_result_1)
```
## Visualization: Plots
TFMA offers a number of built-in plots. To see them, add them to ``add_metrics_callbacks``
```
tf.logging.set_verbosity(tf.logging.INFO)
tfma_vis = run_tfma(input_csv=os.path.join(EVAL_DATA_DIR, 'data.csv'),
tf_run_id=0,
tfma_run_id='vis',
slice_spec=ALL_SPECS,
schema_file=get_schema_file(),
add_metrics_callbacks=[
# calibration_plot_and_prediction_histogram computes calibration plot and prediction
# distribution at different thresholds.
tfma.post_export_metrics.calibration_plot_and_prediction_histogram(),
# auc_plots enables precision-recall curve and ROC visualization at different thresholds.
tfma.post_export_metrics.auc_plots()
])
print('Done')
```
Plots must be visualized for an individual slice. To specify a slice, use ``tfma.slicer.SingleSliceSpec``.
In the example below, we are using ``tfma.slicer.SingleSliceSpec(features=[('trip_start_hour', 1)])`` to specify the slice where trip_start_hour is 1.
Plots are interactive:
- Drag to pan
- Scroll to zoom
- Right click to reset the view
Simply hover over the desired data point to see more details.
```
tfma.view.render_plot(tfma_vis, tfma.slicer.SingleSliceSpec(features=[('trip_start_hour', 1)]))
```
#### Custom metrics
In addition to plots, it is also possible to compute metrics that were not present at export time, or custom metrics, using ``add_metrics_callbacks``.
All metrics in ``tf.metrics`` are supported in the callback and can be used to compose other metrics:
https://www.tensorflow.org/api_docs/python/tf/metrics
In the cells below, false negative rate is computed as an example.
```
# Defines a callback that adds FNR to the result.
def add_fnr_for_threshold(threshold):
def _add_fnr_callback(features_dict, predictions_dict, labels_dict):
metric_ops = {}
prediction_tensor = tf.cast(
predictions_dict.get(tf.contrib.learn.PredictionKey.LOGISTIC), tf.float64)
fn_value_op, fn_update_op = tf.metrics.false_negatives_at_thresholds(tf.squeeze(labels_dict),
tf.squeeze(prediction_tensor),
[threshold])
tp_value_op, tp_update_op = tf.metrics.true_positives_at_thresholds(tf.squeeze(labels_dict),
tf.squeeze(prediction_tensor),
[threshold])
fnr = fn_value_op[0] / (fn_value_op[0] + tp_value_op[0])
metric_ops['FNR@' + str(threshold)] = (fnr, tf.group(fn_update_op, tp_update_op))
return metric_ops
return _add_fnr_callback
tf.logging.set_verbosity(tf.logging.INFO)
tfma_fnr = run_tfma(input_csv=os.path.join(EVAL_DATA_DIR, 'data.csv'),
tf_run_id=0,
tfma_run_id='fnr',
slice_spec=ALL_SPECS,
schema_file=get_schema_file(),
add_metrics_callbacks=[
# Simply add the call here.
add_fnr_for_threshold(0.75)
])
tfma.view.render_slicing_metrics(tfma_fnr, slicing_spec=FEATURE_COLUMN_SLICE_SPEC)
```
## Visualization: Time Series
It is important to track how your model is doing over time. TFMA offers two modes to show how your model performs over time.
**Multiple model analysis** shows how the model performs from one version to another. This is useful early on to see how the addition of new features, changes in modeling technique, etc., affect the performance. TFMA offers a convenient method.
```
help(tfma.multiple_model_analysis)
```
**Multiple data analysis** shows how a model performs on different evaluation datasets. This is useful to ensure that model performance does not degrade over time. TFMA offers a convenient method.
```
help(tfma.multiple_data_analysis)
```
It is also possible to compose a time series manually.
```
# Create different models.
# Run some experiments with different hidden layer configurations.
run_local_experiment(tft_run_id=0,
tf_run_id=1,
num_layers=3,
first_layer_size=200,
scale_factor=0.7)
run_local_experiment(tft_run_id=0,
tf_run_id=2,
num_layers=4,
first_layer_size=240,
scale_factor=0.5)
print('Done')
tfma_result_2 = run_tfma(input_csv=os.path.join(EVAL_DATA_DIR, 'data.csv'),
tf_run_id=1,
tfma_run_id=2,
slice_spec=ALL_SPECS,
schema_file=get_schema_file())
tfma_result_3 = run_tfma(input_csv=os.path.join(EVAL_DATA_DIR, 'data.csv'),
tf_run_id=2,
tfma_run_id=3,
slice_spec=ALL_SPECS,
schema_file=get_schema_file())
print('Done')
```
Like plots, the time series view must be visualized for a slice too.
In the example below, we are showing the overall slice.
Select a metric to see its time series graph. Hover over each data point to get more details.
```
eval_results = tfma.make_eval_results([tfma_result_1, tfma_result_2, tfma_result_3],
tfma.constants.MODEL_CENTRIC_MODE)
tfma.view.render_time_series(eval_results, OVERALL_SLICE_SPEC)
```
Serialized results can also be used to construct a time series. Thus, there is no need to re-run TFMA for models that have already been evaluated in a long-running pipeline.
```
# Visualize the results in a Time Series. In this case, we are showing the slice specified.
eval_results_from_disk = tfma.load_eval_results([get_tfma_output_dir(1),
get_tfma_output_dir(2),
get_tfma_output_dir(3)],
tfma.constants.MODEL_CENTRIC_MODE)
tfma.view.render_time_series(eval_results_from_disk, FEATURE_VALUE_SPEC)
```
|
github_jupyter
|
# Conservative remapping
```
import xgcm
import xarray as xr
import numpy as np
import xbasin
```
We open the example data and create two grids: one for the dataset we have and one for the remapped data.
Here the suffix '_fr' means *from* (the original grid) and '_to' means *to* (the grid we remap onto).
```
ds = xr.open_dataset('data/nemo_output_ex.nc')
from xnemogcm import open_nemo_and_domain_cfg
# Note: the next line overrides the example dataset opened above with data read from a local, non-portable path.
ds = open_nemo_and_domain_cfg(datadir='/home/romain/Documents/Education/PhD/Courses/2019-OC6310/Project/Experiments/EXP_eos00/Rawdata')
metrics_fr = {
('X',): ['e1t', 'e1u', 'e1v', 'e1f'],
('Y',): ['e2t', 'e2u', 'e2v', 'e2f'],
('Z',): ['e3t', 'e3u', 'e3v', 'e3w']
}
metrics_to = {
('X',): ['e1t', 'e1u', 'e1v', 'e1f'],
('Y',): ['e2t', 'e2u', 'e2v', 'e2f'],
('Z',): ['e3t_1d', 'e3w_1d']
}
grid_fr = xgcm.Grid(ds, periodic=False, metrics=metrics_fr)
grid_to = xgcm.Grid(ds, periodic=False, metrics=metrics_to)
# Convert the thetao float32 to float64 for more precision
ds.thetao.values = ds.thetao.values.astype(np.float64)
print(ds)
```
## Remap a T point
```
%timeit xbasin.remap_vertical(ds.thetao, grid_fr, grid_to, axis='Z')
theta_to = xbasin.remap_vertical(ds.thetao, grid_fr, grid_to, axis='Z')
print(theta_to.coords)
```
The total heat content is conserved:
```
hc_fr = grid_fr.integrate(ds.thetao, axis='Z')
hc_to = grid_to.integrate(theta_to, axis='Z')
(hc_fr == hc_to).all()
```
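Since the remapping involves floating-point arithmetic, an exact equality check can be fragile; a tolerance-based comparison is often more robust. A minimal sketch, reusing the `hc_fr` and `hc_to` computed above:
```
# compare the heat contents up to floating-point tolerance
np.allclose(hc_fr, hc_to)
```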
## Remap a W point
```
w_to = xbasin.remap_vertical(ds.woce*0+1, grid_fr, grid_to, axis='Z')
grid_to.integrate(w_to, axis='Z')[-1].plot()
grid_fr.integrate((ds.woce*0+1), axis='Z')[-1].plot()
```
## Time comparison
The core function of the remapping is compiled from Python to C++ with Pythran, which improves the speed. However, if Pythran is not installed, the original Python function is called instead.
As a user, you should not call the two following functions directly; they are only shown here for the timing comparison.
```
fake_dataset = [
np.ascontiguousarray(ds.gdept_0.values.reshape(ds.gdept_0.values.shape+(1,))),
np.ascontiguousarray(ds.gdepw_0.values.reshape(ds.gdepw_0.values.shape+(1,))),
np.ascontiguousarray(ds.thetao.transpose('z_c', 'y_c', 'x_c', 't').values.flatten().reshape(ds.thetao.transpose('z_c', 'y_c', 'x_c', 't').shape)[...,0:1])
]
from xbasin._interpolation import interp_new_vertical as _interpolation_pure_python
from xbasin.interpolation_compiled import interp_new_vertical as _interpolation_pythran
```
### Pure Python
```
%timeit _interpolation_pure_python(*fake_dataset)
```
### Pythran
```
%timeit _interpolation_pythran(*fake_dataset)
```
We see that the compiled version runs about 10-100 times faster (of course this number is just a rough approximation). The pure Python version does not use vectorized arrays and is thus slower.
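The fallback behaviour described above can be reproduced explicitly with a guarded import. This is only a sketch based on the module paths used in the cells above, not necessarily how xbasin selects the implementation internally:
```
try:
    # compiled (Pythran) implementation
    from xbasin.interpolation_compiled import interp_new_vertical
except ImportError:
    # pure Python fallback when Pythran is not installed
    from xbasin._interpolation import interp_new_vertical
```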
|
github_jupyter
|
# DeepDreaming with TensorFlow
>[Loading and displaying the model graph](#loading)
>[Naive feature visualization](#naive)
>[Multiscale image generation](#multiscale)
>[Laplacian Pyramid Gradient Normalization](#laplacian)
>[Playing with feature visualizations](#playing)
>[DeepDream](#deepdream)
This notebook demonstrates a number of Convolutional Neural Network image generation techniques implemented with TensorFlow for fun and science:
- visualize individual feature channels and their combinations to explore the space of patterns learned by the neural network (see [GoogLeNet](http://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](http://storage.googleapis.com/deepdream/visualz/vgg16/index.html) galleries)
- embed TensorBoard graph visualizations into Jupyter notebooks
- produce high-resolution images with tiled computation ([example](http://storage.googleapis.com/deepdream/pilatus_flowers.jpg))
- use Laplacian Pyramid Gradient Normalization to produce smooth and colorful visuals at low cost
- generate DeepDream-like images with TensorFlow (DogSlugs included)
The network under examination is the [GoogLeNet architecture](http://arxiv.org/abs/1409.4842), trained to classify images into one of 1000 categories of the [ImageNet](http://image-net.org/) dataset. It consists of a set of layers that apply a sequence of transformations to the input image. The parameters of these transformations were determined during the training process by a variant of the gradient descent algorithm. The internal image representations may seem obscure, but it is possible to visualize and interpret them. In this notebook we are going to present a few tricks that allow us to make these visualizations both efficient to generate and even beautiful. Impatient readers can start by exploring the full galleries of images generated by the method described here for the [GoogLeNet](http://storage.googleapis.com/deepdream/visualz/tensorflow_inception/index.html) and [VGG16](http://storage.googleapis.com/deepdream/visualz/vgg16/index.html) architectures.
```
# boilerplate code
from __future__ import print_function
import os
from io import BytesIO
import numpy as np
from functools import partial
import PIL.Image
from IPython.display import clear_output, Image, display, HTML
import tensorflow as tf
```
<a id='loading'></a>
## Loading and displaying the model graph
The pretrained network can be downloaded [here](https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip). Unpack the `tensorflow_inception_graph.pb` file from the archive and assign its path to the `model_fn` variable. Alternatively, you can uncomment and run the following cell to download the network:
```
#!wget https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip && unzip inception5h.zip
model_fn = 'tensorflow_inception_graph.pb'
# creating TensorFlow session and loading the model
graph = tf.Graph()
sess = tf.InteractiveSession(graph=graph)
with tf.gfile.FastGFile(model_fn, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
t_input = tf.placeholder(np.float32, name='input') # define the input tensor
imagenet_mean = 117.0
t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
tf.import_graph_def(graph_def, {'input':t_preprocessed})
```
To take a glimpse into the kinds of patterns that the network learned to recognize, we will try to generate images that maximize the sum of activations of a particular channel of a particular convolutional layer of the neural network. The network we explore contains many convolutional layers, each of which outputs tens to hundreds of feature channels, so we have plenty of patterns to explore.
```
layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
feature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]
print('Number of layers', len(layers))
print('Total number of feature channels:', sum(feature_nums))
# Helper functions for TF Graph visualization
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = "<stripped %d bytes>"%size
return strip_def
def rename_nodes(graph_def, rename_func):
res_def = tf.GraphDef()
for n0 in graph_def.node:
n = res_def.node.add()
n.MergeFrom(n0)
n.name = rename_func(n.name)
for i, s in enumerate(n.input):
n.input[i] = rename_func(s) if s[0]!='^' else '^'+rename_func(s[1:])
return res_def
def show_graph(graph_def, max_const_size=32):
"""Visualize TensorFlow graph."""
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
iframe = """
<iframe seamless style="width:800px;height:620px;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '"'))
display(HTML(iframe))
# Visualizing the network graph. Be sure to expand the "mixed" nodes to see their
# internal structure. We are going to visualize "Conv2D" nodes.
tmp_def = rename_nodes(graph_def, lambda s:"/".join(s.split('_',1)))
show_graph(tmp_def)
```
<a id='naive'></a>
## Naive feature visualization
Let's start with a naive way of visualizing these. Image-space gradient ascent!
```
# Picking some internal layer. Note that we use outputs before applying the ReLU nonlinearity
# to have non-zero gradients for features with negative initial activations.
layer = 'mixed4d_3x3_bottleneck_pre_relu'
channel = 139 # picking some feature channel to visualize
# start with a gray image with a little noise
img_noise = np.random.uniform(size=(224,224,3)) + 100.0
def showarray(a, fmt='jpeg'):
a = np.uint8(np.clip(a, 0, 1)*255)
f = BytesIO()
PIL.Image.fromarray(a).save(f, fmt)
display(Image(data=f.getvalue()))
def visstd(a, s=0.1):
'''Normalize the image range for visualization'''
return (a-a.mean())/max(a.std(), 1e-4)*s + 0.5
def T(layer):
'''Helper for getting layer output tensor'''
return graph.get_tensor_by_name("import/%s:0"%layer)
def render_naive(t_obj, img0=img_noise, iter_n=20, step=1.0):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for i in range(iter_n):
g, score = sess.run([t_grad, t_score], {t_input:img})
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
print(score, end = ' ')
clear_output()
showarray(visstd(img))
render_naive(T(layer)[:,:,:,channel])
```
<a id="multiscale"></a>
## Multiscale image generation
Looks like the network wants to show us something interesting! Let's help it. We are going to apply gradient ascent on multiple scales. Details formed at a smaller scale will be upscaled and augmented with additional details at the next scale.
With multiscale image generation it may be tempting to set the number of octaves to some high value to produce wallpaper-sized images. In that case, storing network activations and backprop values would quickly run out of GPU memory. There is a simple trick to avoid this: split the image into smaller tiles and compute the gradient for each tile independently. Applying random shifts to the image before every iteration helps avoid tile seams and improves the overall image quality.
```
def tffunc(*argtypes):
'''Helper that transforms TF-graph generating function into a regular one.
See "resize" function below.
'''
placeholders = list(map(tf.placeholder, argtypes))
def wrap(f):
out = f(*placeholders)
def wrapper(*args, **kw):
return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
return wrapper
return wrap
# Helper function that uses TF to resize an image
def resize(img, size):
img = tf.expand_dims(img, 0)
return tf.image.resize_bilinear(img, size)[0,:,:,:]
resize = tffunc(np.float32, np.int32)(resize)
def calc_grad_tiled(img, t_grad, tile_size=512):
'''Compute the value of tensor t_grad over the image in a tiled way.
Random shifts are applied to the image to blur tile boundaries over
multiple iterations.'''
sz = tile_size
h, w = img.shape[:2]
sx, sy = np.random.randint(sz, size=2)
img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
grad = np.zeros_like(img)
for y in range(0, max(h-sz//2, sz),sz):
for x in range(0, max(w-sz//2, sz),sz):
sub = img_shift[y:y+sz,x:x+sz]
g = sess.run(t_grad, {t_input:sub})
grad[y:y+sz,x:x+sz] = g
return np.roll(np.roll(grad, -sx, 1), -sy, 0)
def render_multiscale(t_obj, img0=img_noise, iter_n=10, step=1.0, octave_n=3, octave_scale=1.4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
img = img0.copy()
for octave in range(octave_n):
if octave>0:
hw = np.float32(img.shape[:2])*octave_scale
img = resize(img, np.int32(hw))
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
# normalizing the gradient, so the same step size should work
g /= g.std()+1e-8 # for different layers and networks
img += g*step
print('.', end = ' ')
clear_output()
showarray(visstd(img))
render_multiscale(T(layer)[:,:,:,channel])
```
<a id="laplacian"></a>
## Laplacian Pyramid Gradient Normalization
This looks better, but the resulting images mostly contain high frequencies. Can we improve it? One way is to add a smoothness prior into the optimization objective. This will effectively blur the image a little every iteration, suppressing the higher frequencies so that the lower frequencies can catch up. This will require more iterations to produce a nice image. Why don't we just boost the lower frequencies of the gradient instead? One way to achieve this is through the [Laplacian pyramid](https://en.wikipedia.org/wiki/Pyramid_%28image_processing%29#Laplacian_pyramid) decomposition. We call the resulting technique _Laplacian Pyramid Gradient Normalization_.
```
k = np.float32([1,4,6,4,1])
k = np.outer(k, k)
k5x5 = k[:,:,None,None]/k.sum()*np.eye(3, dtype=np.float32)
def lap_split(img):
'''Split the image into lo and hi frequency components'''
with tf.name_scope('split'):
lo = tf.nn.conv2d(img, k5x5, [1,2,2,1], 'SAME')
lo2 = tf.nn.conv2d_transpose(lo, k5x5*4, tf.shape(img), [1,2,2,1])
hi = img-lo2
return lo, hi
def lap_split_n(img, n):
'''Build Laplacian pyramid with n splits'''
levels = []
for i in range(n):
img, hi = lap_split(img)
levels.append(hi)
levels.append(img)
return levels[::-1]
def lap_merge(levels):
'''Merge Laplacian pyramid'''
img = levels[0]
for hi in levels[1:]:
with tf.name_scope('merge'):
img = tf.nn.conv2d_transpose(img, k5x5*4, tf.shape(hi), [1,2,2,1]) + hi
return img
def normalize_std(img, eps=1e-10):
'''Normalize image by making its standard deviation = 1.0'''
with tf.name_scope('normalize'):
std = tf.sqrt(tf.reduce_mean(tf.square(img)))
return img/tf.maximum(std, eps)
def lap_normalize(img, scale_n=4):
'''Perform the Laplacian pyramid normalization.'''
img = tf.expand_dims(img,0)
tlevels = lap_split_n(img, scale_n)
tlevels = list(map(normalize_std, tlevels))
out = lap_merge(tlevels)
return out[0,:,:,:]
# Showing the lap_normalize graph with TensorBoard
lap_graph = tf.Graph()
with lap_graph.as_default():
lap_in = tf.placeholder(np.float32, name='lap_in')
lap_out = lap_normalize(lap_in)
show_graph(lap_graph)
def render_lapnorm(t_obj, img0=img_noise, visfunc=visstd,
iter_n=10, step=1.0, octave_n=3, octave_scale=1.4, lap_n=4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# build the laplacian normalization graph
lap_norm_func = tffunc(np.float32)(partial(lap_normalize, scale_n=lap_n))
img = img0.copy()
for octave in range(octave_n):
if octave>0:
hw = np.float32(img.shape[:2])*octave_scale
img = resize(img, np.int32(hw))
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
g = lap_norm_func(g)
img += g*step
print('.', end = ' ')
clear_output()
showarray(visfunc(img))
render_lapnorm(T(layer)[:,:,:,channel])
```
<a id="playing"></a>
## Playing with feature visualizations
We got a nice smooth image using only 10 iterations per octave. When running on a GPU, this takes just a few seconds. Let's try to visualize another channel from the same layer. The network can generate a wide diversity of patterns.
```
render_lapnorm(T(layer)[:,:,:,65])
```
Lower layers produce features of lower complexity.
```
render_lapnorm(T('mixed3b_1x1_pre_relu')[:,:,:,101])
```
There are many interesting things one may try. For example, optimizing a linear combination of features often gives a "mixture" pattern.
```
render_lapnorm(T(layer)[:,:,:,65]+T(layer)[:,:,:,139], octave_n=4)
```
<a id="deepdream"></a>
## DeepDream
Now let's reproduce the [DeepDream algorithm](https://github.com/google/deepdream/blob/master/dream.ipynb) with TensorFlow.
```
def render_deepdream(t_obj, img0=img_noise,
iter_n=10, step=1.5, octave_n=4, octave_scale=1.4):
t_score = tf.reduce_mean(t_obj) # defining the optimization objective
t_grad = tf.gradients(t_score, t_input)[0] # behold the power of automatic differentiation!
# split the image into a number of octaves
img = img0
octaves = []
for i in range(octave_n-1):
hw = img.shape[:2]
lo = resize(img, np.int32(np.float32(hw)/octave_scale))
hi = img-resize(lo, hw)
img = lo
octaves.append(hi)
# generate details octave by octave
for octave in range(octave_n):
if octave>0:
hi = octaves[-octave]
img = resize(img, hi.shape[:2])+hi
for i in range(iter_n):
g = calc_grad_tiled(img, t_grad)
img += g*(step / (np.abs(g).mean()+1e-7))
print('.',end = ' ')
clear_output()
showarray(img/255.0)
```
Let's load an image and populate it with DogSlugs (in case you've missed them).
```
img0 = PIL.Image.open('pilatus800.jpg')
img0 = np.float32(img0)
showarray(img0/255.0)
render_deepdream(tf.square(T('mixed4c')), img0)
```
Note that results can differ from the [Caffe](https://github.com/BVLC/caffe) implementation, as we are using an independently trained network. Still, the network seems to like dogs and animal-like features due to the nature of the ImageNet dataset.
Using an arbitrary optimization objective still works:
```
render_deepdream(T(layer)[:,:,:,139], img0)
```
Don't hesitate to use higher-resolution inputs (and also increase the number of octaves)! Here is an [example](http://storage.googleapis.com/deepdream/pilatus_flowers.jpg) of running the flower dream over a bigger image.
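A minimal sketch of such a higher-resolution run, reusing the helpers defined above (the file name `big_photo.jpg` is only a placeholder for whatever large image you use):
```
# load a larger image (placeholder file name) and dream with more octaves
img_big = np.float32(PIL.Image.open('big_photo.jpg'))
render_deepdream(tf.square(T('mixed4c')), img_big, octave_n=6)
```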
We hope that the visualization tricks described here may be helpful for analyzing representations learned by neural networks or find their use in various artistic applications.
|
github_jupyter
|
**[Introduction to Machine Learning Home Page](https://www.kaggle.com/learn/intro-to-machine-learning)**
---
## Recap
Here's the code you've written so far.
```
# code you have previously used
# load data
import pandas as pd
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
home_data = pd.read_csv(iowa_file_path)
# create target object and call it y
y = home_data['SalePrice']
# create X
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
X = home_data[features]
# split into validation and training data
from sklearn.model_selection import train_test_split
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# specify Model
from sklearn.tree import DecisionTreeRegressor
iowa_model = DecisionTreeRegressor(random_state=1)
# fit Model
iowa_model.fit(train_X, train_y)
# make validation predictions
val_predictions = iowa_model.predict(val_X)
# calculate mean absolute error
from sklearn.metrics import mean_absolute_error
val_mae = mean_absolute_error(val_y, val_predictions)
print(f"Validation MAE when not specifying max_leaf_nodes: {val_mae:,.0f}")
# print("Validation MAE when not specifying max_leaf_nodes: {:,.0f}".format(val_mae))
# using best value for max_leaf_nodes
iowa_model = DecisionTreeRegressor(max_leaf_nodes=100, random_state=1)
iowa_model.fit(train_X, train_y)
val_predictions = iowa_model.predict(val_X)
val_mae = mean_absolute_error(val_y, val_predictions)
print(f"Validation MAE for best value of max_leaf_nodes: {val_mae:,.0f}")
# set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex6 import *
print("\nSetup complete")
```
# Exercises
Data science isn't always this easy. But replacing the decision tree with a Random Forest is going to be an easy win.
## Step 1: Use a Random Forest
```
from sklearn.ensemble import RandomForestRegressor
# specify model. set random_state to 1
rf_model = RandomForestRegressor(random_state=1)
# fit model
rf_model.fit(train_X, train_y)
# calculate the mean absolute error of your Random Forest model on the validation data
val_ft_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(val_y, val_ft_predictions)
print(f"Validation MAE for Random Forest Model: {rf_val_mae}")
# Check your answer
step_1.check()
# The lines below will show you a hint or the solution.
# step_1.hint()
# step_1.solution()
```
So far, you have followed specific instructions at each step of your project. This helped you learn key ideas and build your first model, but now you know enough to try things on your own.
Machine Learning competitions are a great way to try your own ideas and learn more as you independently navigate a machine learning project.
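For example, a minimal sketch of one experiment you could run on your own is a small sweep over `n_estimators`, comparing the validation MAE for each candidate value (this reuses the `train_X`/`val_X` split from above; the candidate values are only illustrative):
```
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error

# compare validation MAE for a few candidate values of n_estimators
for n_estimators in [50, 100, 200]:
    candidate = RandomForestRegressor(n_estimators=n_estimators, random_state=1)
    candidate.fit(train_X, train_y)
    candidate_mae = mean_absolute_error(val_y, candidate.predict(val_X))
    print(f"n_estimators={n_estimators}: validation MAE = {candidate_mae:,.0f}")
```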
# Keep Going
You are ready for **[Machine Learning Competitions](https://www.kaggle.com/kernels/fork/1259198).**
---
**[Introduction to Machine Learning Home Page](https://www.kaggle.com/learn/intro-to-machine-learning)**
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
|
github_jupyter
|
We will use this notebook to calculate and visualize statistics of our chess move dataset. This will allow us to better understand our limitations and help diagnose problems we may encounter down the road when training/defining our model.
```
import pdb
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def get_move_freqs(moves, sort=True):
freq_dict = {}
for move in moves:
if move not in freq_dict:
freq_dict[move] = 0
freq_dict[move] = freq_dict[move] + 1
tuples = [(w, c) for w, c in freq_dict.items()]
if sort:
tuples = sorted(tuples, key=lambda x: -x[1])
return (tuples, moves)
def plot_frequency(counts, move_limit=1000):
    # limit to the move_limit most frequent moves
    counts = counts[0:move_limit]
# from: http://stackoverflow.com/questions/30690619/python-histogram-using-matplotlib-on-top-words
moves = [x[0] for x in counts]
values = [int(x[1]) for x in counts]
bar = plt.bar(range(len(moves)), values, color='green', alpha=0.4)
plt.xlabel('Move Index')
plt.ylabel('Frequency')
plt.title('Move Frequency Chart')
plt.show()
def plot_uniq_over_count(moves, interval=0.01):
xs, ys = [], []
for i in range(0, len(moves), int(len(moves) * interval)):
chunk = moves[0:i]
uniq = list(set(chunk))
xs.append(len(chunk))
ys.append(len(uniq))
plt.plot(xs, ys)
plt.ticklabel_format(style='sci', axis='x', scilimits=(0, 0))
plt.xlabel('Moves')
plt.ylabel('Unique Moves')
plt.show()
def plot_game_lengths(game_lengths):
xs = [g[0] for g in game_lengths]
ys = [g[1] for g in game_lengths]
bar = plt.bar(xs, ys, color='blue', alpha=0.4)
plt.xlabel('Half-moves per game')
plt.ylabel('Frequency')
plt.title('Game Length')
plt.show()
def plot_repeat_states(moves):
uniq_states = {}
moves_in_game = ''
for move in moves:
moves_in_game = moves_in_game + ' ' + move
if moves_in_game not in uniq_states:
uniq_states[moves_in_game] = 0
uniq_states[moves_in_game] = uniq_states[moves_in_game] + 1
if is_game_over_move(move):
moves_in_game = ''
vals = []
d = {}
for state, count in sorted(uniq_states.items(), key=lambda x: (-x[1], x[0])):
vals.append((count, state))
# move_count = len(state.split())
# if move_count not in d:
# d[move_count] = 0
# d[move_count] = d[move_count] + 1
    counts = [c for c, s in vals]
    plt.plot(counts)
plt.xlim([0, 100])
plt.xlabel('Board State')
plt.ylabel('Frequency')
plt.title('Frequency of Board State')
plt.show()
# vals = [(length, count) for length, count in sorted(d.items(), key=lambda x: -x[0])]
# pdb.set_trace()
# plt.bar(vals)
# plt.xlim([0, 1000])
# plt.xlabel('Moves in State')
# plt.ylabel('Frequency')
# print('{} uniq board states'.format(len(list(uniq_states.keys()))))
def get_game_lengths(moves):
game_lengths = {}
total_games = 0
current_move = 1
for move in moves:
if is_game_over_move(move):
if current_move not in game_lengths:
game_lengths[current_move] = 0
game_lengths[current_move] = game_lengths[current_move] + 1
current_move = 1
total_games = total_games + 1
else:
current_move = current_move + 1
print(total_games)
return [(k, v) for k, v in game_lengths.items()], total_games
def is_game_over_move(move):
return move in ('0-1', '1-0', '1/2-1/2')
```
Load our concatenated moves data.
```
with open('../data/train_moves.txt', 'r') as f:
moves = f.read().split(' ')
print('{} moves loaded'.format(len(moves)))
counts, moves = get_move_freqs(moves)
game_lengths, total_games = get_game_lengths(moves)
# plot_repeat_states(moves)
```
## Plot Move Frequency
Here we can see which moves appear most frequently in the dataset. These moves are the most popular moves played by chess champions.
```
plot_frequency(counts)
```
We will list the few most common moves along with the percentage of the entire moves dataset that each move represents.
```
top_n = 10
for w in counts[0:top_n]:
print((w[0]).ljust(8), '{:.2f}%'.format((w[1]/len(moves)) * 100.00))
```
## Plot Unique Moves
Here we compare the number of unique moves against the total move count. Notice that the number of unique moves converges towards a constant as the total number of moves increases. This suggests that there is a subset of all possible moves that chess champions actually tend to play.
```
plot_uniq_over_count(moves)
```
## Plot Game Lengths
```
plot_game_lengths(game_lengths)
top_n = 10
sorted_lengths = sorted(game_lengths, key=lambda x: -x[1])
for l in sorted_lengths[0:top_n]:
print((str(l[0])).ljust(8), '{:.3f}%'.format((l[1]/total_games) * 100.00))
```
|
github_jupyter
|
This notebook will set up Colab so that you can run the SYCL blur lab for the module "Introduction to SYCL programming" created by the TOUCH project (https://github.com/TeachingUndergradsCHC/modules/tree/master/Programming/sycl). The initial setup instructions were created following slides by Aksel Alpay
https://www.iwocl.org/wp-content/uploads/iwocl-syclcon-2020-alpay-32-slides.pdf
and the hipSYCL documentation https://github.com/illuhad/hipSYCL/blob/develop/doc/installing.md.
Begin by setting your runtime to use a CPU (Select "Change runtime type" in the Runtime menu and choose "CPU".) Then run the first couple of instructions below. Run them one at a time, waiting for each to finish before beginning the next. This will take several minutes.
Update the repositories and then get and install LLVM and the other tools needed so that we can build hipSYCL.
```
!apt update -qq;
!apt-get update -qq;
!add-apt-repository -y ppa:ubuntu-toolchain-r/test
!apt update
!apt install gcc-11 g++-11
!bash -c "$(wget -O - https://apt.llvm.org/llvm.sh)"
!apt-get install libboost-all-dev libclang-13-dev cmake python -qq;
!git clone --recurse-submodules https://github.com/illuhad/hipSYCL
!apt-get upgrade
```
Now build hipSYCL
```
!mkdir hipSYCL_build
%cd hipSYCL_build
!export CC=/usr/bin/gcc-11
!export CXX=/usr/bin/g++-11
!cmake -DCMAKE_INSTALL_PREFIX=/content/hipSYCL_install -DCMAKE_C_COMPILER=/usr/bin/gcc-11 -DCMAKE_CXX_COMPILER=/usr/bin/g++-11 /content/hipSYCL
!make install
%cd ..
```
Get the examples
```
!git clone https://github.com/TeachingUndergradsCHC/modules
%cd modules/Programming/sycl
```
Examine hello.cpp
```
!cat hello.cpp
```
Now compile hello.cpp
```
!/content/hipSYCL_install/bin/syclcc --hipsycl-platform=cpu -o hello hello.cpp
```
Then run it
```
!./hello
```
Now try the addVector program, first view it
```
!cat addVectors.cpp
```
Then compile it
```
!/content/hipSYCL_install/bin/syclcc --hipsycl-platform=cpu -o addVectors addVectors.cpp
```
Finally run it
```
!./addVectors
```
Next, examine the files that you'll need for the blur project. These are the library code for managing bmp files (stb_image.h and stb_image_write.h), the image that you'll be using (I provide 640x426.bmp, but you could use another file instead), and the program itself, noRed.cpp. You can view the program the same way as the earlier examples (see the cell below), and then compile it
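A viewer cell mirroring the earlier `!cat` cells, assuming you are still in the modules/Programming/sycl directory:
```
!cat noRed.cpp
```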
```
!/content/hipSYCL_install/bin/syclcc --hipsycl-platform=cpu -o noRed noRed.cpp
```
Now run the code
```
!./noRed
```
Original Image
```
from IPython.display import display
from PIL import Image
path="/content/modules/Programming/sycl/640x426.bmp"
display(Image.open(path))
```
Final Image
```
from IPython.display import display
from PIL import Image
path="/content/modules/Programming/sycl/out.bmp"
display(Image.open(path))
```
|
github_jupyter
|
# Classification of Chest and Abdominal X-rays
Code Source: Lakhani, P., Gray, D.L., Pett, C.R. et al. J Digit Imaging (2018) 31: 283. https://doi.org/10.1007/s10278-018-0079-6
The code to download and prepare the dataset has been modified from the original source code.
```
# load requirements for the Keras library
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import Adam
!rm -rf /content/*
# Download dataset
!wget https://github.com/paras42/Hello_World_Deep_Learning/raw/9921a12c905c00a88898121d5dc538e3b524e520/Open_I_abd_vs_CXRs.zip
!ls /content
# unzip
!unzip /content/Open_I_abd_vs_CXRs.zip
# dimensions of our images
img_width, img_height = 299, 299
# directory and image information
train_data_dir = 'Open_I_abd_vs_CXRs/TRAIN/'
validation_data_dir = 'Open_I_abd_vs_CXRs/VAL/'
# epochs = number of passes of through training data
# batch_size = number of images processes at the same time
train_samples = 65
validation_samples = 10
epochs = 20
batch_size = 5
# build the Inception V3 network, use pretrained weights from ImageNet
# remove the top fully connected layers with include_top=False
base_model = applications.InceptionV3(weights='imagenet', include_top=False,
input_shape=(img_width, img_height,3))
# build a classifier model to put on top of the convolutional model
# This consists of a global average pooling layer and a fully connected layer with 256 nodes
# Then apply dropout and sigmoid activation
model_top = Sequential()
model_top.add(GlobalAveragePooling2D(input_shape=base_model.output_shape[1:],
data_format=None)),
model_top.add(Dense(256, activation='relu'))
model_top.add(Dropout(0.5))
model_top.add(Dense(1, activation='sigmoid'))
model = Model(inputs=base_model.input, outputs=model_top(base_model.output))
# Compile model using Adam optimizer with common values and binary cross entropy loss
# Use a low learning rate (lr) for transfer learning
model.compile(optimizer=Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0),
loss='binary_crossentropy',
metrics=['accuracy'])
# Some on-the-fly augmentation options
train_datagen = ImageDataGenerator(
rescale = 1./255, # Rescale pixel values to 0-1 to aid CNN processing
shear_range = 0.2, # 0-1 range for shearing
zoom_range = 0.2, # 0-1 range for zoom
rotation_range = 20, # 0-180 range, degrees of rotation
width_shift_range = 0.2, # 0-1 range horizontal translation
height_shift_range = 0.2, # 0-1 range vertical translation
horizontal_flip = True # set True or False
)
val_datagen = ImageDataGenerator(
rescale=1./255 # Rescale pixel values to 0-1 to aid CNN processing
)
# Directory, image size, batch size already specified above
# Class mode is set to 'binary' for a 2-class problem
# Generator randomly shuffles and presents images in batches to the network
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary'
)
validation_generator = val_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode='binary'
)
# Fine-tune the pretrained Inception V3 model using the data generator
# Specify steps per epoch (number of samples/batch_size)
history = model.fit_generator(
train_generator,
steps_per_epoch=train_samples//batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_samples//batch_size
)
# import matplotlib library, and plot training curve
import matplotlib.pyplot as plt
print(history.history.keys())
plt.figure()
plt.plot(history.history['acc'],'orange', label='Training accuracy')
plt.plot(history.history['val_acc'],'blue', label='Validation accuracy')
plt.plot(history.history['loss'],'red', label='Training loss')
plt.plot(history.history['val_loss'],'green', label='validation loss')
plt.legend()
plt.show()
# import numpy and keras preprocessing libraries
import numpy as np
from keras.preprocessing import image
# load, resize, and display test images
img_path = 'Open_I_abd_vs_CXRs/TEST/abd2.png'
img_path2 = 'Open_I_abd_vs_CXRs/TEST/chest2.png'
img = image.load_img(img_path, target_size=(img_width, img_height))
img2 = image.load_img(img_path2, target_size=(img_width, img_height))
plt.imshow(img)
plt.show()
# convert image to numpy array, so Keras can render a prediction
img = image.img_to_array(img)
# expand array from 3 dimensions (height, width, channels) to 4 dimensions (batch size, height, width, channels)
# rescale pixel values to 0-1
x = np.expand_dims(img, axis=0) * 1./255
# get prediction on test image
score = model.predict(x)
print('Predicted:', score, 'Chest X-ray' if score < 0.5 else 'Abd X-ray')
# display and render a prediction for the 2nd image
plt.imshow(img2)
plt.show()
img2 = image.img_to_array(img2)
x = np.expand_dims(img2, axis=0) * 1./255
score = model.predict(x)
print('Predicted:', score, 'Chest X-ray' if score < 0.5 else 'Abd X-ray')
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/hadisotudeh/zestyAI_challenge/blob/main/Zesty_AI_Data_Scientist_Assignment_%7C_Hadi_Sotudeh.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<center> <h1><b>Zesty AI Data Science Interview Task - Hadi Sotudeh</b></h1> </center>
To perform this task, I had access to the [`2009 RESIDENTIAL ENERGY CONSUMPTION SURVEY`](https://www.eia.gov/consumption/residential/data/2009/index.php?view=microdata) to predict `electricity consumption`.
</br>
</br>
Libraries available in Python such as `scikit-learn` and `fastai` were employed to perform this machine learning regression task.
</br>
</br>
First, I need to install the notebook dependencies, import the relevant libraries, download the dataset, and have them available in Google Colab (next cell).
## Install Dependencies, Import Libraries, and Download the dataset
```
%%capture
# install dependencies
!pip install fastai --upgrade
# Import Libraries
# general libraries
import warnings
import os
from datetime import datetime
from tqdm import tqdm_notebook as tqdm
# machine learning libraries
import pandas as pd
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from fastai.tabular.all import *
from sklearn.ensemble import RandomForestRegressor
from pandas_profiling import ProfileReport
import joblib
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
# model interpretation library
from sklearn.inspection import plot_partial_dependence
%%capture
#download the dataset
! wget https://www.eia.gov/consumption/residential/data/2009/csv/recs2009_public.csv
```
## Set Global parameters
The electric consumption is located in the `KWH` field of the dataset.
```
#show plots inside the jupyter notebook
%matplotlib inline
# pandas settings to show more columns are rows in the jupyter notebook
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 50000)
# don't show warnings
warnings.filterwarnings('ignore')
# dataset file path
dataset = "recs2009_public.csv"
# target variable to predict
dep_var = "KWH"
```
## Read the dataset from CSV files, Perform Data Cleaning, and Feature Engineering
Following a typical machine learning project, I first clean up the dataset to remove data-leakage-related and non-relevant features.</br></br>It is important to mention that I did not first look at each column to figure out which feature to keep or not. What I did first was to train a model and iteratively look at the feature importances and check their meanings in the dataset documentation to figure out which features to remove to prevent data leakage.</br></br>In addition, groups of highly correlated features were identified and only one feature from each group was kept.
```
# read the train file
df = pd.read_csv(dataset)
# remove data-leakage and non-relevant features
non_essential_features = ["KWHSPH","KWHCOL","KWHWTH","KWHRFG","KWHOTH","BTUEL","BTUELSPH","BTUELCOL",
"BTUELWTH","BTUELRFG","BTUELOTH","DOLLAREL","DOLELSPH","DOLELCOL","DOLELWTH",
"DOLELRFG","DOLELOTH","TOTALBTUOTH","TOTALBTURFG","TOTALDOL","ELWATER",
"TOTALBTUWTH","TOTALBTU","ELWARM","TOTALBTUCOL","TOTALDOLCOL",
"REPORTABLE_DOMAIN","TOTALDOLWTH","TOTALBTUSPH","TOTCSQFT","TOTALDOLSPH",
"BTUNG", "BTUNGSPH", "BTUNGWTH","BTUNGOTH","DOLLARNG","DOLNGSPH","DOLNGWTH","DOLNGOTH",
"DIVISION"
]
df.drop(columns = non_essential_features, inplace=True)
# take the log of the dependent variable ('KWH'). More details are in the training step.
df[dep_var] = np.log(df[dep_var])
```
I created train and validation sets with random selection (an 80% vs. 20% split) from the dataset file in the next step.
```
splits = RandomSplitter(valid_pct=0.2)(range_of(df))
procs = [Categorify, FillMissing]
cont, cat = cont_cat_split(df, 1, dep_var=dep_var)
to = TabularPandas(df, procs, cat, cont, y_names=dep_var, splits = splits)
```
The following cell shows 5 random instances of the dataset (after cleaning and feature engineering).
```
to.show(5)
```
## Train the ML Model
Since model interpretation is also important to me, I chose Random Forest for prediction as well as for interpretation and knowledge discovery.
```
def rf(xs, y, n_estimators=40, max_features=0.5, min_samples_leaf=5, **kwargs):
"randomforst regressor"
return RandomForestRegressor(n_jobs=-1, n_estimators=n_estimators, max_features=max_features, min_samples_leaf=min_samples_leaf, oob_score=True).fit(xs, y)
xs,y = to.train.xs,to.train.y
valid_xs,valid_y = to.valid.xs,to.valid.y
m = rf(xs, y)
```
The predictions are evaluated based on the [Root-Mean-Squared-Error (RMSE) between the logarithm of the predicted value and the logarithm of the observed value](https://www.kaggle.com/c/house-prices-advanced-regression-techniques/overview/evaluation) (taking logs means that errors in predicting high and low electricity consumptions affect the result equally).
</br>
</br>
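Concretely, since `KWH` was already log-transformed above, the metric computed by the helper functions below is
$$\mathrm{RMSE} = \sqrt{\frac{1}{n}\sum_{i=1}^{n}\left(\log \hat{y}_i - \log y_i\right)^2}$$
where $\hat{y}_i$ is the predicted and $y_i$ the observed electricity consumption, and $n$ is the number of samples.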
```
def r_mse(pred,y):
return round(math.sqrt(((pred-y)**2).mean()), 6)
def m_rmse(m, xs, y):
return r_mse(m.predict(xs), y)
```
Print the Root Mean Squared Error of the logarithmic `KWH` on the train set:
```
m_rmse(m, xs, y)
```
Print the Root Mean Squared Error of the logarithmic `KWH` on the validation set:
```
m_rmse(m, valid_xs, valid_y)
```
Calculate the feature importances, remove non-important features, and re-train the model.
```
def rf_feat_importance(m, df):
return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}).sort_values('imp', ascending=False)
# show the top 10 features
fi = rf_feat_importance(m, xs)
fi[:10]
```
Only keep features with importance of more than 0.005 for re-training.
```
to_keep = fi[fi.imp>0.005].cols
print(f"features to keep are : {list(to_keep)}")
```
Some of the features to keep for re-training are:
1. `TOTALDOLOTH`: Total cost for appliances, electronics, lighting, and miscellaneous
2. `PELHOTWA`: Who pays for electricity used for water heating
3. `ACROOMS`: Number of rooms cooled
4. `TOTALDOLRFG`: Total cost for refrigerators, in whole dollars
5. `REGIONC`: Census Region
6. `TEMPNITEAC`: Temperature at night (summer)
```
xs_imp = xs[to_keep]
valid_xs_imp = valid_xs[to_keep]
m = rf(xs_imp, y)
```
Print the loss function of the re-trained model on train and validation sets.
```
m_rmse(m, xs_imp, y), m_rmse(m, valid_xs_imp, valid_y)
```
Check the correlation among the final features and adjust the set of features to remove at the beginning of the code.
```
from scipy.cluster import hierarchy as hc
def cluster_columns(df, figsize=(10,6), font_size=12):
corr = np.round(scipy.stats.spearmanr(df).correlation, 4)
corr_condensed = hc.distance.squareform(1-corr)
z = hc.linkage(corr_condensed, method='average')
fig = plt.figure(figsize=figsize)
hc.dendrogram(z, labels=df.columns, orientation='left', leaf_font_size=font_size)
plt.show()
cluster_columns(xs_imp)
```
Store the re-trained model.
```
joblib.dump(m, 'model.joblib')
```
## Interpret the Model and Do Knowledge Discovery
When I plot the feature importances of the trained model, I can clearly see that `TOTALDOLOTH` (Total cost for appliances, electronics, lighting, and miscellaneous uses in whole dollars) is the most important factor for the model to make its decisions.
```
def plot_fi(fi):
return fi.plot('cols', 'imp', 'barh', figsize=(12,7), legend=False)
plot_fi(rf_feat_importance(m, xs_imp));
```
In this section, I make use of [Partial Dependence Plots](https://christophm.github.io/interpretable-ml-book/pdp.html) to interpret the learned function (the ML model) and understand how it makes decisions and predicts electricity consumption.</br></br>The 1D feature plots show how much the predicted dependent variable (`log KWH`) changes on average as the feature on the x-axis changes.
```
explore_cols = ['TOTALDOLOTH','TOTALDOLRFG','ACROOMS','TEMPHOMEAC','TEMPNITEAC','CDD30YR','CUFEETNGOTH','WASHLOAD','CUFEETNG']
explore_cols_vals = ["Total cost for appliances, electronics, lighting, and miscellaneous uses, in whole dollars",
"Total cost for refrigerators, in whole dollars",
"Number of rooms cooled",
"Temperature when someone is home during the day (summer)",
"Temperature at night (summer)",
"Cooling degree days, 30-year average 1981-2010, base 65F",
"Natural Gas usage for other purposes (all end-uses except SPH and WTH), in hundred cubic feet",
"Frequency clothes washer used",
"Total Natural Gas usage, in hundred cubic feet"]
for index, col in enumerate(explore_cols):
fig,ax = plt.subplots(figsize=(12, 4))
plot_partial_dependence(m, valid_xs_imp, [col], grid_resolution=20, ax=ax);
x_label = explore_cols_vals[index]
plt.xlabel(x_label)
```
The 2D feature plots show how much the dependent variable changes as the two features shown on the x and y axes change.
</br>
</br>
Here, the plot shows how much the model (the learned function) changes its `log KWH` prediction on average when the two dimensions on the x and y axes change.
```
paired_features = [("TEMPNITEAC","TEMPHOMEAC"),("CUFEETNG","CUFEETNGOTH")]
paired_features_vals = [("Temperature at night (summer)","Temperature when someone is home during the day (summer)"),
("Total Natural Gas usage, in hundred cubic feet","Natural Gas usage for other purposes (all end-uses except SPH and WTH), in hundred cubic feet")]
for index, pair in enumerate(paired_features):
fig,ax = plt.subplots(figsize=(8, 8))
plot_partial_dependence(m, valid_xs_imp, [pair], grid_resolution=20, ax=ax);
x_label = paired_features_vals[index][0]
y_label = paired_features_vals[index][1]
plt.xlabel(x_label)
plt.ylabel(y_label)
```
## THE END!
|
github_jupyter
|
## Bengaluru House Price
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pd.set_option("display.max_rows", None, "display.max_columns", None)
df1=pd.read_csv("Dataset/Bengaluru_House_Data.csv")
df1.head()
```
### Data Cleaning
```
df1.info()
df1.isnull().sum()
df1.groupby('area_type')['area_type'].agg('count')
df2=df1.drop(['area_type','availability','society','balcony'], axis='columns')
df2.head()
df2.isnull().sum()
df2.shape
df2['location'].fillna(df2['location'].mode().values[0],inplace=True)
df2['size'].fillna(df2['size'].mode().values[0],inplace=True)
df2['bath'].fillna(df2['bath'].mode().values[0],inplace=True)
df2.isnull().sum()
df2['size'].unique()
df2['bhk']=df2['size'].apply(lambda x: int(x.split(' ')[0]))
df2=df2.drop(['size'],axis='columns')
df2.head()
df2['bhk'].unique()
df2['total_sqft'].unique()
```
###### Dimension Reduction
```
def infloat(x):
try:
float(x)
except:
return False
return True
df2[~df2['total_sqft'].apply(infloat)].head(10)
def convert(x):
token=x.split('-')
if(len(token)==2):
return (float(token[0])+float(token[1]))/2
try:
return float(x)
    except:
        # fallback value used for entries that cannot be parsed
        return 1600
df2['total_sqft']=df2['total_sqft'].apply(convert)
df2.head()
df2.loc[410]
df2.isnull().sum()
df2['total_sqft'].agg('mean')
df2['bath'].unique()
df3=df2.copy()
df3['price_per_sqft']=(df3['price']*100000/df3['total_sqft']).round(2)
df3.head()
df3.location.unique()
stats=df3.groupby('location')['location'].agg('count').sort_values(ascending=False)
stats
location_stat_less_than_10=stats[stats<=10]
location_stat_less_than_10
df3['location']=df3['location'].apply(lambda x:'others' if x in location_stat_less_than_10 else x)
len(df3.location.unique())
df3.head(10)
df3[df3['total_sqft']/df3['bhk']<300].head()
df3.shape
df4=df3[~(df3['total_sqft']/df3['bhk']<300)]
df4.shape
df4.price_per_sqft.describe()
def remove(df):
df_out = pd.DataFrame()
for key, subdf in df.groupby('location'):
m=np.mean(subdf.price_per_sqft)
st=np.std(subdf.price_per_sqft)
reduced_df=subdf[(subdf.price_per_sqft >(m-st)) & (subdf.price_per_sqft<=(m+st))]
df_out = pd.concat([df_out, reduced_df],ignore_index=True)
return df_out
df5=remove(df4)
df5.shape
def draw(df,location):
bhk2=df[ (df.location==location) & (df.bhk==2)]
bhk3=df[ (df.location==location) & (df.bhk==3)]
plt.rcParams['figure.figsize']=(15,10)
plt.scatter(bhk2.total_sqft,bhk2.price,color='blue')
plt.scatter(bhk3.total_sqft,bhk3.price,color='green',marker='+')
draw(df5,'Rajaji Nagar')
import matplotlib
matplotlib.rcParams['figure.figsize']=(15,10)
plt.hist(df5.price_per_sqft,rwidth=.8)
df5.bath.unique()
df5[df5.bath>df5.bhk+2]
df6=df5[df5.bath<df5.bhk+2]
df6.shape
df6.head()
df6=df6.drop(['price_per_sqft'],axis='columns')
df6.head()
dummies=pd.get_dummies(df6.location)
dummies.head(3)
dummies.shape
df7=pd.concat([df6,dummies.drop('others',axis='columns')],axis='columns')
df7.shape
df7.head(3)
df8=df7.drop('location',axis='columns')
df8.head(3)
df8.shape
x=df8.drop('price',axis='columns')
x.head(2)
y=df8['price']
y.head()
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y, test_size=0.2,random_state=10)
from sklearn.linear_model import LinearRegression
lr=LinearRegression()
lr.fit(x_train,y_train)
y_pred=lr.predict(x_test)
from sklearn.metrics import r2_score
r2_score(y_pred,y_test)
lr.score(x_test,y_test)
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
cv=ShuffleSplit(n_splits=5, test_size=.2,random_state=0)
cross_val_score(LinearRegression(),x,y,cv=cv)
from sklearn.ensemble import RandomForestRegressor
rfg=RandomForestRegressor(n_estimators=50)
rfg.fit(x_train,y_train)
r2_score(y_test,rfg.predict(x_test))
rfg.score(x_test,y_test)
cross_val_score(RandomForestRegressor(),x,y,cv=cv)
x.columns
X=x
def predict_price(location,sqft,bath,bhk):
    # build the feature vector: sqft, bath, bhk, then the one-hot encoded location
    x=np.zeros(len(X.columns))
    x[0]=sqft
    x[1]=bath
    x[2]=bhk
    # locations without a dummy column (grouped as 'others') keep an all-zero encoding
    loc_index_matches = np.where(X.columns==location)[0]
    if len(loc_index_matches)>0:
        x[loc_index_matches[0]]=1
    return lr.predict([x])[0]
predict_price('1st Phase JP Nagar',1000,4,5)
predict_price('Indira Nagar',1000,2,2)
import pickle
with open('banglore_home_price_model.pickle','wb') as f:
pickle.dump(lr,f)
import json
columns={
'data_columns' : [col.lower() for col in X.columns]
}
with open("columns.json","w") as f:
f.write(json.dumps(columns))
```
|
github_jupyter
|
```
from python_dict_wrapper import wrap
import sys
sys.path.append('../')
import torch
sys.path.append("../../CPC/dpc")
sys.path.append("../../CPC/backbone")
import matplotlib.pyplot as plt
import numpy as np
import scipy
def find_dominant_orientation(W):
Wf = abs(np.fft.fft2(W))
orient_sel = 1 - Wf[0, 0] / Wf.sum()
Wf[0, 0] = 0
Wf = np.fft.fftshift(Wf)
dt = W.shape[0] // 2
xi, yi = np.meshgrid(np.arange(-dt, dt+1), np.arange(-dt, dt+1))
# Check whether we should split this horizontally or vertically
if Wf[xi == 0].sum() > Wf[yi == 0].sum():
# Use a top-down split
xi_ = xi * (xi >= 0)
yi_ = yi * (xi >= 0)
x0 = (xi_ * Wf).sum() / ((xi >= 0) * Wf).sum()
y0 = (yi_ * Wf).sum() / ((xi >= 0) * Wf).sum()
else:
xi_ = xi * (yi >= 0)
yi_ = yi * (yi >= 0)
x0 = (xi_ * Wf).sum() / ((yi >= 0) * Wf).sum()
y0 = (yi_ * Wf).sum() / ((yi >= 0) * Wf).sum()
return np.arctan2(y0, x0), orient_sel
def get_spatial_slice(W, theta):
dx = W.shape[0] // 2
dt = W.shape[2] // 2
xi, yi, zi = np.meshgrid(np.arange(W.shape[0]),
np.arange(W.shape[1]),
np.arange(W.shape[2]))
xi_, zi_ = np.meshgrid(np.arange(W.shape[0]),
np.arange(W.shape[2]))
Ws = []
for i in range(W.shape[3]):
interp = scipy.interpolate.LinearNDInterpolator(np.array([xi.ravel(),
yi.ravel(),
zi.ravel()]).T, W[:, :, :, i].ravel())
probe = np.array([dx + (xi_ - dx) * np.cos(theta),
dx + (xi_ - dx) * np.sin(theta),
zi_]).T
Ws.append(interp(probe))
return np.stack(Ws, axis=2)
def plot_static_shot(W):
#assert W.shape[0] == 64
W = W / abs(W).max(axis=4).max(axis=3).max(axis=2).max(axis=1).reshape(-1, 1, 1, 1, 1) / 2 + .5
t = W.shape[2] // 2
best_thetas = []
orient_sels = []
for i in range(W.shape[0]):
theta, orient_sel = find_dominant_orientation(W[i, :, t, :, :].transpose(1, 2, 0).sum(2))
best_thetas.append(theta)
orient_sels.append(orient_sel)
best_thetas = np.array(best_thetas)
orient_sels = np.array(orient_sels)
sort_idx = np.argsort(orient_sels)[::-1]
best_thetas = best_thetas[sort_idx]
orient_sels = orient_sels[sort_idx]
W = W[sort_idx, :, :, :, :]
plt.figure(figsize=(8, 8))
for i in range(W.shape[0]):
plt.subplot(8, 8, i + 1)
plt.imshow(W[i, :, t, :, :].transpose(1, 2, 0))
theta = best_thetas[i]
#plt.plot([3 + 3 * np.sin(theta), 3 - 3 * np.sin(theta)], [3 + 3 * np.cos(theta), 3 - 3 * np.cos(theta)], 'r-')
plt.axis(False)
#plt.suptitle(f'xy filters, sliced at t = {t}')
plt.show()
dt = W.shape[-1] // 2
xi, yi = np.meshgrid(np.arange(-dt, dt+1), np.arange(-dt, dt+1))
plt.figure(figsize=(8, 8))
for i in range(W.shape[0]):
W_ = W[i, :, :, :, :].transpose((3, 2, 1, 0))
plt.subplot(8, 8, i + 1)
theta = best_thetas[i]
W_ = get_spatial_slice(W_, theta)
plt.imshow(W_)
plt.axis(False)
plt.show()
from models import get_feature_model
args = wrap({'features': 'cpc_02',
'ckpt_root': '../pretrained',
'slowfast_root': '../../slowfast',
'ntau': 1,
'subsample_layers': False})
model, _, _ = get_feature_model(args)
plot_static_shot(model.s1.conv1.weight.detach().cpu().numpy())
args = wrap({'features': 'cpc_01',
'ckpt_root': '../pretrained',
'slowfast_root': '../../slowfast',
'ntau': 1,
'subsample_layers': False})
model, _, _ = get_feature_model(args)
plot_static_shot(model.s1.conv1.weight.detach().cpu().numpy())
args = wrap({'features': 'airsim_04',
'ckpt_root': '../pretrained',
'slowfast_root': '../../slowfast',
'ntau': 1,
'subsample_layers': False})
model, _, _ = get_feature_model(args)
plot_static_shot(model.s1.conv1.weight.detach().cpu().numpy())
data = model.s1.conv1.weight.detach().cpu().numpy()
F = data.mean(axis=1).reshape((64, 5, 7*7))
sepindexes = []
for i in range(F.shape[0]):
U, S, V = np.linalg.svd(F[i, :, :])
sepindex = S[0] ** 2 / (S ** 2).sum()
sepindexes.append(sepindex)
plt.figure(figsize=(2,2))
plt.hist(sepindexes, np.arange(11)/10)
plt.xlabel('Separability index')
plt.ylabel('Count')
plt.plot([.71, .71], [0, 17], 'k--')
plt.plot([.71], [17], 'kv')
import seaborn as sns
sns.despine()
```
|
github_jupyter
|