<a href="https://colab.research.google.com/github/probml/probml-notebooks/blob/main/notebooks/bnn_hmc_gaussian.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# (SG)HMC for inferring params of a 2d Gaussian
Based on
https://github.com/google-research/google-research/blob/master/bnn_hmc/notebooks/mcmc_gaussian_test.ipynb
```
import jax
print(jax.devices())
!git clone https://github.com/google-research/google-research.git
%cd /content/google-research
!ls bnn_hmc
!pip install optax
```
# Setup
```
from jax.config import config
import jax
from jax import numpy as jnp
import numpy as onp
import numpy as np
import os
import sys
import time
import tqdm
import optax
import functools
from matplotlib import pyplot as plt
from bnn_hmc.utils import losses
from bnn_hmc.utils import train_utils
from bnn_hmc.utils import tree_utils
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
# Data and model
```
mu = jnp.zeros([2,])
# sigma = jnp.array([[1., .5], [.5, 1.]])
sigma = jnp.array([[1.e-4, 0], [0., 1.]])
sigma_l = jnp.linalg.cholesky(sigma)
sigma_inv = jnp.linalg.inv(sigma)
sigma_det = jnp.linalg.det(sigma)
onp.random.seed(0)
samples = onp.random.multivariate_normal(onp.asarray(mu), onp.asarray(sigma), size=1000)
plt.scatter(samples[:, 0], samples[:, 1], alpha=0.3)
plt.grid()
def log_density_fn(params):
assert params.shape == mu.shape, "Shape error"
diff = params - mu
k = mu.size
log_density = -jnp.log(2 * jnp.pi) * k / 2
log_density -= jnp.log(sigma_det) / 2
log_density -= diff.T @ sigma_inv @ diff / 2
return log_density
def log_likelihood_fn(_, params, *args, **kwargs):
return log_density_fn(params), jnp.array(jnp.nan)
def log_prior_fn(_):
return 0.
def log_prior_diff_fn(*args):
return 0.
fake_net_apply = None
fake_data = jnp.array([[jnp.nan,],]), jnp.array([[jnp.nan,],])
fake_net_state = jnp.array([jnp.nan,])
```
# HMC
```
step_size = 1e-1
trajectory_len = jnp.pi / 2
max_num_leapfrog_steps = int(trajectory_len // step_size + 1)
print("Leapfrog steps per iteration:", max_num_leapfrog_steps)
update, get_log_prob_and_grad = train_utils.make_hmc_update(
fake_net_apply, log_likelihood_fn, log_prior_fn, log_prior_diff_fn,
max_num_leapfrog_steps, 1., 0.)
# Initial log-prob and grad values
# params = jnp.ones_like(mu)[None, :]
params = jnp.ones_like(mu)
log_prob, state_grad, log_likelihood, net_state = (
get_log_prob_and_grad(fake_data, params, fake_net_state))
%%time
num_iterations = 500
all_samples = []
key = jax.random.PRNGKey(0)
for iteration in tqdm.tqdm(range(num_iterations)):
(params, net_state, log_likelihood, state_grad, step_size, key,
accept_prob, accepted) = (
update(fake_data, params, net_state, log_likelihood, state_grad,
key, step_size, trajectory_len, True))
if accepted:
all_samples.append(onp.asarray(params).copy())
# print("It: {} \t Accept P: {} \t Accepted {} \t Log-likelihood: {}".format(
# iteration, accept_prob, accepted, log_likelihood))
len(all_samples)
log_prob, state_grad, log_likelihood, net_state
all_samples_cat = onp.stack(all_samples)
plt.scatter(all_samples_cat[:, 0], all_samples_cat[:, 1], alpha=0.3)
plt.grid()
```
# Blackjax
```
!pip install blackjax
import jax
import jax.numpy as jnp
import jax.scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np
import blackjax.hmc as hmc
import blackjax.nuts as nuts
import blackjax.stan_warmup as stan_warmup
print(jax.devices())
potential = lambda x: -log_density_fn(**x)
num_integration_steps = 30
kernel_generator = lambda step_size, inverse_mass_matrix: hmc.kernel(
potential, step_size, inverse_mass_matrix, num_integration_steps
)
rng_key = jax.random.PRNGKey(0)
initial_position = {"params": np.zeros(2)}
initial_state = hmc.new_state(initial_position, potential)
print(initial_state)
%%time
nsteps = 500
final_state, (step_size, inverse_mass_matrix), info = stan_warmup.run(
rng_key,
kernel_generator,
initial_state,
nsteps,
)
%%time
kernel = nuts.kernel(potential, step_size, inverse_mass_matrix)
kernel = jax.jit(kernel)
def inference_loop(rng_key, kernel, initial_state, num_samples):
def one_step(state, rng_key):
state, _ = kernel(rng_key, state)
return state, state
keys = jax.random.split(rng_key, num_samples)
_, states = jax.lax.scan(one_step, initial_state, keys)
return states
%%time
nsamples = 500
states = inference_loop(rng_key, kernel, initial_state, nsamples)
samples = states.position["params"].block_until_ready()
print(samples.shape)
plt.scatter(samples[:, 0], samples[:, 1], alpha=0.3)
plt.grid()
```
# CIFAR10 Classifier Based on Transfer Learning
This notebook outlines the steps for building a classifier that applies the concept of transfer learning using VGG16, one of the pre-trained deep CNNs.
```
%matplotlib inline
# Pandas and Numpy for data structures and util functions
import scipy as sp
import numpy as np
import pandas as pd
from numpy.random import rand
pd.options.display.max_colwidth = 600
# Scikit-learn imports
from sklearn import preprocessing
from sklearn.metrics import roc_curve, auc, precision_recall_curve
from sklearn.model_selection import train_test_split
import cnn_utils as utils
# Matplotlib imports
import matplotlib.pyplot as plt
params = {'legend.fontsize': 'x-large',
'figure.figsize': (15, 5),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'}
plt.rcParams.update(params)
# Pandas displays data frames as tables.
from IPython.display import display, HTML
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.keras import callbacks
from tensorflow.keras import optimizers
from tensorflow.keras.datasets import cifar10
from tensorflow.keras import Model
from tensorflow.keras.applications import vgg16 as vgg
from tensorflow.keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D,BatchNormalization
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.utils import np_utils
```
## Loading and Preparing the Dataset
```
BATCH_SIZE = 32
EPOCHS = 40
NUM_CLASSES = 10
LEARNING_RATE = 1e-4
MOMENTUM = 0.9
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
```
Split training dataset in train and validation sets
```
X_train, X_val, y_train, y_val = train_test_split(X_train,
y_train,
test_size=0.15,
stratify=np.array(y_train),
random_state=42)
```
Transform target variable/labels into one hot encoded form
```
Y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
Y_val = np_utils.to_categorical(y_val, NUM_CLASSES)
Y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
```
### Preprocessing
Since VGG16 will be used as a feature extractor, the images must be at least 48x48. Resize the images to the required dimensions with ```scipy```.
```
X_train = np.array([sp.misc.imresize(x,
(48, 48)) for x in X_train])
X_val = np.array([sp.misc.imresize(x,
(48, 48)) for x in X_val])
X_test = np.array([sp.misc.imresize(x,
(48, 48)) for x in X_test])
```
## Preparing the Model
* Load VGG16 without the top layers
* Prepare a custom classifier
* Stack new layers on top of the model
```
base_model = vgg.VGG16(weights='imagenet',
include_top=False,
input_shape=(48, 48, 3))
```
Since the goal is to train only the classification layers, the remaining layers are frozen by setting their trainable parameter to False. This way the existing architecture can be reused even on less powerful infrastructure, and the learned weights can be transferred from one domain to another.
```
# Extract the last layer of the third block of the VGG16 model
last = base_model.get_layer('block3_pool').output
# Add classification layers on top
x = GlobalAveragePooling2D()(last)
x= BatchNormalization()(x)
x = Dense(256, activation='relu')(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.6)(x)
pred = Dense(NUM_CLASSES, activation='softmax')(x)
model = Model(base_model.input, pred)
```
Since our goal is to train the custom classifier, the VGG16 layers are frozen.
```
for layer in base_model.layers:
layer.trainable = False
model.compile(loss='categorical_crossentropy',  # multi-class, one-hot targets
optimizer=optimizers.Adam(lr=LEARNING_RATE),
metrics=['accuracy'])
model.summary()
```
## Data Augmentation
To overcome the limitations of a small dataset and help the model generalize, augment the dataset with ```Keras``` utilities.
```
# Prepare the data augmentation configuration
train_datagen = ImageDataGenerator(
rescale=1. / 255,
horizontal_flip=False)
train_datagen.fit(X_train)
train_generator = train_datagen.flow(X_train,
Y_train,
batch_size=BATCH_SIZE)
val_datagen = ImageDataGenerator(rescale=1. / 255,
horizontal_flip=False)
val_datagen.fit(X_val)
val_generator = val_datagen.flow(X_val,
Y_val,
batch_size=BATCH_SIZE)
```
## Training the Model
Now let's train the model for a few epochs and measure its performance. The following code calls the fit_generator() function to train the layers newly added to the model.
```
train_steps_per_epoch = X_train.shape[0] // BATCH_SIZE
val_steps_per_epoch = X_val.shape[0] // BATCH_SIZE
history = model.fit_generator(train_generator,
steps_per_epoch=train_steps_per_epoch,
validation_data=val_generator,
validation_steps=val_steps_per_epoch,
epochs=EPOCHS,
verbose=1)
```
## Analyzing Model Performance
```
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
t = f.suptitle('Deep Neural Net Performance', fontsize=12)
f.subplots_adjust(top=0.85, wspace=0.3)
epochs = list(range(1,EPOCHS+1))
ax1.plot(epochs, history.history['acc'], label='Train Accuracy')
ax1.plot(epochs, history.history['val_acc'], label='Validation Accuracy')
ax1.set_xticks(epochs)
ax1.set_ylabel('Accuracy Value')
ax1.set_xlabel('Epoch')
ax1.set_title('Accuracy')
l1 = ax1.legend(loc="best")
ax2.plot(epochs, history.history['loss'], label='Train Loss')
ax2.plot(epochs, history.history['val_loss'], label='Validation Loss')
ax2.set_xticks(epochs)
ax2.set_ylabel('Loss Value')
ax2.set_xlabel('Epoch')
ax2.set_title('Loss')
l2 = ax2.legend(loc="best")
predictions = model.predict(X_test/255.)
test_labels = list(y_test.squeeze())
predictions = list(predictions.argmax(axis=1))
get_metrics(true_labels=y_test,
predicted_labels=predictions)
```
## Visualizing Predictions
```
label_dict = {0:'airplane',
1:'automobile',
2:'bird',
3:'cat',
4:'deer',
5:'dog',
6:'frog',
7:'horse',
8:'ship',
9:'truck'}
utils.plot_predictions(model=model,dataset=X_test/255.,
dataset_labels=Y_test,
label_dict=label_dict,
batch_size=16,
grid_height=4,
grid_width=4)
```
[source](../api/alibi_detect.ad.model_distillation.rst)
# Model distillation
## Overview
[Model distillation](https://arxiv.org/abs/1503.02531) is a technique that is used to transfer knowledge from a large network to a smaller network. Typically, it consists of training a second model with a simplified architecture on soft targets (the output distributions or the logits) obtained from the original model.
Here, we apply model distillation to obtain harmfulness scores, by comparing the output distributions of the original model with the output distributions
of the distilled model, in order to detect adversarial data, malicious data drift or data corruption.
We use the following definition of harmful and harmless data points:
* Harmful data points are defined as inputs for which the model's predictions on the uncorrupted data are correct while the model's predictions on the corrupted data are wrong.
* Harmless data points are defined as inputs for which the model's predictions on the uncorrupted data are correct and the model's predictions on the corrupted data remain correct.
Analogously to the [adversarial AE detector](https://arxiv.org/abs/2002.09364), which is also part of the library, the model distillation detector picks up drift that reduces the performance of the classification model.
The detector can be used as follows:
* Given an input $x$, an adversarial score $S(x)$ is computed. $S(x)$ equals the value of the loss function employed for distillation, calculated between the original model's output and the distilled model's output on $x$.
* If $S(x)$ is above a threshold (explicitly defined or inferred from training data), the instance is flagged as adversarial.
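As a rough, illustrative sketch of this scoring scheme (not the library's internal implementation), the snippet below computes a KL-divergence-based score between the two models' output distributions and flags instances above a threshold. The `harmfulness_score` helper, the temperature-scaling form and the `threshold` variable are assumptions for the example; `model`, `distilled_model` and `X` refer to objects as in the surrounding snippets.
```python
import numpy as np

def harmfulness_score(p_model, p_distilled, temperature=0.5, eps=1e-12):
    """Illustrative S(x): KL divergence between the (temperature-scaled)
    original model's output distribution and the distilled model's output."""
    # Temperature scaling of the original model's probabilities (assumed form).
    p_t = p_model ** (1.0 / temperature)
    p_t = p_t / p_t.sum(axis=-1, keepdims=True)
    # KL(p_t || p_distilled), computed per instance.
    return np.sum(p_t * (np.log(p_t + eps) - np.log(p_distilled + eps)), axis=-1)

# Hypothetical usage with a batch X and a previously chosen threshold.
scores = harmfulness_score(model.predict(X), distilled_model.predict(X))
is_adversarial = scores > threshold  # instances above the threshold are flagged
```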
## Usage
### Initialize
Parameters:
* `threshold`: threshold value above which the instance is flagged as an adversarial instance.
* `distilled_model`: `tf.keras.Sequential` instance containing the model used for distillation. Example:
```python
distilled_model = tf.keras.Sequential(
[
tf.keras.InputLayer(input_shape=(input_dim,)),
tf.keras.layers.Dense(output_dim, activation=tf.nn.softmax)
]
)
```
* `model`: the classifier as a `tf.keras.Model`. Example:
```python
inputs = tf.keras.Input(shape=(input_dim,))
hidden = tf.keras.layers.Dense(hidden_dim)(inputs)
outputs = tf.keras.layers.Dense(output_dim, activation=tf.nn.softmax)(hidden)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
* `loss_type`: type of loss used for distillation. Supported losses: 'kld', 'xent'.
* `temperature`: Temperature used for model prediction scaling. Temperature <1 sharpens the prediction probability distribution which can be beneficial for prediction distributions with high entropy.
* `data_type`: can specify data type added to metadata. E.g. *'tabular'* or *'image'*.
Initialized detector example:
```python
from alibi_detect.ad import ModelDistillation
ad = ModelDistillation(
distilled_model=distilled_model,
model=model,
temperature=0.5
)
```
### Fit
We then need to train the detector. The following parameters can be specified:
* `X`: training batch as a numpy array.
* `loss_fn`: loss function used for training. Defaults to the custom model distillation loss.
* `optimizer`: optimizer used for training. Defaults to [Adam](https://arxiv.org/abs/1412.6980) with learning rate 1e-3.
* `epochs`: number of training epochs.
* `batch_size`: batch size used during training.
* `verbose`: boolean whether to print training progress.
* `log_metric`: additional metrics whose progress will be displayed if verbose equals True.
* `preprocess_fn`: optional data preprocessing function applied per batch during training.
```python
ad.fit(X_train, epochs=50)
```
The threshold for the adversarial / harmfulness score can be set via ```infer_threshold```. We need to pass a batch of instances $X$ and specify what percentage of those we consider to be normal via `threshold_perc`. Even if we only have normal instances in the batch, it might be best to set the threshold value a bit lower (e.g. $95$%) since the model could have misclassified training instances.
```python
ad.infer_threshold(X_train, threshold_perc=95, batch_size=64)
```
### Detect
We detect adversarial / harmful instances by simply calling `predict` on a batch of instances `X`. We can also return the instance level score by setting `return_instance_score` to True.
The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys:
* `is_adversarial`: boolean whether instances are above the threshold and therefore adversarial instances. The array is of shape *(batch size,)*.
* `instance_score`: contains instance level scores if `return_instance_score` equals True.
```python
preds_detect = ad.predict(X, batch_size=64, return_instance_score=True)
```
## Examples
### Image
[Harmful drift detection through model distillation on CIFAR10](../examples/cd_distillation_cifar10.nblink)
```
file_1 = """Stock Close Beta Cap
Apple 188.72 0.2 895.667B
Tesla 278.62 0.5 48.338B"""
file_2 = """Employee Wage Hired Promotion
Linda 3000 2017 Yes
Bob 2000 2016 No
Joshua 800 2019 Yes"""
```
### My solution
Other approaches are possible
```
def parser(stringa):
"""
    Parses the string and returns a dict of lists: the keys come from the first line, the lists are the columns.
"""
# lines will be a list of lists
# each sub list contains the words of a single line
lines = list()
for line in stringa.splitlines():
lines.append(line.split())
keys = lines[0] # the first line is the key
lines = lines[1:] # now lines does not include the first line
result = dict()
count = 0
for key in keys:
values = [line[count] for line in lines]
result[key] = values
count += 1
return result
parser(file_1)
parser(file_2)
```
### Test
We want to verify carefully that everything works as intended
```
def feel_bored_1_test(function):
"""
Verify that function returns result1 and result2.
"""
result_1 = {'Stock': ['Apple', 'Tesla'], 'Close': ['188.72', '278.62'], 'Beta': ['0.2', '0.5'],
'Cap': ['895.667B', '48.338B']}
result_2 = {'Employee': ['Linda', 'Bob', 'Joshua'], 'Wage': ['3000', '2000', '800'], 'Hired': ['2017', '2016', '2019'],
'Promotion': ['Yes', 'No', 'Yes']}
results = list()
    if function(file_1) == result_1:
print("Test 1 passed")
results.append(True)
else:
print("Test 1 not passed")
results.append(False)
    if function(file_2) == result_2:
print("Test 2 passed")
results.append(True)
else:
print("Test 2 not passed")
results.append(False)
return results
```
We can follow DRY (Don't Repeat Yourself) with a for loop to improve the testing function
```
def feel_bored_1_test(function):
result_1 = {'Stock': ['Apple', 'Tesla'], 'Close': ['188.72', '278.62'], 'Beta': ['0.2', '0.5'],
'Cap': ['895.667B', '48.338B']}
result_2 = {'Employee': ['Linda', 'Bob', 'Joshua'], 'Wage': ['3000', '2000', '800'], 'Hired': ['2017', '2016', '2019'],
'Promotion': ['Yes', 'No', 'Yes']}
input_to_output = {file_1: result_1, file_2: result_2}
results = list()
count = 1
for key, value in input_to_output.items():
if function(key) == value:
results.append(True)
print(f"Test {count} passed")
else:
results.append(False)
print(f"Test {count} not passed")
count += 1
return results
feel_bored_1_test(parser)
```
### Improve code
```
def fast_parser(stringa):
"""
    Parses the string and returns a dict of lists: the keys come from the first line, the lists are the columns.
"""
lines = [line.split() for line in stringa.splitlines()] # list of lists
keys = lines.pop(0) # remove first line and assign to keys
result = {
key: [line[index] for line in lines] for index, key in enumerate(keys)
}
return result
```
### Everything appears to work as intended
```
feel_bored_1_test(fast_parser)
```
### Efficiency does not matter, but it's still interesting to measure
We can see that the difference is insignificant for small inputs
```
%%timeit
parser(file_1)
%%timeit
fast_parser(file_1)
```
### With bigger inputs, parsing efficiency becomes relevant
<br>
**Key Takeaway:**
<br>
Do not waste time on optimizing code if you don't need it
<br>
<br>
**Premature optimization is the root of all evil**
```
big_input = (file_1 + '\n') * 100
print(big_input)
%%timeit
parser(big_input)
%%timeit
fast_parser(big_input)
```
<a href="https://colab.research.google.com/github/satyajitghana/PadhAI-Course/blob/master/11_VectorizedGDAlgorithms.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, mean_squared_error, log_loss
from tqdm import tqdm_notebook
import seaborn as sns
import imageio
import time
from IPython.display import HTML
sns.set()
from sklearn.preprocessing import OneHotEncoder
from sklearn.datasets import make_blobs
my_cmap = 'inferno'
np.random.seed(0)
```
# Generate Data
```
data, labels = make_blobs(n_samples=1000, centers=4, n_features=2, random_state=0)
print(data.shape, labels.shape)
plt.scatter(data[:,0], data[:,1], c=labels, cmap=my_cmap)
plt.show()
labels_orig = labels
labels = np.mod(labels_orig, 2)
plt.scatter(data[:,0], data[:,1], c=labels, cmap=my_cmap)
plt.show()
```
# MultiClass Classification
```
X_train, X_val, Y_train, Y_val = train_test_split(data, labels_orig, stratify=labels_orig, random_state=0)
print(X_train.shape, X_val.shape, labels_orig.shape)
enc = OneHotEncoder()
# 0 -> (1, 0, 0, 0), 1 -> (0, 1, 0, 0), 2 -> (0, 0, 1, 0), 3 -> (0, 0, 0, 1)
y_OH_train = enc.fit_transform(np.expand_dims(Y_train,1)).toarray()
y_OH_val = enc.fit_transform(np.expand_dims(Y_val,1)).toarray()
print(y_OH_train.shape, y_OH_val.shape)
W1 = np.random.randn(2,2)
W2 = np.random.randn(2,4)
print(W1)
print(W2)
```
# FF Class
```
class FFNetwork:
def __init__(self, W1, W2):
self.params={}
self.params["W1"]=W1.copy()
self.params["W2"]=W2.copy()
self.params["B1"]=np.zeros((1,2))
self.params["B2"]=np.zeros((1,4))
self.num_layers=2
self.gradients={}
self.update_params={}
self.prev_update_params={}
for i in range(1,self.num_layers+1):
self.update_params["v_w"+str(i)]=0
self.update_params["v_b"+str(i)]=0
self.update_params["m_b"+str(i)]=0
self.update_params["m_w"+str(i)]=0
self.prev_update_params["v_w"+str(i)]=0
self.prev_update_params["v_b"+str(i)]=0
def forward_activation(self, X):
return 1.0/(1.0 + np.exp(-X))
def grad_activation(self, X):
return X*(1-X)
def softmax(self, X):
exps = np.exp(X)
return exps / np.sum(exps, axis=1).reshape(-1,1)
def forward_pass(self, X, params = None):
if params is None:
params = self.params
self.A1 = np.matmul(X, params["W1"]) + params["B1"] # (N, 2) * (2, 2) -> (N, 2)
self.H1 = self.forward_activation(self.A1) # (N, 2)
self.A2 = np.matmul(self.H1, params["W2"]) + params["B2"] # (N, 2) * (2, 4) -> (N, 4)
self.H2 = self.softmax(self.A2) # (N, 4)
return self.H2
def grad(self, X, Y, params = None):
if params is None:
params = self.params
self.forward_pass(X, params)
m = X.shape[0]
self.gradients["dA2"] = self.H2 - Y # (N, 4) - (N, 4) -> (N, 4)
self.gradients["dW2"] = np.matmul(self.H1.T, self.gradients["dA2"]) # (2, N) * (N, 4) -> (2, 4)
self.gradients["dB2"] = np.sum(self.gradients["dA2"], axis=0).reshape(1, -1) # (N, 4) -> (1, 4)
self.gradients["dH1"] = np.matmul(self.gradients["dA2"], params["W2"].T) # (N, 4) * (4, 2) -> (N, 2)
self.gradients["dA1"] = np.multiply(self.gradients["dH1"], self.grad_activation(self.H1)) # (N, 2) .* (N, 2) -> (N, 2)
self.gradients["dW1"] = np.matmul(X.T, self.gradients["dA1"]) # (2, N) * (N, 2) -> (2, 2)
self.gradients["dB1"] = np.sum(self.gradients["dA1"], axis=0).reshape(1, -1) # (N, 2) -> (1, 2)
def fit(self, X, Y, epochs=1, algo= "GD", display_loss=False,
eta=1, mini_batch_size=100, eps=1e-8,
beta=0.9, beta1=0.9, beta2=0.9, gamma=0.9 ):
if display_loss:
loss = {}
for num_epoch in tqdm_notebook(range(epochs), total=epochs, unit="epoch"):
m = X.shape[0]
if algo == "GD":
self.grad(X, Y)
for i in range(1,self.num_layers+1):
self.params["W"+str(i)] -= eta * (self.gradients["dW"+str(i)]/m)
self.params["B"+str(i)] -= eta * (self.gradients["dB"+str(i)]/m)
elif algo == "MiniBatch":
for k in range(0,m,mini_batch_size):
self.grad(X[k:k+mini_batch_size], Y[k:k+mini_batch_size])
for i in range(1,self.num_layers+1):
self.params["W"+str(i)] -= eta * (self.gradients["dW"+str(i)]/mini_batch_size)
self.params["B"+str(i)] -= eta * (self.gradients["dB"+str(i)]/mini_batch_size)
elif algo == "Momentum":
self.grad(X, Y)
for i in range(1,self.num_layers+1):
self.update_params["v_w"+str(i)] = gamma *self.update_params["v_w"+str(i)] + eta * (self.gradients["dW"+str(i)]/m)
self.update_params["v_b"+str(i)] = gamma *self.update_params["v_b"+str(i)] + eta * (self.gradients["dB"+str(i)]/m)
self.params["W"+str(i)] -= self.update_params["v_w"+str(i)]
self.params["B"+str(i)] -= self.update_params["v_b"+str(i)]
elif algo == "NAG":
temp_params = {}
for i in range(1,self.num_layers+1):
self.update_params["v_w"+str(i)]=gamma*self.prev_update_params["v_w"+str(i)]
self.update_params["v_b"+str(i)]=gamma*self.prev_update_params["v_b"+str(i)]
temp_params["W"+str(i)]=self.params["W"+str(i)]-self.update_params["v_w"+str(i)]
temp_params["B"+str(i)]=self.params["B"+str(i)]-self.update_params["v_b"+str(i)]
self.grad(X,Y,temp_params)
for i in range(1,self.num_layers+1):
self.update_params["v_w"+str(i)] = gamma *self.update_params["v_w"+str(i)] + eta * (self.gradients["dW"+str(i)]/m)
self.update_params["v_b"+str(i)] = gamma *self.update_params["v_b"+str(i)] + eta * (self.gradients["dB"+str(i)]/m)
self.params["W"+str(i)] -= eta * (self.update_params["v_w"+str(i)])
self.params["B"+str(i)] -= eta * (self.update_params["v_b"+str(i)])
self.prev_update_params=self.update_params
elif algo == "AdaGrad":
self.grad(X, Y)
for i in range(1,self.num_layers+1):
self.update_params["v_w"+str(i)] += (self.gradients["dW"+str(i)]/m)**2
self.update_params["v_b"+str(i)] += (self.gradients["dB"+str(i)]/m)**2
self.params["W"+str(i)] -= (eta/(np.sqrt(self.update_params["v_w"+str(i)])+eps)) * (self.gradients["dW"+str(i)]/m)
self.params["B"+str(i)] -= (eta/(np.sqrt(self.update_params["v_b"+str(i)])+eps)) * (self.gradients["dB"+str(i)]/m)
elif algo == "RMSProp":
self.grad(X, Y)
for i in range(1,self.num_layers+1):
self.update_params["v_w"+str(i)] = beta*self.update_params["v_w"+str(i)] +(1-beta)*((self.gradients["dW"+str(i)]/m)**2)
self.update_params["v_b"+str(i)] = beta*self.update_params["v_b"+str(i)] +(1-beta)*((self.gradients["dB"+str(i)]/m)**2)
self.params["W"+str(i)] -= (eta/(np.sqrt(self.update_params["v_w"+str(i)]+eps)))*(self.gradients["dW"+str(i)]/m)
self.params["B"+str(i)] -= (eta/(np.sqrt(self.update_params["v_b"+str(i)]+eps)))*(self.gradients["dB"+str(i)]/m)
elif algo == "Adam":
self.grad(X, Y)
num_updates=0
for i in range(1,self.num_layers+1):
num_updates+=1
self.update_params["m_w"+str(i)]=beta1*self.update_params["m_w"+str(i)]+(1-beta1)*(self.gradients["dW"+str(i)]/m)
self.update_params["v_w"+str(i)]=beta2*self.update_params["v_w"+str(i)]+(1-beta2)*((self.gradients["dW"+str(i)]/m)**2)
m_w_hat=self.update_params["m_w"+str(i)]/(1-np.power(beta1,num_updates))
v_w_hat=self.update_params["v_w"+str(i)]/(1-np.power(beta2,num_updates))
self.params["W"+str(i)] -=(eta/np.sqrt(v_w_hat+eps))*m_w_hat
self.update_params["m_b"+str(i)]=beta1*self.update_params["m_b"+str(i)]+(1-beta1)*(self.gradients["dB"+str(i)]/m)
self.update_params["v_b"+str(i)]=beta2*self.update_params["v_b"+str(i)]+(1-beta2)*((self.gradients["dB"+str(i)]/m)**2)
m_b_hat=self.update_params["m_b"+str(i)]/(1-np.power(beta1,num_updates))
v_b_hat=self.update_params["v_b"+str(i)]/(1-np.power(beta2,num_updates))
self.params["B"+str(i)] -=(eta/np.sqrt(v_b_hat+eps))*m_b_hat
if display_loss:
Y_pred = self.predict(X)
loss[num_epoch] = log_loss(np.argmax(Y, axis=1), Y_pred)
if display_loss:
plt.plot(list(loss.values()), '-o', markersize=5)
plt.xlabel('Epochs')
plt.ylabel('Log Loss')
plt.show()
def predict(self, X):
Y_pred = self.forward_pass(X)
return np.array(Y_pred).squeeze()
def print_accuracy():
Y_pred_train = model.predict(X_train)
Y_pred_train = np.argmax(Y_pred_train,1)
Y_pred_val = model.predict(X_val)
Y_pred_val = np.argmax(Y_pred_val,1)
accuracy_train = accuracy_score(Y_pred_train, Y_train)
accuracy_val = accuracy_score(Y_pred_val, Y_val)
print("Training accuracy", round(accuracy_train, 4))
print("Validation accuracy", round(accuracy_val, 4))
if False:
plt.scatter(X_train[:,0], X_train[:,1], c=Y_pred_train, cmap=my_cmap, s=15*(np.abs(np.sign(Y_pred_train-Y_train))+.1))
plt.show()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="GD", display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="MiniBatch", mini_batch_size=128, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="MiniBatch", mini_batch_size=8, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="Momentum", gamma=0.5, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="Momentum", gamma=0.9, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="Momentum", gamma=0.99, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="NAG", gamma=0.99, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="NAG", gamma=0.5, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="NAG", gamma=0.9, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=1, algo="AdaGrad", display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=.1, algo="AdaGrad", display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=.1, algo="RMSProp", beta=0.9, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=.9, algo="RMSProp", beta=0.9, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=.9, algo="Adam", beta=0.9, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=100, eta=.1, algo="Adam", beta=0.9, display_loss=True)
print_accuracy()
```
# Good Configuration for each Algorithm
```
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=10000, eta=0.5, algo="GD", display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=1000, eta=0.5, algo="Momentum", gamma=0.9, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=1000, eta=0.5, algo="NAG", gamma=0.9, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=500, eta=1, algo="AdaGrad", display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=2000, eta=.01, algo="RMSProp", beta=0.9, display_loss=True)
print_accuracy()
%%time
model = FFNetwork(W1, W2)
model.fit(X_train, y_OH_train, epochs=200, eta=.1, algo="Adam", beta=0.9, display_loss=True)
print_accuracy()
```
### I want to use matplotlib to illustrate permutations
The first thing is to make numbered circles
```
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig = plt.gcf()
fig.gca().add_artist(circle1)
plt.axis("off")
circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False)
circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False)
circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False)
circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False)
circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circle1)
fig1.gca().add_artist(circle2)
fig1.gca().add_artist(circle3)
fig1.gca().add_artist(circle4)
fig1.gca().add_artist(circle5)
circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False)
circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False)
circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False)
circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False)
circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
circled1=plt.Circle((1,0),.1,color='r', alpha=0.2, clip_on=False)
circled2=plt.Circle((1,0.2),.1,color='y', alpha=0.2, clip_on=False)
circled3=plt.Circle((1,0.4),.1,color='b', alpha=0.2, clip_on=False)
circled4=plt.Circle((1,0.6),.1,color='g', alpha=0.2, clip_on=False)
circled5=plt.Circle((1,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circle1)
fig1.gca().add_artist(circle2)
fig1.gca().add_artist(circle3)
fig1.gca().add_artist(circle4)
fig1.gca().add_artist(circle5)
fig1.gca().add_artist(circled1)
fig1.gca().add_artist(circled2)
fig1.gca().add_artist(circled3)
fig1.gca().add_artist(circled4)
fig1.gca().add_artist(circled5)
circle1=plt.Circle((0,0),.1,color='r', alpha=0.2, clip_on=False)
circle2=plt.Circle((0,0.2),.1,color='y', alpha=0.2, clip_on=False)
circle3=plt.Circle((0,0.4),.1,color='b', alpha=0.2, clip_on=False)
circle4=plt.Circle((0,0.6),.1,color='g', alpha=0.2, clip_on=False)
circle5=plt.Circle((0,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
circled1=plt.Circle((1,0),.1,color='r', alpha=0.2, clip_on=False)
circled2=plt.Circle((1,0.2),.1,color='y', alpha=0.2, clip_on=False)
circled3=plt.Circle((1,0.4),.1,color='b', alpha=0.2, clip_on=False)
circled4=plt.Circle((1,0.6),.1,color='g', alpha=0.2, clip_on=False)
circled5=plt.Circle((1,0.8),.1,color=(0.2,0.6,0.7), alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circle1)
fig1.gca().add_artist(circle2)
fig1.gca().add_artist(circle3)
fig1.gca().add_artist(circle4)
fig1.gca().add_artist(circle5)
fig1.gca().add_artist(circled1)
fig1.gca().add_artist(circled2)
fig1.gca().add_artist(circled3)
fig1.gca().add_artist(circled4)
fig1.gca().add_artist(circled5)
# the edges
fig1.gca().plot([0.15,0.85],[0,0.8], color="red", alpha=0.6 )
fig1.gca().text(0.,0.,r'$5$', fontsize=20,verticalalignment='center', horizontalalignment='center')
fig1.gca().text(1,0,r'$5$', fontsize=20, verticalalignment='center', horizontalalignment='center')
fig1.gca().text(1,0.8,r'$1$', fontsize=20, verticalalignment='center', horizontalalignment='center')
fig1.gca().text(0,0.8,r'$1$', fontsize=20, verticalalignment='center', horizontalalignment='center')
fig1.gca().plot([0.15,0.85],[0.8,0.4], color=(.2,.6,.7), alpha=0.6 )
# now we build the functions; first, the color of an integer
def cor(n):
    '''Given an integer n, assigns a color'''
return (n/(n+1), 1- n/(n+1), 1-(n+2)/(n+5))
# test
circle1=plt.Circle((0,0),.1,color=cor(1), alpha=0.2, clip_on=False)
circle2=plt.Circle((0,0.2),.1,color=cor(3), alpha=0.2, clip_on=False)
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circle1)
fig1.gca().add_artist(circle2)
def circulo(x,n):
    '''Defines a circle centered at (x, 0.2*n) with radius 0.1 and color n'''
return plt.Circle((x,0.2*n), .1, color=cor(n), alpha=0.3, clip_on=False )
# test
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
fig1.gca().add_artist(circulo(0,3))
fig1.gca().add_artist(circulo(0,4))
# stack-of-circles function
def pilha_de_circulos(x,n):
    '''Draws a stack of n circles at abscissa x'''
for k in range(n):
fig1.gca().add_artist(circulo(x,k))
fig1.gca().text(x,0.2*k,r'$'+str(k+1)+'$', fontsize=20,verticalalignment='center', horizontalalignment='center')
return
# test of this function:
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
pilha_de_circulos(0,3)
pilha_de_circulos(1,3)
pilha_de_circulos(2,3)
# now the mapa_permu function
def mapa_permu(x,p):
    '''Draws the permutation p (a list) at position x'''
l=len(p)
x1= x+.15
x2= x+.85
for y in range(l):
fig1.gca().plot([x1,x2],[0.2*y,0.2*(p[y]-1)], color=cor(y), alpha=0.6 )
return
# test
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
pilha_de_circulos(0,3)
pilha_de_circulos(1,3)
pilha_de_circulos(2,3)
mapa_permu(0,[2,1,3])
mapa_permu(1.0, [3,1,2])
plt.axes(aspect="equal")
fig1 = plt.gcf()
plt.axis("off")
pilha_de_circulos(0,5)
pilha_de_circulos(1,5)
mapa_permu(0,[3,2,1,5,4])
def pgrafico(x,p):
    '''Plots the permutation p starting at x'''
n=len(p)
fig1= plt.gcf()
plt.axis("off")
pilha_de_circulos(x,n)
pilha_de_circulos(x+1,n)
return mapa_permu(x,p)
# test
plt.axes(aspect="equal")
fig1= plt.gcf()
plt.axis("off")
pgrafico(0,[3,1,2])
```
# Fitting to existing data
```
# Base Data Science snippet
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import time
from tqdm import tqdm_notebook
%matplotlib inline
%load_ext autoreload
%autoreload 2
```
Inspiration - https://www.lewuathe.com/covid-19-dynamics-with-sir-model.html
```
import sys
sys.path.append("../")
from covid.dataset import fetch_daily_case
from covid.models import SIR
from covid.models.states import CompartmentStates
```
# Fitting French data to SIR model
```
cases = fetch_daily_case(return_data=True)
cases.head()
```
## Getting cases for all France
### Fetching French data and prepare it
```
cases_fr = (
cases.query("granularite =='pays'")
.query("source_nom=='Ministère des Solidarités et de la Santé'")
[["date","cas_confirmes","deces","gueris"]]
.drop_duplicates(subset = ["date"])
.fillna(0.0)
.assign(date = lambda x : pd.to_datetime(x["date"]))
.set_index("date")
)
start,end = cases_fr.index[0],cases_fr.index[-1]
date_range = pd.date_range(start,end,freq="D")
cases_fr = cases_fr.reindex(date_range).fillna(method="ffill")
cases_fr.plot(figsize = (15,4))
plt.show()
```
### Recomputing compartments
```
cases_fr["I"] = cases_fr["cas_confirmes"] - (cases_fr["deces"] + cases_fr["gueris"])
cases_fr["R"] = (cases_fr["deces"] + cases_fr["gueris"])
pop_fr = 66.99*1e6
cases_fr["S"] = pop_fr - cases_fr["I"] - cases_fr["R"]
cases_fr[["S","I","R"]].plot(figsize = (15,4));
cases_fr[["I","R"]].plot(figsize = (15,4));
```
### Smoothing curves
```
from scipy.signal import savgol_filter
import statsmodels.api as sm
def smooth(y,p = 1600):
cycle, trend = sm.tsa.filters.hpfilter(y, p)
return trend
pd.Series(savgol_filter(cases_fr["I"], 51, 2)).plot(figsize = (15,4))
pd.Series(savgol_filter(cases_fr["R"], 51, 2)).plot()
plt.show()
pd.Series(smooth(cases_fr["I"],6.25)).plot(figsize = (15,4),label = "Is")
pd.Series(cases_fr["I"]).plot(label = "I")
pd.Series(smooth(cases_fr["R"],6.25)).plot(label = "Rs")
pd.Series(cases_fr["R"]).plot(label = "R")
plt.legend()
plt.show()
pd.Series(smooth(cases_fr["I"],1600)).plot(figsize = (15,4),label = "Is")
pd.Series(cases_fr["I"]).plot(label = "I")
pd.Series(smooth(cases_fr["R"],1600)).plot(label = "Rs")
pd.Series(cases_fr["R"]).plot(label = "R")
plt.legend()
plt.show()
```
## Preparing SIR model
```
from covid.models import SIR
# Parameters
N = pop_fr
beta = 5/4
gamma = 1/4
start_date = cases_fr.index[0]
sir = SIR(N,beta,gamma)
states = sir.solve((N-1,1,0),start_date = start_date)
states.head()
states.show(plotly = False)
```
So of course the parameters are not correct: in this version of the SIR model more than 30 million people get infected.<br>
Even accounting for test biases, estimates are closer to 5 to 12% of the population, not 40%. <br>
Moreover, starting from the first cases, a peak would have been expected in mid-February, whereas in France the lockdown started on the 10th of March.
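The `covid.models.SIR` class is project-specific, so as a point of reference the cell below sketches the standard SIR equations it presumably wraps (the $\beta$/$\gamma$ parameterization is an assumption); it reuses `N` and `cases_fr` defined above.
```
# Minimal, standalone sketch of the standard SIR dynamics (assumption:
# covid.models.SIR uses this beta/gamma parameterization):
#   dS/dt = -beta*S*I/N,  dI/dt = beta*S*I/N - gamma*I,  dR/dt = gamma*I
from scipy.integrate import odeint

def sir_derivatives(y, t, N, beta, gamma):
    S, I, R = y
    dS = -beta * S * I / N
    dI = beta * S * I / N - gamma * I
    dR = gamma * I
    return dS, dI, dR

t_grid = np.arange(len(cases_fr))  # one step per day
sir_curve = odeint(sir_derivatives, (N - 1, 1, 0), t_grid, args=(N, 5/4, 1/4))
print("Peak number of infected in the sketch:", sir_curve[:, 1].max())
```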
```
states["I"].plot(figsize = (15,4))
cases_fr["I"].plot()
plt.show()
states["I"].plot(figsize = (15,4),label = "I_pred")
cases_fr["I"].plot(secondary_y = True,label = "I_true")
plt.legend()
plt.show()
```
## Some intuition about parameter sensitivity
```
from ipywidgets import interact
@interact(beta = 5/4,gamma = 1/4)
def show_sir(beta,gamma):
# Create SIR model
N = pop_fr
start_date = cases_fr.index[0]
sir = SIR(N,beta,gamma)
states = sir.solve((N-1,1,0),start_date = start_date)
# Plot result
states["I"].plot(figsize = (15,2),label = "I_pred")
cases_fr["I"].plot(secondary_y = True,label = "I_true")
plt.legend()
plt.show()
states["I"].plot(figsize = (15,2),label = "I_pred")
cases_fr["I"].plot(label = "I_true")
plt.legend()
plt.show()
```
## Fitting parameters with hyperopt
### First attempt
##### References
- https://towardsdatascience.com/hyperparameter-optimization-in-python-part-2-hyperopt-5f661db91324
- http://hyperopt.github.io/hyperopt/
##### Space
```
from hyperopt import hp
space = {
"beta":hp.uniform('beta',0.1,5),
"gamma":hp.uniform('gamma',1/15,1/3),
}
```
##### Loss function between predictions and true values
```
def loss_pred(states,true,cols = None):
if cols is None: cols = states.columns.tolist()
loss = 0
for col in cols:
        # accumulate the error over the selected compartments
        loss += np.linalg.norm(states.loc[true.index,col].values - true[col].values)
return loss
loss_pred(states,cases_fr,cols = ["I","R"])
loss_pred(cases_fr,cases_fr,cols = ["I","R"])
```
##### Final loss function
```
def objective(params):
sir = SIR(N,params["beta"],params["gamma"])
states = sir.solve((N-1,1,0),start_date = start_date)
return loss_pred(states,cases_fr,cols = ["I","R"])
```
##### Hyperopt optimization
```
from hyperopt import fmin, tpe, Trials
trials = Trials()
best = fmin(
fn=objective,
space=space,
trials=trials,
algo=tpe.suggest,
max_evals=1000)
print(best)
```
##### Visualizing results
```
sir = SIR(N,best["beta"],best["gamma"])
states = sir.solve((N-1,1,0),start_date = start_date)
states["I"].plot(figsize = (15,2),label = "pred")
cases_fr["I"].plot(label = "true")
plt.legend()
plt.show()
states["R"].plot(figsize = (15,4),label = "pred")
cases_fr["R"].plot(label = "true")
plt.legend()
plt.show()
```
<img src="https://raw.githubusercontent.com/brazil-data-cube/code-gallery/master/img/logo-bdc.png" align="right" width="64"/>
# <span style="color:#336699">Introduction to the SpatioTemporal Asset Catalog (STAC)</span>
<hr style="border:2px solid #0077b9;">
<div style="text-align: left;">
<a href="https://nbviewer.jupyter.org/github/brazil-data-cube/code-gallery/blob/master/jupyter/Python/stac/stac-introduction.ipynb"><img src="https://raw.githubusercontent.com/jupyter/design/master/logos/Badges/nbviewer_badge.svg" align="center"/></a>
</div>
<br/>
<div style="text-align: center;font-size: 90%;">
Matheus Zaglia<sup><a href="https://orcid.org/0000-0001-6181-2158"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Rennan Marujo<sup><a href="https://orcid.org/0000-0002-0082-9498"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>, Gilberto R. Queiroz<sup><a href="https://orcid.org/0000-0001-7534-0219"><i class="fab fa-lg fa-orcid" style="color: #a6ce39"></i></a></sup>
<br/><br/>
Earth Observation and Geoinformatics Division, National Institute for Space Research (INPE)
<br/>
Avenida dos Astronautas, 1758, Jardim da Granja, São José dos Campos, SP 12227-010, Brazil
<br/><br/>
Contact: <a href="mailto:[email protected]">[email protected]</a>
<br/><br/>
Last Update: March 12, 2021
</div>
<br/>
<div style="text-align: justify; margin-left: 25%; margin-right: 25%;">
<b>Abstract.</b> This Jupyter Notebook gives an overview on how to use the STAC service to discover and access the data products from the <em>Brazil Data Cube</em>.
</div>
<br/>
<div style="text-align: justify; margin-left: 25%; margin-right: 25%;font-size: 75%; border-style: solid; border-color: #0077b9; border-width: 1px; padding: 5px;">
<b>This Jupyter Notebook is a supplement to the following paper:</b>
<div style="margin-left: 10px; margin-right: 10px">
Zaglia, M.; Vinhas, L.; Queiroz, G. R.; Simões, R. <a href="http://urlib.net/rep/8JMKD3MGPDW34R/3UFEFD8" target="_blank">Catalogação de Metadados do Cubo de Dados do Brasil com o SpatioTemporal Asset Catalog</a>. In: Proceedings XX GEOINFO, November 11-13, 2019, São José dos Campos, SP, Brazil. p 280-285.
</div>
</div>
# Introduction
<hr style="border:1px solid #0077b9;">
The [**S**patio**T**emporal **A**sset **C**atalog (STAC)](https://stacspec.org/) is a specification created through the collaboration of several organizations and intended to increase satellite image search interoperability.
The diagram depicted in the picture contains the most important concepts behind the STAC data model:
<center>
<img src="https://raw.githubusercontent.com/brazil-data-cube/code-gallery/master/img/stac/stac-model.png" width="480" />
<br/>
<b>Figure 1</b> - STAC model.
</center>
The description of the concepts below are adapted from the [STAC Specification](https://github.com/radiantearth/stac-spec):
- **Item**: a `STAC Item` is the atomic unit of metadata in STAC, providing links to the actual `assets` (including thumbnails) that they represent. It is a `GeoJSON Feature` with additional fields for things like time, links to related entities and mainly to the assets. According to the specification, this is the atomic unit that describes the data to be discovered in a `STAC Catalog` or `Collection`.
- **Asset**: a `spatiotemporal asset` is any file that represents information about the earth captured in a certain space and time.
- **Catalog**: provides a structure to link various `STAC Items` together or even to other `STAC Catalogs` or `Collections`.
- **Collection:** is a specialization of the `Catalog` that allows additional information about a spatio-temporal collection of data.
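To make the data model above more concrete, the cell below sketches the typical shape of a `STAC Item`; every field value here is hypothetical, and real items returned by the service carry additional metadata.
```
# Illustrative (hypothetical) structure of a STAC Item: a GeoJSON Feature
# with a datetime, links and assets pointing to the actual files.
stac_item_example = {
    "type": "Feature",
    "id": "example-item-id",
    "geometry": {"type": "Polygon",
                 "coordinates": [[[-46.6, -13.2], [-45.0, -13.2],
                                  [-45.0, -12.3], [-46.6, -12.3],
                                  [-46.6, -13.2]]]},
    "bbox": [-46.6, -13.2, -45.0, -12.3],
    "properties": {"datetime": "2018-08-29T00:00:00Z"},
    "links": [{"rel": "collection", "href": "https://example.org/collection"}],
    "assets": {
        "BAND13": {"href": "https://example.org/BAND13.tif", "type": "image/tiff"},
        "thumbnail": {"href": "https://example.org/preview.png", "type": "image/png"}
    }
}
```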
# STAC Client API
<hr style="border:1px solid #0077b9;">
For running the examples in this Jupyter Notebook you will need to install the [STAC client for Python](https://github.com/brazil-data-cube/stac.py). To install it from PyPI using `pip`, use the following command:
```
#!pip install stac.py
```
In order to access the functionalities of the client API, you should import the `stac` package, as follows:
```
import stac
```
After that, you can check the installed `stac` package version:
```
stac.__version__
```
Then, create a `STAC` object attached to the Brazil Data Cube's STAC service:
```
service = stac.STAC('https://brazildatacube.dpi.inpe.br/stac/', access_token='change-me')
```
# Listing the Available Data Products
<hr style="border:1px solid #0077b9;">
In the Jupyter environment, the `STAC` object will list the available image and data cube collections from the service:
```
service
```
or, access the `collections` property:
```
service.collections
```
# Retrieving the Metadata of a Data Product
<hr style="border:1px solid #0077b9;">
The `collection` method returns information about a given image or data cube collection identified by its name. In this example we are retrieving information about the datacube collection `CB4_64_16D_STK-1`:
```
collection = service.collection('CB4_64_16D_STK-1')
collection
```
# Retrieving Collection Items
<hr style="border:1px solid #0077b9;">
The `get_items` method returns the items of a given collection:
```
collection.get_items()
```
The `get_items` method also supports filtering rules through the specification of a rectangle (`bbox`) or a date and time (`datetime`) criteria:
```
items = collection.get_items(
filter={
'bbox':'-46.62597656250001,-13.19716452328198,-45.03570556640626,-12.297068292853805',
'datetime':'2018-08-01/2019-07-31',
'limit':10
}
)
items
```
From the item collection retrieved with the `get_items` method, it is possible to traverse the list of items:
```
for item in items:
print(item.id)
```
or, it is possible to use the index operator (`[]`) with the ``features`` property in order to retrieve a specific item from the collection:
```
item = items.features[0]
item.id
```
# Assets
<hr style="border:1px solid #0077b9;">
The assets with the links to the images, thumbnails or specific metadata files, can be accessed through the property `assets` (from a given item):
```
assets = item.assets
```
Then, from the assets it is possible to traverse or access individual elements:
```
for k in assets.keys():
print(k)
```
The metadata related to the CBERS-4/AWFI blue band is available under the dictionary key `BAND13`:
```
blue_asset = assets['BAND13']
blue_asset
```
To iterate in the item's assets, use the following pattern:
```
for asset in assets.values():
print(asset)
```
# Using RasterIO and NumPy
<hr style="border:1px solid #0077b9;">
The `rasterio` library can be used to read image files from the Brazil Data Cube's service on-the-fly and then to create `NumPy` arrays. The `read` method of an `Item` can be used to perform the reading and array creation:
```
nir = item.read('BAND16')
```
<div style="text-align: justify; margin-left: 15%; margin-right: 15%; border-style: solid; border-color: #0077b9; border-width: 1px; padding: 5px;">
<b>Note:</b> If there are errors because of your pyproj version, you can run the code below as specified in <a href="https://rasterio.readthedocs.io/en/latest/faq.html#why-can-t-rasterio-find-proj-db-rasterio-from-pypi-versions-1-2-0" target="_blank">rasterio documentation</a> and try again:
import os
del os.environ['PROJ_LIB']
</div>
```
nir
```
The next code cell imports the `Window` class from the `rasterio` library in order to retrieve a subset of an image and then create an array:
```
from rasterio.windows import Window
```
We can specify a subset of the image file (a window or chunk) to be read. Let's read a 500 x 500 window starting at pixel (0, 0) for the spectral bands `red`, `green` and `blue`:
```
red = item.read('BAND15', window=Window(0, 0, 500, 500)) # Window(col_off, row_off, width, height)
green = item.read('BAND14', window=Window(0, 0, 500, 500))
blue = item.read('BAND13', window=Window(0, 0, 500, 500))
blue
```
# Using Matplotlib to Visualize Images
<hr style="border:1px solid #0077b9;">
`Matplotlib` can be used to plot the arrays read in the last section:
```
%matplotlib inline
from matplotlib import pyplot as plt
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(12, 4))
ax1.imshow(red, cmap='gray')
ax2.imshow(green, cmap='gray')
ax3.imshow(blue, cmap='gray')
```
Using `Numpy` we can stack the previous arrays and use `Matplotlib` to plot a color image, but first we need to normalize their values:
```
import numpy
def normalize(array):
"""Normalizes numpy arrays into scale 0.0 - 1.0"""
array_min, array_max = array.min(), array.max()
return ((array - array_min)/(array_max - array_min))
rgb = numpy.dstack((normalize(red), normalize(green), normalize(blue)))
plt.imshow(rgb)
```
# Retrieving Image Files
<hr style="border:1px solid #0077b9;">
The file related to an asset can be retrieved through the `download` method. The code cell below shows how to download the image file associated with the asset into a folder named `img`:
```
blue_asset.download('img')
```
In order to download all files related to an item, use the `Item.download` method:
```
item.download('images')
```
Note that the URL for a given asset can be retrieved by the property `href`:
```
blue_asset.href
```
# References
<hr style="border:1px solid #0077b9;">
- [Spatio Temporal Asset Catalog Specification](https://stacspec.org/)
- [Brazil Data Cube Python Client Library for STAC Service - GitHub Repository](https://github.com/brazil-data-cube/stac.py)
# See also the following Jupyter Notebooks
<hr style="border:1px solid #0077b9;">
* [NDVI calculation on images obtained through STAC](./stac-ndvi-calculation.ipynb)
* [Thresholding images obtained through STAC](./stac-image-threshold.ipynb)
* [Calculating Image Difference on images obtained through STAC](./stac-image-difference.ipynb)
# Example: Polynomial Curve Fitting
Observe a real-valued input variable $x$ $\rightarrow$ predict a real-valued target variable $t$
* $\textbf{x} \equiv (x_1, \cdots, x_i, \cdots, x_N)^T, \quad x_i \in [0, 1]$
* $\textbf{t} \equiv (t_1, \cdots, t_i, \cdots, t_N)^T, \quad t_i = \sin(2\pi x_i) + N(\mu, \sigma^2)$
```
import numpy as np
import matplotlib.pylab as plt
# making data
seed = 62
np.random.seed(seed)
N = 10
x = np.random.rand(N)
t = np.sin(2*np.pi*x) + np.random.randn(N) * 0.1
x_sin = np.linspace(0, 1)
t_sin = np.sin(2*np.pi*x_sin)
plt.plot(x_sin, t_sin, c='green')
plt.scatter(x, t)
plt.xlabel('x', fontsize=16)
plt.ylabel('t', rotation=0, fontsize=16)
plt.show()
```
* Goal: exploit this training set in order to make predictions of the value $\hat{t}$ of the target variable for some new value $\hat{x}$ of the input variable.
* Use some theories:
* Probability theory: provides a framework for expressing such uncertainty in a precise and quantitative manner
* Decision theory: allows us to exploit this probabilistic representation in order to make predictions that are optimal according to appropriate criteria
* For the moment, let's use a polynomial function, where $M$ is the order of the polynomial. $y(x, \mathbf{w})$ is a linear function of the coefficients ($\mathbf{w}$)
$$y(x, \mathbf{w}) = w_0 + w_1 x + w_2 x^2 + \cdots + w_M x^M = \sum_{j=0}^{M} w_j x^j$$
```
def vandermonde_matrix(x, m):
"""we will introduce vandermonde_matrix, when we find solution of polynomial regression"""
return np.array([x**i for i in range(m+1)]).T
def polynomial_function(x, w, m):
assert w.size == m+1, "coefficients number must same as M+1"
V = vandermonde_matrix(x, m) # shape (x.size, M+1)
return np.dot(V, w)
np.random.seed(seed)
M = 3
w = np.random.randn(M+1)
t_hat = polynomial_function(x, w, M)
print(t_hat.round(3))
```
* The values of the coefficients will be determined by fitting the polynomial to the training data. This can be done by minimizing an error function, which measures the misfit between the function $y(x, \mathbf{w})$ and the training data points.
$$E(\mathbf{w}) = \dfrac{1}{2} \sum_{n=1}^{N} (y(x_n, \mathbf{w}) - t_n)^2$$
```
def error_function(pred, target):
return (1/2)*((pred-target)**2).sum()
error_value = error_function(t_hat, t)
error_value
```
* Because the error function is a quadratic function of $\mathbf{w}$, its derivatives with respect to the coefficients will be linear in the elements of $\mathbf{w}$, so the minimization of the error function has a unique solution.
* The remaining problem is choosing the order $M$; this is called **model comparison or model selection**.
* Then how do we choose the optimal $M$?
* use test data with 100 data points
* evaluate the residual value of error
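The comparison uses the root-mean-square (RMS) error, which is what the `root_mean_square_error` helper in the next cell computes:
$$E_{RMS} = \sqrt{2E(\mathbf{w})/N}$$
Dividing by $N$ allows comparing data sets of different sizes on an equal footing, and the square root puts $E_{RMS}$ on the same scale (and in the same units) as the target variable $t$.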
```
np.random.seed(seed)
N_test = 100
x_test = np.random.rand(N_test)
t_test = np.sin(2*np.pi*x_test) + np.random.randn(N_test) * 0.1
plt.plot(x_sin, t_sin, c='green')
plt.scatter(x_test, t_test, c='red')
plt.xlabel('x', fontsize=16)
plt.ylabel('t', rotation=0, fontsize=16)
plt.show()
def root_mean_square_error(error, n_samples):
return np.sqrt(2*error/n_samples)
# M=3
error = error_function(polynomial_function(x_test, w, M), t_test)
rms = root_mean_square_error(error, N_test)
rms
```
### Using the normal equation to find the solution
First define the matrix $V$ (of size $(N, M+1)$), called the **Vandermonde matrix**, which looks like below. $M$ is the degree of the polynomial function.
$$V = \begin{bmatrix}
1 & x_1 & x_1^2 & \cdots & x_1^M \\
1 & x_2 & x_2^2 & \cdots & x_2^M \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
1 & x_N & x_N^2 & \cdots & x_N^M
\end{bmatrix}$$
```
def vandermonde_matrix(x, m):
"""vandermonde matrix"""
return np.array([x**i for i in range(m+1)]).T
M = 3
V = vandermonde_matrix(x, M)
print(V.round(3))
```
So, we can define the polynomial as $y=V\cdot w$, where $w$ is a column vector called the **coefficients**, $w = [w_0, w_1, \cdots , w_M]^T$
$$y = \begin{bmatrix} y_1 \\ y_2 \\ \vdots \\ y_N \end{bmatrix} =
\begin{bmatrix} w_0 + w_1x_1 + w_2x_1^2 + \cdots + w_Mx_1^M \\ w_0 + w_1x_2 + w_2x_2^2 + \cdots + w_Mx_2^M \\ \vdots \\
w_0 + w_1x_N + w_2x_N^2 + \cdots + w_Mx_N^M \end{bmatrix}$$
We already defined the error function, $E(\mathbf{w}) = \dfrac{1}{2} \sum_{n=1}^{N} (y(x_n, \mathbf{w}) - t_n)^2 = \dfrac{1}{2} \Vert y - V \cdot w \Vert^2$, which can be solved by minimization, $\hat{w} = \underset{w}{\arg \min} E(w)$.
Define the residual $r = y - V \cdot w$; then the error function becomes $E(\mathbf{w}) = \dfrac{1}{2} \Vert r \Vert^2$. Because the error function is quadratic, its minimization has a unique solution.
Then we can take the derivatives, and when they equal $0$, the error function attains its minimum value.
$$\begin{aligned} \dfrac{\partial E}{\partial w} &= \begin{bmatrix} \dfrac{\partial E}{\partial w_0} \\ \dfrac{\partial E}{\partial w_1} \\ \vdots \\ \dfrac{\partial E}{\partial w_M} \end{bmatrix} \\
&= \begin{bmatrix}
\dfrac{\partial E}{\partial r_1}\dfrac{\partial r_1}{\partial w_0} + \dfrac{\partial E}{\partial r_2}\dfrac{\partial r_2}{\partial w_0} + \cdots +\dfrac{\partial E}{\partial r_N}\dfrac{\partial r_N}{\partial w_0} \\
\dfrac{\partial E}{\partial r_1}\dfrac{\partial r_1}{\partial w_1} + \dfrac{\partial E}{\partial r_2}\dfrac{\partial r_2}{\partial w_1} + \cdots +\dfrac{\partial E}{\partial r_N}\dfrac{\partial r_N}{\partial w_1} \\
\vdots \\
\dfrac{\partial E}{\partial r_1}\dfrac{\partial r_1}{\partial w_M} + \dfrac{\partial E}{\partial r_2}\dfrac{\partial r_2}{\partial w_M} + \cdots +\dfrac{\partial E}{\partial r_N}\dfrac{\partial r_N}{\partial w_M}
\end{bmatrix} \\
&= \begin{bmatrix}
\dfrac{\partial r_1}{\partial w_0} & \dfrac{\partial r_2}{\partial w_0} & \cdots & \dfrac{\partial r_N}{\partial w_0} \\
\dfrac{\partial r_1}{\partial w_1} & \dfrac{\partial r_2}{\partial w_1} & \cdots & \dfrac{\partial r_N}{\partial w_1} \\
\vdots & \vdots & \ddots & \vdots \\
\dfrac{\partial r_1}{\partial w_M} & \dfrac{\partial r_2}{\partial w_M} & \cdots & \dfrac{\partial r_N}{\partial w_M}
\end{bmatrix} \cdot
\begin{bmatrix} \dfrac{\partial E}{\partial r_1} \\ \dfrac{\partial E}{\partial r_2} \\ \vdots \\ \dfrac{\partial E}{\partial r_N} \end{bmatrix} \\
&= \dfrac{\partial r}{\partial w} \cdot \dfrac{\partial E}{\partial r} \\
&= V^T \cdot (y - V\cdot w) = 0
\end{aligned}$$
So, we can find the solution for the coefficients $w$.
$$w = (V^TV)^{-1}V^Ty$$
```
def poly_solution(x, t, m):
V = vandermonde_matrix(x, m)
return np.linalg.inv(np.dot(V.T, V)).dot(V.T).dot(t)
print(f"Solution of coefficients are {poly_solution(x, t, M).round(3)}")
# confirm we are right
from numpy.polynomial import polynomial as P
print(P.polyfit(x, t, M).round(3))
```
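As a side note (an addition here, not part of the original derivation): explicitly inverting $V^TV$ can become numerically fragile for larger $M$, and NumPy's least-squares solver returns the same coefficients more stably. A minimal sketch, reusing `x`, `t`, `M` and `vandermonde_matrix` from above:
```
# Numerically stabler alternative: solve the least-squares problem directly
# instead of forming the inverse of V^T V explicitly.
w_lstsq, residuals, rank, sv = np.linalg.lstsq(vandermonde_matrix(x, M), t, rcond=None)
print(w_lstsq.round(3))  # should agree with poly_solution(x, t, M)
```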
Let's find the optimal degree of the polynomial now!
```
def get_rms_error(t_hat, t, n_sample, m):
error = error_function(t_hat, t)
rms = root_mean_square_error(error, n_sample)
return rms
all_w = []
all_rms_train = []
all_rms_test = []
for m in range(10):
optimal_w = poly_solution(x, t, m)
t_hat = polynomial_function(x, optimal_w, m)
t_hat_test = polynomial_function(x_test, optimal_w, m)
rms_train = get_rms_error(t_hat, t, N, m) # N=10
rms_test = get_rms_error(t_hat_test, t_test, N_test, m) # N_test = 100
print(f"M={m} | rms_train: {rms_train:.4f} rms_test: {rms_test:.4f}")
# Plot predicted line
plt.plot(x_sin, t_sin, c="green", label="sin function")
plt.plot(x_sin, polynomial_function(x_sin, optimal_w, m), c="red", label=f"model M={m}")
plt.scatter(x, t)
plt.xlim((0, 1))
plt.ylim((-1.25, 1.25))
plt.xlabel('x', fontsize=16)
plt.ylabel('t', rotation=0, fontsize=16)
plt.legend()
plt.show()
all_w.append(optimal_w)
all_rms_train.append(rms_train)
all_rms_test.append(rms_test)
plt.scatter(np.arange(10), all_rms_train, facecolors='none', edgecolors='b')
plt.plot(np.arange(10), all_rms_train, c='b', label='Training')
plt.scatter(np.arange(len(all_rms_test)), all_rms_test, facecolors='none', edgecolors='r')
plt.plot(np.arange(len(all_rms_test)), all_rms_test, c='r', label='Test')
plt.legend()
plt.xlim((-0.1, 10))
plt.ylim((-0.1, 1.2))
plt.ylabel("root-mean-squared Error", fontsize=16)
plt.xlabel("M", fontsize=16)
plt.show()
np.set_printoptions(precision=3)
for i in [0, 1, 3, 9]:
print(f"coefficients at M={i} is {all_w[i].round(3)}")
```
### Test for different dataset sizes
```
np.random.seed(seed)
N1 = 15
N2 = 100
x1, x2 = np.random.rand(N1), np.random.rand(N2)
t1 = np.sin(2*np.pi*x1) + np.random.randn(N1) * 0.1
t2 = np.sin(2*np.pi*x2) + np.random.randn(N2) * 0.1
optimal_w1 = poly_solution(x1, t1, m=9)
optimal_w2 = poly_solution(x2, t2, m=9)
# Plot predicted line
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
def plot(x, t, x_sin, t_sin, optimal_w, m, ax):
ax.plot(x_sin, t_sin, c="green", label="sin function")
ax.plot(x_sin, polynomial_function(x_sin, optimal_w, m), c="red", label=f"model N={len(x)}")
ax.scatter(x, t)
ax.set_xlim((0, 1))
ax.set_ylim((-1.25, 1.25))
ax.set_xlabel('x', fontsize=16)
ax.set_ylabel('t', rotation=0, fontsize=16)
ax.legend()
plot(x1, t1, x_sin, t_sin, optimal_w1, m=9, ax=ax1)
plot(x2, t2, x_sin, t_sin, optimal_w2, m=9, ax=ax2)
plt.show()
```
## Regularization
$$
E(\mathbf{w}) = \dfrac{1}{2} \Vert V \cdot w - t \Vert^2 + \frac{\lambda}{2} \Vert w \Vert^2 \qquad \cdots (4)
$$
where $\Vert \mathbf{w} \Vert^2 \equiv \mathbf{w}^T\mathbf{w}=w_0^2 + w_1^2 + \cdots + w_M^2$
It is easy to get the solution for this:
$$
\begin{aligned}
\frac{\partial E(w)}{\partial w} &= V^TV\cdot w - V^Tt + \lambda w = 0 \\
w &= (V^TV + \lambda I_{(M+1)})^{-1}V^Tt
\end{aligned}
$$
A larger regularizer $\lambda$ means stronger regularization (the coefficients are shrunk more towards zero).
```
def ridge_solution(x, t, m, alpha=0):
V = vandermonde_matrix(x, m)
    return np.linalg.inv(np.dot(V.T, V) + alpha * np.eye(m+1)).dot(V.T).dot(t)
M=9
optimal_w1 = ridge_solution(x, t, m=M, alpha=1e-8)
optimal_w2 = ridge_solution(x, t, m=M, alpha=1.0)
# Plot predicted line
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 6))
def plot_ridge(x, t, x_sin, t_sin, optimal_w, m, text, ax):
ax.plot(x_sin, t_sin, c="green", label="sin function")
ax.plot(x_sin, polynomial_function(x_sin, optimal_w, m), c="red", label=f"model M={m}")
ax.scatter(x, t)
ax.set_xlim((0, 1))
ax.set_ylim((-1.25, 1.25))
ax.set_xlabel('x', fontsize=16)
ax.set_ylabel('t', rotation=0, fontsize=16)
ax.legend()
ax.annotate(text, (0.6, 0.5), fontsize=14)
plot_ridge(x, t, x_sin, t_sin, optimal_w1, m=M, text='lambda = 1e-8', ax=ax1)
plot_ridge(x, t, x_sin, t_sin, optimal_w2, m=M, text='lambda = 1.0', ax=ax2)
plt.show()
print(f"coefficients at lambda=1e-8 is {optimal_w1.round(3)}")
print(f"coefficients at lambda=1.0 is {optimal_w2.round(3)}")
```
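As an optional cross-check (this sketch assumes scikit-learn is available, which the tutorial does not require): `Ridge` minimizes the same penalized least-squares objective, so fitting it on the Vandermonde features with `fit_intercept=False` (the bias column is already in $V$) should give coefficients close to `ridge_solution`.
```
# Hedged sanity check against scikit-learn's Ridge on the same Vandermonde features
from sklearn.linear_model import Ridge

V = vandermonde_matrix(x, M)
ridge = Ridge(alpha=1.0, fit_intercept=False)  # same lambda as optimal_w2 above
ridge.fit(V, t)
print(ridge.coef_.round(3))
print(ridge_solution(x, t, m=M, alpha=1.0).round(3))
```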
## See the ridge effect
```
all_w = []
all_rms_train = []
all_rms_test = []
M = 9
for alpha in np.exp(np.arange(-28, -15)):
optimal_w = ridge_solution(x, t, m=M, alpha=alpha)
t_hat = polynomial_function(x, optimal_w, m=M)
t_hat_test = polynomial_function(x_test, optimal_w, m=M)
rms_train = get_rms_error(t_hat, t, N, m=M)
rms_test = get_rms_error(t_hat_test, t_test, N_test, m=M) # N_test = 100
print(f"lambda={alpha} | rms_train: {rms_train:.4f} rms_test: {rms_test:.4f}")
# Plot predicted line
# plt.plot(x_sin, t_sin, c="green", label="sin function")
# plt.plot(x_sin, polynomial_function(x_sin, optimal_w, m), c="red", label=f"model M={m}")
# plt.scatter(x, t)
# plt.xlim((0, 1))
# plt.ylim((-1.25, 1.25))
# plt.xlabel('x', fontsize=16)
# plt.ylabel('t', rotation=0, fontsize=16)
# plt.legend()
# plt.show()
all_w.append(optimal_w)
all_rms_train.append(rms_train)
all_rms_test.append(rms_test)
plt.scatter(np.arange(len(all_rms_train)), all_rms_train, facecolors='none', edgecolors='b')
plt.plot(np.arange(len(all_rms_train)), all_rms_train, c='b', label='Training')
plt.scatter(np.arange(len(all_rms_test)), all_rms_test, facecolors='none', edgecolors='r')
plt.plot(np.arange(len(all_rms_test)), all_rms_test, c='r', label='Test')
plt.legend()
plt.xticks(np.arange(len(all_rms_test)), np.arange(-28, -15))
plt.ylabel("root-mean-squared Error", fontsize=16)
plt.xlabel("np.log(lambda)", fontsize=16)
plt.show()
```
## Initialization
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import math
import scipy.io
from scipy.special import expit
from math import *
from scipy import optimize
sns.set_style('whitegrid')
%matplotlib inline
```
## Loading Data
```
mat = scipy.io.loadmat('ex4data1.mat')
X = mat['X']
y = mat['y']
X = np.insert(X,0,1,axis= 1)
m,n = X.shape
input_layer_size = 400
hidden_layer_size = 25
num_labels = 10
_lambda = 1
```
## Function Section
```
#functions Sections
def magic_display(matrix = None):
if matrix is None:
# selecting 100 random rows of the X
rand_indces = np.random.permutation(m)[0:100]
X_dis = X[rand_indces]
else:
X_dis = matrix
if( len(X_dis.shape) > 1 ):
m_test,n_test = X_dis.shape
axis_bound = 1
else:
m_test = 1
n_test = X_dis.shape[0]
axis_bound = 0
    # width and height of each digit in the plot
example_width = int(round(sqrt(n_test)))
example_height = int(round( n_test / example_width ))
# number of numbers to show in plot
display_rows = floor(sqrt(m_test))
display_cols = ceil(m_test / display_rows )
# padding between numbers
pad = 2
    # initialize the array that will hold the previously selected 100 random digits
display_array = np.ones((
pad + display_rows * ( example_height + pad ),
pad + display_cols * ( example_width + pad )
))
count = 0;
for i in range(display_rows):
for j in range(display_cols):
if( count >= m_test ):
break
# max_val of each row in X_dis
max_val = np.max( X_dis[count : count+1], axis= axis_bound)
# Starting x,y point of numbers shape in array
ex_x_range = pad + ( i ) * ( example_height + pad )
ex_y_range = pad + ( j ) * ( example_width + pad )
if(m_test > 1):
ex_arr = X_dis[ count : count + 1 , 1:].reshape(example_height , example_width)
else:
ex_arr = X_dis[1:].reshape(example_height , example_width)
# Setting values
display_array[ ex_x_range : ex_x_range + example_height,
ex_y_range : ex_y_range + example_width ] = np.divide(ex_arr , max_val)
count += 1
# Plotting 100 random data
plt.figure(figsize=(12,8))
    # Get rid of the grid
plt.grid(False)
plt.imshow(display_array)
def hyp(matrix):
return expit(matrix)
def neural_cost_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, _lam):
    # initialize some variables
    # Number of training examples
    m = X.shape[0]
    if(len(X.shape) > 1):
        axis_bound = 1
    else:
        axis_bound = 0
# reshaping from one dimensional to 2d dimensional parameter vector
end_indx_theta1 = hidden_layer_size * ( input_layer_size + 1 )
Theta1 = np.reshape( nn_params[0 : end_indx_theta1 ],
( hidden_layer_size, input_layer_size + 1 ))
# reshaping from one dimensional to 2d dimensional parameter vector
Theta2 = np.reshape( nn_params[end_indx_theta1 : ],
( num_labels, hidden_layer_size + 1 ))
    # Computing hidden layer activation
z_2 = np.dot(X, Theta1.T )
hidden_activation = hyp( z_2 )
hidden_activation = np.insert( hidden_activation, 0, 1, axis=axis_bound )
    # Computing output layer activation
z_3 = np.dot(hidden_activation, Theta2.T)
out_activation = hyp(z_3)
    # finding hypothesis matrix
h = out_activation
    # Computing Log(sigmoid(x)) for all of the hypothesis elements
h1 = np.log(h)
    # Computing Log( 1 - sigmoid(x)) for all of the hypothesis elements
h2 = np.log(1 - h)
# Creating new matrix for y
new_y0 = ( y - 1 ).copy()
new_y1 = np.zeros(out_activation.shape)
new_y1[np.arange(0,out_activation.shape[0]),new_y0.T] = 1
    # Computing regularization terms (the bias columns are not regularized)
    Theta1_pow2 = Theta1[:, 1:] * Theta1[:, 1:]
    Theta2_pow2 = Theta2[:, 1:] * Theta2[:, 1:]
    # Computing cost of the hypothesis
J = ( -1 / m ) * sum(sum( new_y1 * h1 + (1 - new_y1) * h2)) + \
( _lam / ( 2 * m )) * ( sum(sum( Theta1_pow2 )) + sum(sum( Theta2_pow2 )) )
return J
def neural_gradient_function(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, _lam):
    # initialize some variables
if(len(X.shape) > 1):
axis_bound = 1
else:
axis_bound = 0
# Number of training examples
m = X.shape[0]
# reshaping from one dimensional to 2d dimensional parameter vector
end_indx_theta1 = hidden_layer_size * ( input_layer_size + 1 )
Theta1 = np.reshape( nn_params[0 : end_indx_theta1 ],
( hidden_layer_size, input_layer_size + 1 ))
# reshaping from one dimensional to 2d dimensional parameter vector
Theta2 = np.reshape( nn_params[end_indx_theta1 : ],
( num_labels, hidden_layer_size + 1 ))
# Defining Delta's
Delta1 = np.zeros(Theta1.shape)
Delta2 = np.zeros(Theta2.shape)
# Defining Theta_grad Matrixs
Theta1_grad = np.zeros(Theta1.shape)
Theta2_grad = np.zeros(Theta2.shape)
for i in range(m):
X_input = X[i : i + 1,:]
        # Computing hidden layer activation
z_2 = np.dot( X_input, Theta1.T )
hidden_activation = hyp( z_2 )
hidden_activation = np.insert( hidden_activation, 0, 1, axis=axis_bound )
        # Computing output layer activation
z_3 = np.dot( hidden_activation, Theta2.T )
out_activation = hyp( z_3 )
        # finding hypothesis matrix
h = out_activation
# Creating new matrix for y
new_y0 = ( y - 1 ).copy()
new_y1 = np.zeros(out_activation.shape[1])
new_y1[new_y0[i]] = 1
        # Computing errors
out_error = h - new_y1
z_2 = np.insert(z_2, 0, 1, axis=1)
hidden_error = np.dot( out_error , Theta2 ).T * sigmoid_gradient(z_2).T
hidden_error = hidden_error[ 1: ]
# Computing Delta
Delta1 = Delta1 + hidden_error * X_input
Delta2 = Delta2 + out_error.T * hidden_activation
Theta1_grad[:, 0:1 ] = ( 1 / m ) * ( Delta1[:, 0:1 ] )
Theta1_grad[:, 1: ] = ( 1 / m ) * ( Delta1[:, 1: ] ) + ( _lam / m ) * Theta1[:, 1: ]
Theta2_grad[:, 0:1 ] = ( 1 / m ) * ( Delta2[:, 0:1 ] )
Theta2_grad[:, 1: ] = ( 1 / m ) * ( Delta2[:, 1: ] ) + ( _lam / m ) * Theta2[:, 1: ]
    # Converting weights to 1-D arrays
Theta1_grad_flat = np.array(Theta1_grad.flat)
Theta2_grad_flat = np.array(Theta2_grad.flat)
    return np.concatenate((Theta1_grad_flat, Theta2_grad_flat))
def sigmoid_gradient(matrix):
return hyp(matrix) * ( 1 - hyp(matrix) )
def checking_gradient(_lambda):
if(_lambda == None):
_lambda = 0
input_layer_size = 3
hidden_layer_size = 5
num_labels = 3
m = 5
Theta1 = debug_initialaize_weights(hidden_layer_size, input_layer_size)
Theta2 = debug_initialaize_weights(num_labels, hidden_layer_size)
X = debug_initialaize_weights(m, input_layer_size - 1)
y = 1 + np.mod(np.arange(0,m), num_labels)
    # initialize some variables
if(len(X.shape) > 1):
axis_bound = 1
else:
axis_bound = 0
# Inserting 1's column to matrix
X = np.insert( X, 0, 1, axis= axis_bound)
Theta1_flat = np.array(Theta1.flat)
Theta2_flat = np.array(Theta2.flat)
Theta = np.concatenate((Theta1_flat, Theta2_flat))
grad = neural_gradient_function(Theta, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda)
numerical_grad = numerical_gradinet_function(Theta, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda)
print(np.linalg.norm(numerical_grad - grad) / np.linalg.norm(numerical_grad + grad))
def numerical_gradinet_function(Theta, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda):
new_grad = np.zeros(Theta.size)
p = np.zeros(Theta.size)
e = 1e-4
for i in range(Theta.size):
p[i] = e
j1 = neural_cost_function(Theta + p, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda)
j2 = neural_cost_function(Theta - p, input_layer_size, hidden_layer_size, num_labels, X, y, _lambda)
new_grad[i] = (j1 - j2) / ( 2 * e )
p[i] = 0
return new_grad
def debug_initialaize_weights(output_layer, input_layer):
matrix = np.zeros((output_layer, input_layer + 1))
return np.sin(np.arange(1,matrix.size + 1)).reshape(matrix.shape) / 10
checking_gradient(3)
```
## Visualizing Data
```
magic_display()
```
## Feedforward Propagation Algorithm
```
# Loading Weights
weights = scipy.io.loadmat('ex4weights.mat')
Theta1 = weights['Theta1']
Theta2 = weights['Theta2']
Theta1.shape
Theta2.shape
# Converting weights to 1-D arrays
Theta1_flat = np.array(Theta1.flat)
Theta2_flat = np.array(Theta2.flat)
# Creating a new 1-D array holding all of the weights
Theta = np.concatenate((Theta1_flat, Theta2_flat))
neural_cost_function(Theta, input_layer_size, hidden_layer_size, num_labels, X, y, 3)
```
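As a quick optional check (a sketch added here, not part of the original exercise text), the loaded weights can be pushed through plain forward propagation to see how well they classify the training digits. It assumes the conventions used above: `X` already contains the bias column and the labels in `y` run from 1 to 10 (with 10 encoding the digit 0).
```
# Forward propagation with the pre-trained weights
a2 = hyp(X.dot(Theta1.T))                # hidden layer activations, shape (m, 25)
a2 = np.insert(a2, 0, 1, axis=1)         # add the bias unit, shape (m, 26)
a3 = hyp(a2.dot(Theta2.T))               # output layer activations, shape (m, 10)
predictions = np.argmax(a3, axis=1) + 1  # map class indices back to labels 1..10
print(f"Training accuracy: {np.mean(predictions == y.flatten()) * 100:.2f}%")
```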
# Analyse wavefields
This notebook checks the velocity models and FD simulations output by `generate_velocity_models.py` and `generate_forward_simulations.py` are sensible.
```
import glob
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import scipy as sp
import sys
sys.path.insert(0, '../shared_modules/')
import plot_utils
%matplotlib inline
```
## Load example velocity model and FD simulation
```
# PARAMETERS
VEL_RUN = "marmousi"
SIM_RUN = "marmousi_2ms"
VEL_DIR = "velocity/" + VEL_RUN + "/"
OUT_SIM_DIR = "gather/" + SIM_RUN + "/"
isim=(20,1)
wavefields = np.load(OUT_SIM_DIR + "wavefields_%.8i_%.8i.npy"%(isim[0],isim[1]))
wavefields = wavefields[::4]
gather = np.load(OUT_SIM_DIR + "gather_%.8i_%.8i.npy"%(isim[0],isim[1]))
velocity = np.load(VEL_DIR + "velocity_%.8i.npy"%(isim[0]))
source_is = np.load(OUT_SIM_DIR + "source_is.npy")
receiver_is = np.load(OUT_SIM_DIR + "receiver_is.npy")
DELTAT = 0.002
source_i = source_is[isim[0],isim[1]]
print(velocity.shape, velocity[0,0])
print(wavefields.shape, np.max(wavefields))
print(gather.shape)
print(receiver_is.shape, source_is.shape)
#print(receiver_is)
#print(source_is)
print(source_i)
```
## Create wavefield animation
```
%matplotlib notebook
# define initial plots
fig = plt.figure(figsize=(13.5,6))
plt.subplot(1,2,2)
plt.imshow(velocity.T, cmap="viridis")
cb = plt.colorbar()
cb.ax.set_ylabel('P-wave velocity (m/s)')
plt.subplot(1,2,1)
plt.imshow(velocity.T, alpha=0.4, cmap="gray_r")
im = plt.imshow(wavefields[0].T, aspect=1, cmap=plot_utils.rgb, alpha=0.4, vmin = -2, vmax=2)
cb = plt.colorbar()
cb.ax.set_ylabel('P-wave amplitude')
plt.scatter(receiver_is[:,0], receiver_is[:,1])
plt.scatter(source_i[0], source_i[1])
# define animation update function
def update(i):
# set the data in the im object
plt.title("t = %i"%(i))
im.set_data(wavefields[i].T)
return [im]# tells the animator which parts of the plot to update
# start animation
# important: keep the instance to maintain timer
ani = animation.FuncAnimation(fig, update, frames=range(0,wavefields.shape[0],10), interval=100, blit=False)
plt.subplots_adjust(left=0.0, right=1., bottom=0.05, top=0.95, hspace=0.0, wspace=0.0)
plt.show()
ani._stop()
```
## Check wavefields and gather match
```
# check wavefields and gather match
gather_test = wavefields[:,receiver_is[:,0], receiver_is[:,1]].T
print(gather.shape, gather_test.shape)
print(np.allclose(gather, gather_test))
# plot gather
%matplotlib inline
print(gather.mean(), 5*gather.std())
gathern = gather/(1)
t = np.arange(gather.shape[1], dtype=np.float32)
t_gain = (t**2.5)
t_gain = t_gain/np.median(t_gain)
plt.figure(figsize=(12,8))
plt.imshow((gathern*t_gain).T, aspect=0.1, cmap="Greys", vmin=-1, vmax=1)
plt.colorbar()
plt.figure(figsize=(20,10))
plt.plot(t.flatten(),(gathern*t_gain)[10,:])
plt.scatter(t.flatten(),np.zeros(gather.shape[1]), s=0.1)
```
## Plot average frequency spectrum of gather
```
# plot average frequency spectrum of gather
s = np.abs(np.fft.fft(gather, axis=1))
s = np.sum(s, axis=0)
f = np.fft.fftfreq(s.shape[0], DELTAT)
plt.figure(figsize=(10,5))
plt.plot(f[np.argsort(f)], s[np.argsort(f)])
plt.xlim(0, 250)
plt.show()
print(f[np.argmax(s)])# dominant frequency
plt.plot(t,t_gain)
```
# Week 3: Transfer Learning
Welcome to this assignment! This week, you are going to use a technique called `Transfer Learning` in which you utilize an already trained network to help you solve a similar problem to the one it was originally trained to solve.
Let's get started!
```
import os
import zipfile
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import img_to_array, load_img
```
## Dataset
For this assignment, you will use the `Horse or Human dataset`, which contains images of horses and humans.
Download the `training` and `validation` sets by running the cell below:
```
# Get the Horse or Human training dataset
!wget -q -P /content/ https://storage.googleapis.com/tensorflow-1-public/course2/week3/horse-or-human.zip
# Get the Horse or Human validation dataset
!wget -q -P /content/ https://storage.googleapis.com/tensorflow-1-public/course2/week3/validation-horse-or-human.zip
test_local_zip = './horse-or-human.zip'
zip_ref = zipfile.ZipFile(test_local_zip, 'r')
zip_ref.extractall('/tmp/training')
val_local_zip = './validation-horse-or-human.zip'
zip_ref = zipfile.ZipFile(val_local_zip, 'r')
zip_ref.extractall('/tmp/validation')
zip_ref.close()
```
This dataset already has a structure that is compatible with Keras' `flow_from_directory` so you don't need to move the images into subdirectories as you did in the previous assignments. However, it is still a good idea to save the paths of the images so you can use them later on:
```
# Define the training and validation base directories
train_dir = '/tmp/training'
validation_dir = '/tmp/validation'
# Directory with training horse pictures
train_horses_dir = os.path.join(train_dir, 'horses')
# Directory with training humans pictures
train_humans_dir = os.path.join(train_dir, 'humans')
# Directory with validation horse pictures
validation_horses_dir = os.path.join(validation_dir, 'horses')
# Directory with validation human pictures
validation_humans_dir = os.path.join(validation_dir, 'humans')
# Check the number of images for each class and set
print(f"There are {len(os.listdir(train_horses_dir))} images of horses for training.\n")
print(f"There are {len(os.listdir(train_humans_dir))} images of humans for training.\n")
print(f"There are {len(os.listdir(validation_horses_dir))} images of horses for validation.\n")
print(f"There are {len(os.listdir(validation_humans_dir))} images of humans for validation.\n")
```
Now take a look at a sample image of each one of the classes:
```
print("Sample horse image:")
plt.imshow(load_img(f"{os.path.join(train_horses_dir, os.listdir(train_horses_dir)[0])}"))
plt.show()
print("\nSample human image:")
plt.imshow(load_img(f"{os.path.join(train_humans_dir, os.listdir(train_humans_dir)[0])}"))
plt.show()
```
`matplotlib` makes it easy to see that these images have a resolution of 300x300 and are colored, but you can double check this by using the code below:
```
# Load the first example of a horse
sample_image = load_img(f"{os.path.join(train_horses_dir, os.listdir(train_horses_dir)[0])}")
# Convert the image into its numpy array representation
sample_array = img_to_array(sample_image)
print(f"Each image has shape: {sample_array.shape}")
```
As expected, the sample image has a resolution of 300x300 and the last dimension is used for each one of the RGB channels to represent color.
## Training and Validation Generators
Now that you know the images you are dealing with, it is time for you to code the generators that will feed these images to your network. For this, complete the `train_val_generators` function below:
**Important Note:** The images have a resolution of 300x300 but the `flow_from_directory` method you will use allows you to set a target resolution. In this case, **set a `target_size` of (150, 150)**. This will heavily lower the number of trainable parameters in your final network, yielding much quicker training times without compromising the accuracy!
```
# GRADED FUNCTION: train_val_generators
def train_val_generators(TRAINING_DIR, VALIDATION_DIR):
### START CODE HERE
# Instantiate the ImageDataGenerator class
# Don't forget to normalize pixel values and set arguments to augment the images
train_datagen = None
# Pass in the appropriate arguments to the flow_from_directory method
train_generator = train_datagen.flow_from_directory(directory=None,
batch_size=32,
class_mode=None,
target_size=(None, None))
# Instantiate the ImageDataGenerator class (don't forget to set the rescale argument)
# Remember that validation data should not be augmented
validation_datagen = None
# Pass in the appropriate arguments to the flow_from_directory method
validation_generator = validation_datagen.flow_from_directory(directory=None,
batch_size=32,
class_mode=None,
target_size=(None, None))
### END CODE HERE
return train_generator, validation_generator
# Test your generators
train_generator, validation_generator = train_val_generators(train_dir, validation_dir)
```
**Expected Output:**
```
Found 1027 images belonging to 2 classes.
Found 256 images belonging to 2 classes.
```
## Transfer learning - Create the pre-trained model
Download the `inception V3` weights into the `/tmp/` directory:
```
# Download the inception v3 weights
!wget --no-check-certificate \
https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5 \
-O /tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5
```
Now load the `InceptionV3` model and save the path to the weights you just downloaded:
```
# Import the inception model
from tensorflow.keras.applications.inception_v3 import InceptionV3
# Create an instance of the inception model from the local pre-trained weights
local_weights_file = '/tmp/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
```
Complete the `create_pre_trained_model` function below. You should specify the correct `input_shape` for the model (remember that you set a new resolution for the images instead of the native 300x300) and make all of the layers non-trainable:
```
# GRADED FUNCTION: create_pre_trained_model
def create_pre_trained_model(local_weights_file):
### START CODE HERE
pre_trained_model = InceptionV3(input_shape = (None, None, None),
include_top = False,
weights = None)
pre_trained_model.load_weights(local_weights_file)
# Make all the layers in the pre-trained model non-trainable
for None in None:
None = None
### END CODE HERE
return pre_trained_model
```
Check that everything went well by comparing the last few rows of the model summary to the expected output:
```
pre_trained_model = create_pre_trained_model(local_weights_file)
# Print the model summary
pre_trained_model.summary()
```
**Expected Output:**
```
batch_normalization_v1_281 (Bat (None, 3, 3, 192) 576 conv2d_281[0][0]
__________________________________________________________________________________________________
activation_273 (Activation) (None, 3, 3, 320) 0 batch_normalization_v1_273[0][0]
__________________________________________________________________________________________________
mixed9_1 (Concatenate) (None, 3, 3, 768) 0 activation_275[0][0]
activation_276[0][0]
__________________________________________________________________________________________________
concatenate_5 (Concatenate) (None, 3, 3, 768) 0 activation_279[0][0]
activation_280[0][0]
__________________________________________________________________________________________________
activation_281 (Activation) (None, 3, 3, 192) 0 batch_normalization_v1_281[0][0]
__________________________________________________________________________________________________
mixed10 (Concatenate) (None, 3, 3, 2048) 0 activation_273[0][0]
mixed9_1[0][0]
concatenate_5[0][0]
activation_281[0][0]
==================================================================================================
Total params: 21,802,784
Trainable params: 0
Non-trainable params: 21,802,784
```
To check that all the layers in the model were set to be non-trainable, you can also run the cell below:
```
total_params = pre_trained_model.count_params()
num_trainable_params = sum([w.shape.num_elements() for w in pre_trained_model.trainable_weights])
print(f"There are {total_params:,} total parameters in this model.")
print(f"There are {num_trainable_params:,} trainable parameters in this model.")
```
**Expected Output:**
```
There are 21,802,784 total parameters in this model.
There are 0 trainable parameters in this model.
```
## Creating callbacks for later
You have already worked with callbacks in the first course of this specialization, so the callback that stops training once an accuracy of 99.9% is reached is provided for you:
```
# Define a Callback class that stops training once accuracy reaches 99.9%
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('accuracy')>0.999):
print("\nReached 99.9% accuracy so cancelling training!")
self.model.stop_training = True
```
## Pipelining the pre-trained model with your own
Now that the pre-trained model is ready, you need to "glue" it to your own model to solve the task at hand.
For this you will need the last output of the pre-trained model, since this will be the input for your own. Complete the `output_of_last_layer` function below.
**Note:** For grading purposes use the `mixed7` layer as the last layer of the pre-trained model. However, after submitting feel free to come back here and play around with this.
```
# GRADED FUNCTION: output_of_last_layer
def output_of_last_layer(pre_trained_model):
### START CODE HERE
last_desired_layer = None
print('last layer output shape: ', last_desired_layer.output_shape)
last_output = None
print('last layer output: ', last_output)
### END CODE HERE
return last_output
```
Check that everything works as expected:
```
last_output = output_of_last_layer(pre_trained_model)
```
**Expected Output (if `mixed7` layer was used):**
```
last layer output shape: (None, 7, 7, 768)
last layer output: KerasTensor(type_spec=TensorSpec(shape=(None, 7, 7, 768), dtype=tf.float32, name=None), name='mixed7/concat:0', description="created by layer 'mixed7'")
```
Now you will create the final model by adding some additional layers on top of the pre-trained model.
Complete the `create_final_model` function below. You will need to use Tensorflow's [Functional API](https://www.tensorflow.org/guide/keras/functional) for this since the pretrained model has been created using it.
Let's double check this first:
```
# Print the type of the pre-trained model
print(f"The pretrained model has type: {type(pre_trained_model)}")
```
To create the final model, you will use Keras' Model class by defining the appropriate inputs and outputs as described in the first way to instantiate a Model in the [docs](https://www.tensorflow.org/api_docs/python/tf/keras/Model).
Note that you can get the input from any existing model by using its `input` attribute, and by using the Functional API you can use the last layer directly as output when creating the final model.
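To make that pattern concrete before you fill in the graded function, here is a small self-contained illustration (a toy model, not the graded solution): take an existing functional model's `input`, grab an intermediate layer's output, and wrap a new head into a fresh `Model`.
```
import tensorflow as tf

# A tiny stand-in "pre-trained" model built with the Functional API
inputs = tf.keras.Input(shape=(8,))
hidden = tf.keras.layers.Dense(4, activation='relu', name='hidden')(inputs)
outputs = tf.keras.layers.Dense(2, activation='relu')(hidden)
toy_base = tf.keras.Model(inputs=inputs, outputs=outputs)

# Take an intermediate layer's output and stack a new head on top of it
intermediate_output = toy_base.get_layer('hidden').output
new_head = tf.keras.layers.Dense(1, activation='sigmoid')(intermediate_output)
toy_model = tf.keras.Model(inputs=toy_base.input, outputs=new_head)
toy_model.summary()
```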
```
# GRADED FUNCTION: create_final_model
def create_final_model(pre_trained_model, last_output):
# Flatten the output layer to 1 dimension
x = layers.Flatten()(last_output)
### START CODE HERE
# Add a fully connected layer with 1024 hidden units and ReLU activation
x = None
# Add a dropout rate of 0.2
x = None
# Add a final sigmoid layer for classification
x = None
# Create the complete model by using the Model class
model = Model(inputs=None, outputs=None)
# Compile the model
model.compile(optimizer = RMSprop(learning_rate=0.0001),
loss = None,
metrics = [None])
### END CODE HERE
return model
# Save your model in a variable
model = create_final_model(pre_trained_model, last_output)
# Inspect parameters
total_params = model.count_params()
num_trainable_params = sum([w.shape.num_elements() for w in model.trainable_weights])
print(f"There are {total_params:,} total parameters in this model.")
print(f"There are {num_trainable_params:,} trainable parameters in this model.")
```
**Expected Output:**
```
There are 47,512,481 total parameters in this model.
There are 38,537,217 trainable parameters in this model.
```
Wow, that is a lot of parameters!
After submitting your assignment later, try re-running this notebook with the original resolution of 300x300; you will be surprised by how many more parameters there are in that case.
Now train the model:
```
# Run this and see how many epochs it should take before the callback
# fires, and stops training at 99.9% accuracy
# (It should take a few epochs)
callbacks = myCallback()
history = model.fit(train_generator,
validation_data = validation_generator,
epochs = 100,
verbose = 2,
callbacks=callbacks)
```
The training should have stopped after less than 10 epochs and it should have reached an accuracy over 99.9% (firing the callback). This happened so quickly because of the pre-trained model you used, which already contained information to classify humans from horses. Really cool!
Now take a quick look at the training and validation accuracies for each epoch of training:
```
# Plot the training and validation accuracies for each epoch
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
```
You will need to submit this notebook for grading. To download it, click on the `File` tab in the upper left corner of the screen then click on `Download` -> `Download .ipynb`. You can name it anything you want as long as it is a valid `.ipynb` (jupyter notebook) file.
**Congratulations on finishing this week's assignment!**
You have successfully implemented a convolutional neural network that leverages a pre-trained network to help you solve the problem of classifying humans from horses.
**Keep it up!**
# Introduction to Taxi ETL Job
This is the Taxi ETL job to generate the input datasets for the Taxi XGBoost job.
## Prerequisites
### 1. Download data
All data could be found at https://www1.nyc.gov/site/tlc/about/tlc-trip-record-data.page
### 2. Download needed jars
* [cudf-21.12.2-cuda11.jar](https://repo1.maven.org/maven2/ai/rapids/cudf/21.12.2/)
* [rapids-4-spark_2.12-21.12.0.jar](https://repo1.maven.org/maven2/com/nvidia/rapids-4-spark_2.12/21.12.0/rapids-4-spark_2.12-21.12.0.jar)
### 3. Start Spark Standalone
Before running the script, please set up Spark standalone mode.
### 4. Add ENV
```
$ export SPARK_JARS=cudf-21.12.2-cuda11.jar,rapids-4-spark_2.12-21.12.0.jar
```
### 5. Start Jupyter Notebook with spylon-kernel or toree
```
$ jupyter notebook --allow-root --notebook-dir=${your-dir} --config=${your-configs}
```
## Import Libs
```
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.DataTypes.{DoubleType, IntegerType, StringType}
import org.apache.spark.sql.types.{FloatType, StructField, StructType}
```
## Script Settings
### 1. File Path Settings
* Define input file path
```
val dataRoot = sys.env.getOrElse("DATA_ROOT", "/data")
val rawPath = dataRoot + "/taxi/taxi-etl-input-small.csv"
val outPath = dataRoot + "/taxi/output"
```
## Function and Object Definitions
### Define the constants
* Define input file schema
```
val rawSchema = StructType(Seq(
StructField("vendor_id", StringType),
StructField("pickup_datetime", StringType),
StructField("dropoff_datetime", StringType),
StructField("passenger_count", IntegerType),
StructField("trip_distance", DoubleType),
StructField("pickup_longitude", DoubleType),
StructField("pickup_latitude", DoubleType),
StructField("rate_code", StringType),
StructField("store_and_fwd_flag", StringType),
StructField("dropoff_longitude", DoubleType),
StructField("dropoff_latitude", DoubleType),
StructField("payment_type", StringType),
StructField("fare_amount", DoubleType),
StructField("surcharge", DoubleType),
StructField("mta_tax", DoubleType),
StructField("tip_amount", DoubleType),
StructField("tolls_amount", DoubleType),
StructField("total_amount", DoubleType)
))
def dataRatios: (Int, Int, Int) = {
val ratios = (80, 20)
(ratios._1, ratios._2, 100 - ratios._1 - ratios._2)
}
val (trainRatio, evalRatio, trainEvalRatio) = dataRatios
```
* Build the spark session and dataframe
```
// Build the spark session and data reader as usual
val sparkSession = SparkSession.builder.appName("taxi-etl").getOrCreate
val df = sparkSession.read.option("header", true).schema(rawSchema).csv(rawPath)
```
* Define some ETL functions
```
def dropUseless(dataFrame: DataFrame): DataFrame = {
dataFrame.drop(
"dropoff_datetime",
"payment_type",
"surcharge",
"mta_tax",
"tip_amount",
"tolls_amount",
"total_amount")
}
def encodeCategories(dataFrame: DataFrame): DataFrame = {
val categories = Seq("vendor_id", "rate_code", "store_and_fwd_flag")
(categories.foldLeft(dataFrame) {
case (df, category) => df.withColumn(category, hash(col(category)))
}).withColumnRenamed("store_and_fwd_flag", "store_and_fwd")
}
def fillNa(dataFrame: DataFrame): DataFrame = {
dataFrame.na.fill(-1)
}
def removeInvalid(dataFrame: DataFrame): DataFrame = {
val conditions = Seq(
Seq("fare_amount", 0, 500),
Seq("passenger_count", 0, 6),
Seq("pickup_longitude", -75, -73),
Seq("dropoff_longitude", -75, -73),
Seq("pickup_latitude", 40, 42),
Seq("dropoff_latitude", 40, 42))
conditions
.map { case Seq(column, min, max) => "%s > %d and %s < %d".format(column, min, column, max) }
.foldLeft(dataFrame) {
_.filter(_)
}
}
def convertDatetime(dataFrame: DataFrame): DataFrame = {
val datetime = col("pickup_datetime")
dataFrame
.withColumn("pickup_datetime", to_timestamp(datetime))
.withColumn("year", year(datetime))
.withColumn("month", month(datetime))
.withColumn("day", dayofmonth(datetime))
.withColumn("day_of_week", dayofweek(datetime))
.withColumn(
"is_weekend",
col("day_of_week").isin(1, 7).cast(IntegerType)) // 1: Sunday, 7: Saturday
.withColumn("hour", hour(datetime))
.drop(datetime.toString)
}
def addHDistance(dataFrame: DataFrame): DataFrame = {
val P = math.Pi / 180
val lat1 = col("pickup_latitude")
val lon1 = col("pickup_longitude")
val lat2 = col("dropoff_latitude")
val lon2 = col("dropoff_longitude")
val internalValue = (lit(0.5)
- cos((lat2 - lat1) * P) / 2
+ cos(lat1 * P) * cos(lat2 * P) * (lit(1) - cos((lon2 - lon1) * P)) / 2)
val hDistance = lit(12734) * asin(sqrt(internalValue))
dataFrame.withColumn("h_distance", hDistance)
}
// def preProcess(dataFrame: DataFrame): DataFrame = {
// val processes = Seq[DataFrame => DataFrame](
// dropUseless,
// encodeCategories,
// fillNa,
// removeInvalid,
// convertDatetime,
// addHDistance
// )
// processes
// .foldLeft(dataFrame) { case (df, process) => process(df) }
// }
```
* Define main ETL function
```
def preProcess(dataFrame: DataFrame, splits: Array[Int]): Array[DataFrame] = {
val processes = Seq[DataFrame => DataFrame](
dropUseless,
encodeCategories,
fillNa,
removeInvalid,
convertDatetime,
addHDistance
)
processes
.foldLeft(dataFrame) { case (df, process) => process(df) }
.randomSplit(splits.map(_.toDouble))
}
val dataset = preProcess(df, Array(trainRatio, trainEvalRatio, evalRatio))
```
## Run ETL Process and Save the Result
```
val t0 = System.currentTimeMillis
for ((name, index) <- Seq("train", "eval", "trans").zipWithIndex) {
dataset(index).write.mode("overwrite").parquet(outPath + "/parquet/" + name)
dataset(index).write.mode("overwrite").csv(outPath + "/csv/" + name)
}
val t1 = System.currentTimeMillis
println("Elapsed time : " + ((t1 - t0).toFloat / 1000) + "s")
sparkSession.stop()
```
# Calculate Shapley values
Shapley values as used in coalition game theory were introduced by Lloyd Shapley in 1953.
[Scott Lundberg](http://scottlundberg.com/) applied Shapley values for calculating feature importance in [2017](http://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions.pdf).
If you want to read the paper, I recommend reading:
Abstract, 1 Introduction, 2 Additive Feature Attribution Methods, (skip 2.1, 2.2, 2.3), and 2.4 Classic Shapley Value Estimation.
Lundberg calls this feature importance method "SHAP", which stands for SHapley Additive exPlanations.
Here’s the formula for calculating Shapley values:
$ \phi_{i} = \sum_{S \subseteq M \setminus i} \frac{|S|! (|M| - |S| -1 )!}{|M|!} [f(S \cup i) - f(S)]$
A key part of this is the difference between the model’s prediction with the feature $i$, and the model’s prediction without feature $i$.
$S$ refers to a subset of features that doesn’t include the feature for which we're calculating $\phi_i$.
$S \cup i$ is the subset that includes features in $S$ plus feature $i$.
$S \subseteq M \setminus i$ in the $\Sigma$ symbol is saying, all sets $S$ that are subsets of the full set of features $M$, excluding feature $i$.
##### Options for your learning journey
* If you’re okay with just using this formula, you can skip ahead to the coding section below.
* If you would like an explanation for what this formula is doing, please continue reading here.
## Optional (explanation of this formula)
The part of the formula with the factorials calculates the number of ways to generate the collection of features, where order matters.
$\frac{|S|! (|M| - |S| -1 )!}{|M|!}$
#### Adding features to a Coalition
The following concepts come from coalition game theory, so when we say "coalition", think of it as a team, where members of the team are added, one after another, in a particular order.
Let’s imagine that we’re creating a coalition of features, by adding one feature at a time to the coalition, and including all $|M|$ features. Let’s say we have 3 features total. Here are all the possible ways that we can create this “coalition” of features.
<ol>
<li>$x_0,x_1,x_2$</li>
<li>$x_0,x_2,x_1$</li>
<li>$x_1,x_0,x_2$</li>
<li>$x_1,x_2,x_0$</li>
<li>$x_2,x_0,x_1$</li>
<li>$x_2,x_1,x_0$</li>
</ol>
Notice that for $|M| = 3$ features, there are $3! = 3 \times 2 \times 1 = 6$ possible ways to create the coalition.
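If you want to convince yourself of that count, one quick way (just an illustration, not needed for the rest of the notebook) is to enumerate the orderings with `itertools`:
```
from itertools import permutations

orderings = list(permutations([0, 1, 2]))
print(len(orderings))  # 3! = 6
print(orderings)
```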
#### marginal contribution of a feature
For each of the 6 ways to create a coalition, let's see how to calculate the marginal contribution of feature $x_2$.
<ol>
<li>Model’s prediction when it includes features 0,1,2, minus the model’s prediction when it includes only features 0 and 1.
$x_0,x_1,x_2$: $f(x_0,x_1,x_2) - f(x_0,x_1)$
<li>Model’s prediction when it includes features 0 and 2, minus the prediction when using only feature 0. Notice that feature 1 is added after feature 2, so it’s not included in the model.
$x_0,x_2,x_1$: $f(x_0,x_2) - f(x_0)$</li>
<li>Model's prediction including all three features, minus when the model is only given features 1 and 0.
$x_1,x_0,x_2$: $f(x_1,x_0,x_2) - f(x_1,x_0)$</li>
<li>Model's prediction when given features 1 and 2, minus when the model is only given feature 1.
$x_1,x_2,x_0$: $f(x_1,x_2) - f(x_1)$</li>
<li>Model’s prediction if it only uses feature 2, minus the model’s prediction if it has no features. When there are no features, the model’s prediction would be the average of the labels in the training data.
$x_2,x_0,x_1$: $f(x_2) - f( )$
</li>
<li>Model's prediction (same as the previous one)
$x_2,x_1,x_0$: $f(x_2) - f( )$
</li>
</ol>
Notice that some of these marginal contribution calculations look the same. For example the first and third sequences, $f(x_0,x_1,x_2) - f(x_0,x_1)$ would get the same result as $f(x_1,x_0,x_2) - f(x_1,x_0)$. Same with the fifth and sixth. So we can use factorials to help us calculate the number of permutations that result in the same marginal contribution.
#### break into 2 parts
To get to the formula that we saw above, we can break up the sequence into two sections: the sequence of features before adding feature $i$; and the sequence of features that are added after feature $i$.
For the set of features that are added before feature $i$, we’ll call this set $S$. For the set of features that are added after feature $i$ is added, we’ll call this $Q$.
So, given the six sequences, and that feature $i$ is $x_2$ in this example, here’s what set $S$ and $Q$ are for each sequence:
<ol>
<li>$x_0,x_1,x_2$: $S$ = {0,1}, $Q$ = {}</li>
<li>$x_0,x_2,x_1$: $S$ = {0}, $Q$ = {1} </li>
<li>$x_1,x_0,x_2$: $S$ = {1,0}, $Q$ = {} </li>
<li>$x_1,x_2,x_0$: $S$ = {1}, $Q$ = {0} </li>
<li>$x_2,x_0,x_1$: $S$ = {}, $Q$ = {0,1} </li>
<li>$x_2,x_1,x_0$: $S$ = {}, $Q$ = {1,0} </li>
</ol>
So for the first and third sequences, these have the same set S = {0,1} and same set $Q$ = {}.
Another way to calculate that there are two of these sequences is to take $|S|! \times |Q|! = 2! \times 0! = 2$.
Similarly, the fifth and sixth sequences have the same set S = {} and Q = {0,1}.
Another way to calculate that there are two of these sequences is to take $|S|! \times |Q|! = 0! \times 2! = 2$.
#### And now, the original formula
To use the notation of the original formula, note that $|Q| = |M| - |S| - 1$.
Recall that to calculate that there are 6 total sequences, we can use $|M|! = 3! = 3 \times 2 \times 1 = 6$.
We’ll divide $|S|! \times (|M| - |S| - 1)!$ by $|M|!$ to get the proportion assigned to each marginal contribution.
This is the weight that will be applied to each marginal contribution, and the weights sum to 1.
So that’s how we get the formula:
$\frac{|S|! (|M| - |S| -1 )!}{|M|!} [f(S \cup i) - f(S)]$
for each set $S \subseteq M \setminus i$
We can sum up the weighted marginal contributions for all sets $S$, and this represents the importance of feature $i$.
You’ll get to practice this in code!
```
import sys
!{sys.executable} -m pip install numpy==1.14.5
!{sys.executable} -m pip install scikit-learn==0.19.1
!{sys.executable} -m pip install graphviz==0.9
!{sys.executable} -m pip install shap==0.25.2
import sklearn
import shap
import numpy as np
import graphviz
from math import factorial
```
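For reference, the whole formula fits in a short generic function that enumerates every subset $S$ not containing feature $i$ and sums the weighted marginal contributions. This is only a sketch: the `value_of` callable, which stands in for "fit the model on that subset of features and predict on the sample", is a hypothetical placeholder. The rest of the notebook walks through the same computation by hand.
```
from itertools import combinations
from math import factorial

def shapley_value(value_of, n_features, i):
    """value_of(subset) should return the model prediction using only that subset of features."""
    others = [f for f in range(n_features) if f != i]
    phi = 0.0
    for size in range(len(others) + 1):
        for S in combinations(others, size):
            # weight = |S|! (|M| - |S| - 1)! / |M|!
            weight = factorial(len(S)) * factorial(n_features - len(S) - 1) / factorial(n_features)
            phi += weight * (value_of(set(S) | {i}) - value_of(set(S)))
    return phi
```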
## Generate input data and fit a tree model
We'll create data where features 0 and 1 form the "AND" operator, and feature 2 does not contribute to the prediction (because it's always zero).
```
# AND case (features 0 and 1)
N = 100
M = 3
X = np.zeros((N,M))
X.shape
y = np.zeros(N)
X[:1 * N//4, 1] = 1
X[:N//2, 0] = 1
X[N//2:3 * N//4, 1] = 1
y[:1 * N//4] = 1
# fit model
model = sklearn.tree.DecisionTreeRegressor(random_state=0)
model.fit(X, y)
# draw model
dot_data = sklearn.tree.export_graphviz(model, out_file=None, filled=True, rounded=True, special_characters=True)
graph = graphviz.Source(dot_data)
graph
```
### Calculate Shap values
We'll try to calculate the local feature importance of feature 0.
We have 3 features, $x_0, x_1, x_2$. For feature $x_0$, determine what the model predicts with or without $x_0$.
Subsets S that exclude feature $x_0$ are:
{}
{$x_1$}
{$x_2$}
{$x_1,x_2$}
We want to see what the model predicts with feature $x_0$ compared to the model without feature $x_0$:
$f(x_0) - f( )$
$f(x_0,x_1) - f(x_1)$
$f(x_0,x_2) - f(x_2)$
$f(x_0,x_1,x_2) - f(x_1,x_2)$
## Sample data point
We'll calculate the local feature importance of a sample data point, where
feature $x_0 = 1$
feature $x_1 = 1$
feature $x_2 = 1$
```
sample_values = np.array([1,1,1])
print(f"sample values to calculate local feature importance on: {sample_values}")
```
## helper function
To make things easier, we'll use a helper function that takes the entire feature set M, and also a list of the features (columns) that we want, and puts them together into a 2D array.
```
def get_subset(X, feature_l):
"""
Given a 2D array containing all feature columns,
and a list of integers representing which columns we want,
Return a 2D array with just the subset of features desired
"""
cols_l = []
for f in feature_l:
cols_l.append(X[:,f].reshape(-1,1))
return np.concatenate(cols_l, axis=1)
# try it out
tmp = get_subset(X,[0,2])
tmp[0:10]
```
## helper function to calculate permutation weight
This helper function calculates
$\frac{|S|! (|M| - |S| - 1)!}{|M|!}$
```
from math import factorial
def calc_weight(size_S, num_features):
return factorial(size_S) * factorial(num_features - size_S - 1) / factorial(num_features)
```
Try it out when size of S is 2 and there are 3 features total.
The answer should be equal to $\frac{2! \times (3-2-1)!}{3!} = \frac{2 \times 1}{6} = \frac{1}{3}$
```
calc_weight(size_S=2,num_features=3)
```
## case A
Calculate the prediction of a model that uses features 0 and 1
Calculate the prediction of a model that uses feature 1
Calculate the difference (the marginal contribution of feature 0)
$f(x_0,x_1) - f(x_1)$
#### Calculate $f(x_0,x_1)$
```
# S_union_i
S_union_i = get_subset(X,[0,1])
# fit model
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)
```
Remember, for the sample input for which we'll calculate feature importance, we chose values of 1 for all features.
```
# This will throw an error
try:
f_S_union_i.predict(np.array([1,1]))
except Exception as e:
print(e)
```
The error message says:
>Reshape your data either using array.reshape(-1, 1) if your data has a single feature or array.reshape(1, -1) if it contains a single sample.
So we'll reshape the data so that it represents a sample (a row), which means it has 1 row and 1 or more columns.
```
# feature 0 and feature 1 are both 1 in the sample input
sample_input = np.array([1,1]).reshape(1,-1)
sample_input
```
The prediction of the model when it has features 0 and 1 is:
```
pred_S_union_i = f_S_union_i.predict(sample_input)
pred_S_union_i
```
When feature 0 and feature 1 are both 1, the prediction of the model is 1
#### Calculate $f(x_1)$
```
# S
S = get_subset(X,[1])
f_S = sklearn.tree.DecisionTreeRegressor()
f_S.fit(S, y)
```
The sample input for feature 1 is 1.
```
sample_input = np.array([1]).reshape(1,-1)
```
The model's prediction when it is only training on feature 1 is:
```
pred_S = f_S.predict(sample_input)
pred_S
```
When feature 1 is 1, then the prediction of this model is 0.5. If you look at the data in X, this makes sense, because when feature 1 is 1, half of the time, the label in y is 0, and half the time, the label in y is 1. So on average, the prediction is 0.5
#### Calculate difference
```
diff_A = pred_S_union_i - pred_S
diff_A
```
#### Calculate the weight
Calculate the weight assigned to the marginal contribution. In this case, since this marginal contribution occurs in 1 out of the 6 possible permutations of the 3 features, its weight is 1/6.
```
size_S = S.shape[1] # should be 1
weight_A = calc_weight(size_S, M)
weight_A # should be 1/6
```
## Quiz: Case B
Calculate the prediction of a model that uses features 0 and 2
Calculate the prediction of a model that uses feature 2
Calculate the difference
$f(x_0,x_2) - f(x_2)$
#### Calculate $f(x_0,x_2)$
```
# TODO
S_union_i = get_subset(X,[0,2])
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)
sample_input = np.array([1,1]).reshape(1,-1)
pred_S_union_i = f_S_union_i.predict(sample_input)
pred_S_union_i
```
Since we're using features 0 and 2, and feature 2 doesn't help with predicting the output, then the model really just depends on feature 0. When feature 0 is 1, half of the labels are 0, and half of the labels are 1. So the average prediction is 0.5
#### Calculate $f(x_2)$
```
# TODO
S = get_subset(X,[2])
f_S = sklearn.tree.DecisionTreeRegressor()
f_S.fit(S, y)
sample_input = np.array([1]).reshape(1,-1)
pred_S = f_S.predict(sample_input)
pred_S
```
Since feature 2 doesn't help with predicting the labels in y, and feature 2 is 0 for all 100 training observations, then the prediction of the model is the average of all 100 training labels. 1/4 of the labels are 1, and the rest are 0. So that prediction is 0.25
#### Calculate the difference in predictions
```
# TODO
diff_B = pred_S_union_i - pred_S
diff_B
```
#### Calculate the weight
```
# TODO
size_S = S.shape[1] # is 1
weight_B = calc_weight(size_S, M)
weight_B # should be 1/6
```
# Quiz: Case C
Calculate the prediction of a model that uses features 0,1 and 2
Calculate the prediction of a model that uses feature 1 and 2
Calculate the difference
$f(x_0,x_1,x_2) - f(x_1,x_2)$
#### Calculate $f(x_0,x_1,x_2) $
```
# TODO
S_union_i = get_subset(X,[0,1,2])
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)
sample_input = np.array([1,1,1]).reshape(1,-1)
pred_S_union_i = f_S_union_i.predict(sample_input)
pred_S_union_i
```
When we use all three features, the model is able to predict that if feature 0 and feature 1 are both 1, then the label is 1.
#### Calculate $f(x_1,x_2)$
```
# TODO
S = get_subset(X,[1,2])
f_S = sklearn.tree.DecisionTreeRegressor()
f_S.fit(S, y)
sample_input = np.array([1,1]).reshape(1,-1)
pred_S = f_S.predict(sample_input)
pred_S
```
When the model is trained on features 1 and 2, then its training data tells it that half of the time, when feature 1 is 1, the label is 0; and half the time, the label is 1. So the average prediction of the model is 0.5
#### Calculate difference in predictions
```
# TODO
diff_C = pred_S_union_i - pred_S
diff_C
```
#### Calculate weights
```
# TODO
size_S = S.shape[1]
weight_C = calc_weight(size_S,M) # should be 2 / 6 = 1/3
weight_C
```
## Quiz: case D: remember to include the empty set!
The empty set is also a set. We'll compare how the model does when it has no features, and see how that compares to when it gets feature 0 as input.
Calculate the prediction of a model that uses features 0.
Calculate the prediction of a model that uses no features.
Calculate the difference
$f(x_0) - f()$
#### Calculate $f(x_0)$
```
# TODO
S_union_i = get_subset(X,[0])
f_S_union_i = sklearn.tree.DecisionTreeRegressor()
f_S_union_i.fit(S_union_i, y)
sample_input = np.array([1]).reshape(1,-1)
pred_S_union_i = f_S_union_i.predict(sample_input)
pred_S_union_i
```
With just feature 0 as input, the model predicts 0.5
#### Calculate $f()$
**hint**: you don't have to fit a model, since there are no features to input into the model.
```
# TODO
# with no input features, the model will predict the average of the labels, which is 0.25
pred_S = np.mean(y)
pred_S
```
With no input features, the model's best guess is the average of the labels, which is 0.25
#### Calculate difference in predictions
```
# TODO
diff_D = pred_S_union_i - pred_S
diff_D
```
#### Calculate weight
We expect this to be: 0! * (3-0-1)! / 3! = 2/6 = 1/3
```
# TODO
size_S = 0
weight_D = calc_weight(size_S,M) # weight is 1/3
weight_D
```
# Calculate Shapley value
For a single sample observation, where feature 0 is 1, feature 1 is 1, and feature 2 is 1, calculate the Shapley value of feature 0 as the weighted sum of the differences in predictions.
$\phi_{i} = \sum_{S \subseteq M \setminus i} weight_S \times (f(S \cup i) - f(S))$
```
# TODO
shap_0 = weight_A * diff_A + weight_B * diff_B + weight_C * diff_C + weight_D * diff_D
shap_0
```
## Verify with the shap library
The [shap](https://github.com/slundberg/shap) library is written by Scott Lundberg, the creator of Shapley Additive Explanations.
```
sample_values = np.array([1,1,1])
shap_values = shap.TreeExplainer(model).shap_values(sample_values)
print(f"Shapley value for feature 0 that we calculated: {shap_0}")
print(f"Shapley value for feature 0 is {shap_values[0]}")
print(f"Shapley value for feature 1 is {shap_values[1]}")
print(f"Shapley value for feature 2 is {shap_values[2]}")
```
## Quiz: Does this make sense?
The shap library outputs the shap values for features 0, 1 and 2. We can see that the Shapley value for feature 0 matches what we calculated, and that feature 1 is given the same importance as feature 0.
* Given that the training data is simulating an AND operation, do you think these values make sense?
* Do you think feature 0 and 1 are equally important, or is one more important than the other?
* Does the importance of feature 2 make sense as well?
* How does this compare to the feature importance that's built into scikit-learn?
## Answer
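The written answer is left to you, but one way to explore the last question is to print the impurity-based importances that the fitted tree already exposes and compare them with the Shapley values above (a quick inspection sketch, not a full answer):
```
# Impurity-based feature importances of the fitted decision tree
print(model.feature_importances_)
```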
## Note
This method is general enough that it works for any model, not just trees. There is an optimized way to calculate this when the complex model being explained is a tree-based model. We'll look at that next.
## Solution
[Solution notebook](calculate_shap_solution.ipynb)
# Allosteric pathways with current flow analysis on protein-cofactor networks
*This tutorial shows how to build and analyze networks that include protein residues and cofactors (e.g. lipids or small molecules).*
***Note***: To build and analyze a residue interaction network of the isolated protein only, just skip the steps in Sections 2b and 3a, and the inputs named *interactor_atom_inds_file.npy* or *additional_interactor_**.
## Citing this work
The code and developments here are described in two papers. <br>
**[1]** P.W. Kang, A.M. Westerlund, J. Shi, K. MacFarland White, A.K. Dou, A.H. Cui, J.R. Silva, L. Delemotte and J. Cui. <br>
*Calmodulin acts as a state-dependent switch to control a cardiac potassium channel opening*. 2020<br><br>
**[2]** A.M. Westerlund, O. Fleetwood, S. Perez-Conesa and L. Delemotte. <br>
*Network analysis reveals how lipids and other cofactors influence membrane protein allostery*. 2020
[1] is an applications-oriented paper describing how to analyze **residue interaction networks** of **isolated proteins**. <br>
[2] is a methods-oriented paper of how to build and analyze **residue interaction networks** that include **proteins and cofactors**.
## Short background
A residue interaction network is typically obtained from the element-wise product of two matrices: <br>
  1) Contact map. <br>
  2) Correlation (of node fluctuations) map.
For protein residue interaction networks, the node fluctuations correspond to protein residue fluctuations around an equilibrium position [1]. The method used to build contact and correlation maps which include cofactor nodes is described in detail in [2].
### Contact map
The contact map here is defined using a truncated Gaussian kernel $K$ to smooth the contacts. For a frame with a given distance $d$ between two nodes,
$$
K(d) =
\begin{cases}
1 & \text{if } d \le c \\
\exp (-\frac{d^2}{2\sigma^2}) / \exp (-\frac{c^2}{2\sigma^2}) & \text{otherwise}
\end{cases}
$$
By default, $c=0.45$ nm and $\sigma=0.138$ nm. <br>
The cutoff, $c=0.45$, ensures a contact if $d \le 4.5$ Å. The standard deviation, $\sigma=0.138$, is chosen such that $K(d=0.8 \text{ nm}) = 10^{-5}$. <br><br>
The final contact map is averaged over frames.
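To make the kernel concrete, here is a minimal NumPy sketch of $K(d)$ with the default parameters (an illustration only; the actual implementation lives inside the package used below):
```
import numpy as np

def contact_kernel(d, c=0.45, sigma=0.138):
    """Truncated Gaussian contact kernel: 1 below the cutoff, a scaled Gaussian tail above it."""
    d = np.asarray(d, dtype=float)
    tail = np.exp(-d**2 / (2 * sigma**2)) / np.exp(-c**2 / (2 * sigma**2))
    return np.where(d <= c, 1.0, tail)

print(contact_kernel([0.3, 0.45, 0.6, 0.8]))  # -> [1.0, 1.0, ~0.016, ~1e-5]
```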
### Correlation map
The correlation of node (protein residues in the case of isolated proteins) fluctuations is calculated using mutual information.
$$
M_{ij} = H_i + H_j - H_{ij},
$$
where
$$
H_i = -\int\limits_X \rho_i(x)\ln \rho_i(x) \, dx.
$$
$\rho_i(x)$ is the density of distances from the node equilibrium position. This is estimated with Gaussian mixture models and the Bayesian information criterion model selection.
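As an illustration of that last step (purely schematic; the tutorial's own code handles this internally), a one-dimensional density can be fitted with Gaussian mixtures of increasing size, the BIC used to select one, and the entropy approximated from the sample log-densities:
```
import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.default_rng(0)
d = rng.normal(size=(2000, 1))  # toy stand-in for distances from the equilibrium position

models = [GaussianMixture(n_components=k, random_state=0).fit(d) for k in range(1, 6)]
best = min(models, key=lambda g: g.bic(d))   # BIC model selection
H = -np.mean(best.score_samples(d))          # Monte Carlo estimate of -E[ln rho(x)]
print(best.n_components, H)
```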
### Including cofactors in the network
Cofactors, such as lipids and small molecules, are treated slightly differently than protein residues. The details are described in [2]. Practically, cofactors are processed and added to the network in separate steps from the protein residues. The network nodes that represent cofactors are called *interactor nodes*. The following is needed to add cofactors to the network:
1. **Trajectory (and .pdb file) with protein and cofactors**: If the trajectory is centered on the protein, make sure that the other molecules are not split across simulation box boundaries. In gromacs, for example, this may be avoided in *gmx trjconv* by using the option *-pbc res*. <br>
2. **Definition of interactors**: A cofactor may be described by one or several *interactors*. An interactor could e.g. be the lipid head group. We therefore have to specify which cofactor atoms form an interactor. More details are found in Section 2b. <br>
3. **Contact map and fluctuations**: The practical details are outlined in Sections 2b and 3a.
### Current flow analysis
The networks are analyzed using a current flow analysis [3,4] framework. The code supports both current flow betweenness and current flow closeness analysis. In short, the current flow computes the net diffusion along edges between network nodes. The net throughput of a node is given by the sum over edges.
Current flow betweenness is useful for identifying allosteric pathways [5,1]. Specifically, it shows how important each residue is for transmitting allosteric signals from a source (allosteric site) to a sink (functional site). Current flow closeness centrality [3], in contrast, indicates signaling efficiency within the network (using a "distance" measured in current flow).
To perform current flow analysis, you need a contact map and a similarity map (e.g. mutual information or Pearson correlation). These are computed in Section 2-3. The practical details are described in Section 4.
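To build intuition for what these quantities measure, here is a toy example on a four-node network using networkx. allopath implements its own current flow routines; this sketch (with made-up contact and MI values) only illustrates the source-to-sink concept.
```
import numpy as np
import networkx as nx

# Toy network: edge weights are the element-wise product of a contact map and an MI map
contacts = np.array([[0, 1, 1, 0],
                     [1, 0, 1, 0],
                     [1, 1, 0, 1],
                     [0, 0, 1, 0]], dtype=float)
mi = np.array([[0.0, 0.8, 0.2, 0.0],
               [0.8, 0.0, 0.5, 0.0],
               [0.2, 0.5, 0.0, 0.9],
               [0.0, 0.0, 0.9, 0.0]])
G = nx.from_numpy_array(contacts * mi)

# Current flow betweenness restricted to a source (node 0) and a sink (node 3)
print(nx.current_flow_betweenness_centrality_subset(G, sources=[0], targets=[3], weight='weight'))
# Current flow closeness of all nodes
print(nx.current_flow_closeness_centrality(G, weight='weight'))
```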
## Additional references
[3] U. Brandes and D. Fleischer, Springer, Berlin, Heidelberg, 2005 <br>
[4] M. E. J. Newman, Social Networks, 2005 <br>
[5] W.M. Botello-Smith and Y. Luo, J. Chem. Theory Comput., 2019
## 1. Setup
```
import allopath
import numpy as np
# Set the trajectory that should be analyzed.
structure=['input_data/my_system.pdb']
trajs=['input_data/system_traj1.dcd','input_data/system_traj2.dcd']
# Specify how many cores to run the calculations on.
n_cores=4
# Set the output directories (out_dir is where the main data will be saved,
# while out_dir_MI will contain the MI matrix data, see below on how they are used).
out_dir='Results_data/'
out_dir_MI='Results_data/MI_data/'
file_label='my_system' # Simulation label which will be appended to filenames of all written files (optional)
dt=1 # Trajectory stride (default=1)
```
## 2. Semi-binary contact maps
------------------------------------------
### 2a. Protein residue contact map
To compute the protein contact map (including only protein residue-residue interactions) we will use _ContactMap_.
***allopath.ContactMap***(**self,** *topology_file*, \**kwargs)
where *kwargs* is a dictionary with the keyword arguments (https://docs.python.org/2/glossary.html). This means that to construct a _ContactMap_ object we have to give at least the topology_file (_structure_) as input (but in principle we want the average over a trajectory):
> CM = allopath.ContactMap(structure)
We now create a dictionary, *kwargs*, to define the named/keyword arguments that should not take their default values, such as the trajectory; i.e., you may include all named input arguments that you want to modify and omit those that you wish to keep at their default values.
List of input keyword parameters:
* **trajectory_files**: Input trajectory files (.xtc, .dcd, etc)
* **trajectory_file_directory**: Input directory with trajectory files (.xtc, .dcd, etc.). This will load all trajectory files in the specified directory (this is complementary to *trajectory_files*).
* **file_label**: "File end name": label of the system that will be appended to the end of the produced files.
* **out_directory**: The directory where data should be written.
* **dt**: Trajectory stride.
* **query**: Atom-selection used on the trajectory, e.g. "protein and !(type H)" or "protein and name CA".
* **n_cores**: Number of jobs to run with joblib.
* **cutoff**: Cutoff value, $c$, in the truncated Gaussian kernel. For distances < cutoff, the contact will be set to one (default $c=0.45$ nm, see "Background: contact map" for definition).
* **std_dev**: Standard deviation value, $\sigma$, in the truncated Gaussian kernel. (default $\sigma=0.138$ nm => 1e-5 contact at 0.8 nm, see "Background: contact map" for definition)
* **per_frame**: Whether or not to compute contact map per frame instead of averaging over the trajectory (default=False).
* **start_frame**: Defines which frame to start calculations from. Used in combination with *per_frame*=True.
* **end_frame**: Defines which frame to end calculations at. Used in combination with *per_frame*=True.
* **ref_cmap_file**: File with reference cmap (e.g. average over all frames). Is used to make computations sparse/speed up calculation. Used in combination with *per_frame*=True.
The default values are: <br>
{'trajectory_files': '', <br>
'trajectory_file_directory': '', <br>
'dt': 1, <br>
'n_cores': 4, <br>
'out_directory': '', <br>
'file_label': '', <br>
'cutoff': 0.45, <br>
'query': 'protein and !(type H)', <br>
'start_frame': 0, <br>
'end_frame': -1, <br>
'ref_cmap_file': '', <br>
'per_frame': False, <br>
'std_dev': 0.138} <br>
Note that the trajectory files can either be given by explicitly naming them and inputting as *trajectory_files* (as we do with _trajs_, see below), or by simply inputting a directory containing all the '.xtc' or '.dcd' files that should be analyzed (*trajectory_file_directory*).
```
# Set inputs
kwargs={
'trajectory_files': trajs,
'file_label': file_label,
'out_directory': out_dir,
'dt': dt,
'n_cores': n_cores
}
# Compute contact map and write to file
CM = allopath.ContactMap(structure, **kwargs)
CM.run()
```
### 2b. Interactor node - protein residue contact map
The contact map of interactor-interactor and interactor-protein residue node contacts will be computed using *CofactorInteractors*.
***allopath.CofactorInteractors***(**self,** *topology_file*, \**kwargs)
*CofactorInteractors* is used to compute both the interactions that include cofactors and the cofactor fluctuations. The cofactor fluctuations will be used as input to the MI calculations.
List of input keyword parameters to create a contact map:
* **trajectory_files**: Input trajectory files (.xtc, .dcd, etc)
* **trajectory_file_directory**: Input directory with trajectory files (.xtc, .dcd, etc.). This will load all trajectory files in the specified directory.
* **file_label**: "File end name": label of the system that will be appended to the end of the produced files.
* **out_directory**: The directory where data should be written.
* **dt**: Trajectory stride.
* **cofactor_domain_selection**: A file containing cofactor-interactor selections. Each row should list the atoms that make up an interactor. Example of a domain selection file content: <br><br>
*resname POPC and name N C11 C12 C13 C14 C15 P O13 O14 O12 O11 C1 C2 <br>
resname POPC and name O21 C21 C22 O22 C23 C24 C25 C26 C27 C28 C29 C210 C211 C212 C213 C214 C215 C216 C217 C218 <br>
resname POPC and name C3 O31 C31 C32 O32 C33 C34 C35 C36 C37 C38 C39 C310 C311 C312 C313 C314 C315 C316*
<br><br>
* **cutoff**: Cutoff value, $c$, for binary residue-lipid contacts. For distances < cutoff, the contact will be set to one (default=0.45 nm).
* **std_dev**: Standard deviation value, $\sigma$, on the semi-binary Gaussian-kernel. (default=0.138 nm => 1e-5 contact at 0.8 nm)
The default values are: <br>
{'trajectory_files': '', <br>
'trajectory_file_directory': '', <br>
'dt': 1, <br>
'out_directory': '', <br>
'file_label': '', <br>
'cofactor_domain_selection': '', <br>
'cofactor_interactor_inds': '', <br>
'cofactor_interactor_coords': '', <br>
'compute_cofactor_interactor_fluctuations': False, <br>
'cofactor_interactor_atom_inds': '', <br>
'cutoff': 0.45, <br>
'std_dev': 0.138} <br>
```
# Set inputs
cofactor_domain_selection_file='input_data/cofactor_domain_selection.txt'
kwargs={
'trajectory_files': trajs,
'file_label': file_label,
'out_directory': out_dir,
'dt': dt,
'cofactor_domain_selection': cofactor_domain_selection_file
}
# Compute contact map and write to file
CI = allopath.CofactorInteractors(structure, **kwargs)
CI.run()
```
## 3. Mutual information
-----------------------------------
To compute mutual information (MI) between nodes we use *MutualInformation* and *CofactorInteractors*.
The MI is done in **four** steps. <br>
**(a)** Computing the interactor node fluctuations using *CofactorInteractors*. These will be given as input to *MutualInformation*.<br>
**(b)** Computing the off-diagonal elements in the MI matrix using *MutualInformation*. Because this is computationally demanding, we can 1) use the contact map as input to ignore non-contacting residues and 2) split the matrix into blocks that can be processed in parallel (although we will do it in sequence in this tutorial).
> We will divide the matrix into 4 blocks along the columns and 4 blocks along the rows. As we include the diagonal blocks but use symmetry on the off-diagonal blocks, we get *n_matrix_block_cols*=4 and *n_blocks* = n_matrix_block_cols×(n_matrix_block_cols-1)/2 + n_matrix_block_cols = 10 blocks (see the short sanity check after this list). The input argument *i_block* should be between 1 and *n_blocks*, denoting which block should be constructed. <br>
**(c)** Computing the diagonal elements in the MI matrix using *MutualInformation*. This requires *do_diagonal*=True as input. *Note: This is only needed if you normalize the mutual information in allopath.CurrentFlow.* (Section 4)<br>
**(d)** Building the full off-diagonal matrix based on blocks.<br><br>
*Note:* The calculations in **(b)** and **(c)** are time consuming, but they are structured so that they can be launched in parallel. **(d)** cannot be done until the calculations in **(b)** have finished.
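As a quick sanity check of the block count mentioned in step (b): with 4 column blocks we indeed obtain 10 blocks to compute.
```
n_matrix_block_cols = 4
# off-diagonal blocks of the upper triangle plus the diagonal blocks
n_blocks = n_matrix_block_cols * (n_matrix_block_cols - 1) // 2 + n_matrix_block_cols
print(n_blocks)  # 10
```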
### 3a. Computing interactor node fluctuations
The interactor fluctuations will be computed using *CofactorInteractors*.
***allopath.CofactorInteractors***(**self,** *topology_file*, \**kwargs)
As mentioned, *CofactorInteractors* is used to compute both the interactions that include cofactors and the cofactor fluctuations. To compute interactor fluctuations, we need to set **compute_cofactor_interactor_fluctuations=True** in *kwargs*.
List of input keyword parameters to compute interactor fluctuations:
* **trajectory_files**: Input trajectory files (.xtc, .dcd, etc)
* **trajectory_file_directory**: Input directory with trajectory files (.xtc, .dcd, etc.). This will load all trajectory files in the specified directory.
* **file_label**: "File end name": label of the system that will be appended to the end of the produced files.
* **out_directory**: The directory where data should be written.
* **dt**: Trajectory stride.
* **cofactor_interactor_inds**: (generated when computing the interactor node contact map).
* **cofactor_interactor_coords**: (generated when computing the interactor node contact map).
* **compute_interactor_node_fluctuations**: Whether or not to compute the fluctuations. Default is False. Set to True.
The default values are: <br>
{'trajectory_files': '', <br>
'trajectory_file_directory': '', <br>
'dt': 1, <br>
'out_directory': '', <br>
'file_label': '', <br>
'cofactor_domain_selection': '', <br>
'cofactor_interactor_inds': '', <br>
'cofactor_interactor_coords': '', <br>
'compute_cofactor_interactor_fluctuations': False, <br>
'cofactor_interactor_atom_inds': '', <br>
'cutoff': 0.45, <br>
'std_dev': 0.138} <br>
```
# Set inputs
cofactor_interactor_inds = out_dir+'cofactor_interactor_indices_'+file_label+'.npy'
cofactor_interactor_coords = out_dir+'cofactor_interactor_coords_'+file_label+'.npy'
kwargs={
'trajectory_files': trajs,
'file_label': file_label,
'out_directory': out_dir,
'dt': dt,
'cofactor_interactor_inds': cofactor_interactor_inds,
'cofactor_interactor_coords': cofactor_interactor_coords,
'compute_interactor_node_fluctuations': True
}
# Compute interactor node fluctuations and write to file
CI = allopath.CofactorInteractors(structure, **kwargs)
CI.run()
```
### 3b. Computing off-diagonal elements
The MI matrix is obtained with *MutualInformation*.
***allopath.MutualInformation*** (**self,** *topology_file*, \**kwargs)
Similarly to *ContactMap* and *CofactorInteractors* it is in principle enough to input the structure.
> MI = allopath.MutualInformation(structure)
List of input keyword parameters:
* **trajectory_files**: Input trajectory files (.xtc, .dcd, etc)
* **trajectory_file_directory**: Input directory with trajectory files (.xtc, .dcd, etc.). This will load all trajectory files in the specified directory.
* **file_label**: "File end name": label of the system that will be appended to the end of the produced files.
* **out_directory**: The directory where data should be written.
* **dt**: Trajectory stride.
* **n_cores**: Number of jobs to run with joblib.
* **n_matrix_block_cols**: Number of blocks along the columns of the MI matrix. Example: 4 blocks => 10 parts (upper triangle + diagonal). See step (b) above.
* **i_block**: The matrix block for which MI should be calculated. See step (b) above.
* **n_split_sets**: Number of sampled sets of the same size as the original data set, used for a more accurate estimate of the entropy. Can also be used to check the uncertainty of the MI matrix.
* **additional_interactor_protein_contacts**: The interactor contact map (computed in Section 2b).
* **additional_interactor_fluctuations**: The interactor fluctuations (computed in Section 3a).
* **n_components_range:** Array with the lower and upper limit of GMM components used to estimate densities.
* **do_diagonal**: Whether or not to compute diagonal of residue-residue mutual information (default=False).
The default values are: <br>
{'trajectory_files': '', <br>
'trajectory_file_directory': '', <br>
'dt': 1, <br>
'out_directory': '', <br>
'file_label': '', <br>
'n_cores': -1, <br>
'contact_map_file': '', <br>
'i_block': 0, <br>
'n_matrix_block_cols': 1, <br>
'n_split_sets': 0, <br>
'additional_interactor_protein_contacts': '', <br>
'additional_interactor_fluctuations': '', <br>
'n_components_range': [1,4], <br>
'do_diagonal': False
} <br>
To compute the off-diagonal elements, we use the default *do_diagonal*=False and split the matrix into 10 blocks. We also do 10 bootstrap samplings to obtain a better entropy estimate.
```
n_blocks = 10
n_cols = 4
n_bootstraps = 10
contact_map = out_dir+'distance_matrix_semi_bin_'+file_label+'.txt'
additional_interactor_fluctuations = out_dir+'interactor_centroid_fluctuations_'+file_label+'.npy'
additional_interactor_protein_contacts = out_dir+'cofactor_protein_residue_semi_binary_cmap_'+file_label+'.npy'
n_components_range = [1,4]
for i_block in range(1,n_blocks+1):
# Set inputs
kwargs={
'trajectory_files': trajs,
'dt': dt,
'contact_map_file': contact_map,
'additional_interactor_fluctuations': additional_interactor_fluctuations,
'additional_interactor_protein_contacts': additional_interactor_protein_contacts,
'i_block': i_block,
'n_matrix_block_cols': n_cols,
'n_split_sets': n_bootstraps,
'n_components_range': n_components_range,
'file_label': file_label,
'out_directory': out_dir_MI,
'n_cores': n_cores,
}
# Compute mutual information matrix
MI = allopath.MutualInformation(structure, **kwargs)
MI.run()
```
### 3c. Computing diagonal elements
To estimate the diagonal elements, we use the same inputs as above except setting *do_diagonal*=True. Moreover, the matrix is not divided into blocks since the diagonal is much faster to compute.
***Note:*** *This step is only needed if you choose to normalize the mutual information in allopath.CurrentFlow (Section 4).*
```
# Set inputs
kwargs={
'trajectory_files': trajs,
'dt': dt,
'additional_interactor_fluctuations': additional_interactor_fluctuations,
'n_split_sets': n_bootstraps,
'file_label': file_label,
'out_directory': out_dir_MI,
'n_components_range': n_components_range,
'n_cores': n_cores,
'do_diagonal': True
}
# Compute diagonal of the MI matrix
MI = allopath.MutualInformation(structure, **kwargs)
MI.run()
```
### 3d. Building matrix from blocks
Next, the full MI matrix is built.
***allopath.from_matrix_blocks.build_matrix*** (*base_file_name*, *n_blocks*, file_label='', out_directory='')
We use the same parameters as above.
List of input parameters:
* **base_file_name**: the base name of each file to be processed. This is given by *base_file_name*=*path_to_data*+'res_res_MI_part_' .
* **n_blocks**: Total number of generated matrix blocks.
* **file_label**: "File end name": label of the system that will be appended to the end of the produced files (default is '').
* **out_directory**: The directory where data should be written to (default is '').
The input *base_file_name* is named after the files in "Results_data/MI_data/".
```
base_file_name=out_dir+'MI_data/res_res_MI_part_'
# Set inputs
kwargs={
'file_label': file_label,
'out_directory': out_dir+'MI_data/'
}
# Build matrix
allopath.from_matrix_blocks.build_matrix(base_file_name, n_blocks, **kwargs)
```
## 4. Current flow analysis
-----------------------------------
Current flow analysis is done with *CurrentFlow*.
***allopath.CurrentFlow*** (**self,** *similarity_map_filename*, *contact_map_filenames*, *sources_filename*, *sinks_filename*, \**kwargs)
To run current flow analysis in its simplest form, the files containing the similarity map (i.e. our MI matrix), the contact map, and the source and sink indices are needed.
> allopath.CurrentFlow(similarity_map_filename, contact_map_filename, sources_filename, sinks_filename)
Explanation of input (positional) parameters:
* **similarity_map_filename**: File containing the similarity map (i.e. the mutual information matrix).
* **contact_map_filenames**: File containing the contact map(s). If multiple are given, one current flow profile per contact map will be computed (*Note: multiple network calculations are only supported for isolated-protein networks*).
* **sources_filename**: File containing the residue indices of the sources.
* **sinks_filename**: File containing the residue indices of the sinks.
Explanation of input keyword parameters:
* **similarity_map_diagonal_filename**: File containing the diagonal elements of the mutual information matrix.
* **additional_interactor_protein_contacts**: The interactor contact map (computed in Section 2b).
* **out_directory**: The directory where data should be written.
* **file_label**: "File end name": label of the system that will be appended to the end of the produced files.
* **n_chains**: The number of (homomeric) chains/subunits in the main-protein (e.g. a tetrameric ion channel => n_chains = 4).
* **n_cores**: Number of jobs to run with joblib.
* **cheap_write**: If set to True, fewer files will be written.
* **start_frame**: Used if multiple contact maps are supplied. *start_frame* is the index of the first frame to analyze.
* **normalize_similarity_map**: Whether or not to normalize the similarity map with symmetric uncertainty (*Note: applies to mutual information maps; Witten & Frank, 2005*)
* **auxiliary_protein_indices**: Residue indices of auxiliary subunits. This is used when symmetrizing current flow over subunits (chains). The auxiliary subunits will also be averaged over chains, i.e. one auxiliary subunit per chain is assumed. If there is no auxiliary subunit, just ignore this input to the current flow script.
* **compute_current_flow_closeness**: Whether or not to compute current flow closeness instead of current flow betweenness.
The default values are: <br>
{'out_directory': '', <br>
'file_label': '', <br>
'similarity_map_diagonal_filename': '', <br>
'n_chains': 1, <br>
'n_cores': 1, <br>
'cheap_write': False, <br>
'start_frame': 0, <br>
'normalize_similarity_map': False, <br>
'auxiliary_protein_indices': '', <br>
'additional_interactor_protein_contacts': '', <br>
'compute_current_flow_closeness': False } <br>
```
similarity_map = out_dir+'MI_data/res_res_MI_compressed_'+file_label+'.npy'
similarity_map_diagonal = out_dir+'MI_data/diagonal_MI_'+file_label+'.npy'
contact_maps = [out_dir+'distance_matrix_semi_bin_'+file_label+'.txt']
additional_interactor_protein_contacts = out_dir+'cofactor_protein_residue_semi_binary_cmap_'+file_label+'.npy'
n_chains=4
source_inds='input_data/inds_sources.txt'
sink_inds='input_data/inds_sinks.txt'
aux_inds='input_data/auxiliary_prot_inds.txt'
compute_current_flow_closeness = False # False (ie. default) => will compute current flow betweenness.
# Set this to True to compute current flow closeness centrality between each
# source and all sinks instead.
kwargs={
'file_label': file_label,
'out_directory': out_dir,
'n_chains': n_chains,
'n_cores': n_cores,
'similarity_map_diagonal_filename': similarity_map_diagonal,
'normalize_similarity_map': False,
'auxiliary_protein_indices': aux_inds,
'additional_interactor_protein_contacts': additional_interactor_protein_contacts,
'compute_current_flow_closeness': compute_current_flow_closeness
}
CF = allopath.CurrentFlow(similarity_map, contact_maps, source_inds, sink_inds, **kwargs)
CF.run()
```
## 5. Project current flow on structure
----------------------------------------------------
As a last step, we project the current flow onto the structure (PDB file) with *make_pdb*. The current flow of each residue will be mapped to the beta column in the PDB. This can be visualized in VMD by setting the "Coloring method" to "Beta" in "Graphical Representations".
> ***allopath.make_pdb.project_current_flow***(*pdb_file*, *current_flow_file*, \**kwargs)
Explanation of input (positional arguments) parameters:
* **pdb_file**: The .pdb file corresponding to the first trajectory frame. *Note: .gro does not work.*
* **current_flow_file**: File containing the current flow. This is created by *CurrentFlow*, Section 4. **Note:** For homomultimers (using *n_chains > 1* in *CurrentFlow*), the file is *out_dir+'average_current_flow_'+file_label+'.npy'*. For *n_chains = 1*, the file is *out_dir+'current_flow_betweenness_'+file_label+'.npy'*.
Explanation of input keyword arguments:
* **out_directory**: The directory where pdb should be written.
* **file_label**: "File end name": label of the system that will be appended to the end of the produced pdb.
* **max_min_normalize**: Whether or not to scale the current flow between 0 and 1.
* **interactor_atom_inds_file**: The atom indices used to define the interactors (generated in Section 2b).
The default values are: <br>
{'out_directory': '', <br>
'file_label': '', <br>
'max_min_normalize': False,<br>
'interactor_atom_inds_file': None }
```
out_file = out_dir+'PDBs/current_flow_'+file_label+'.pdb'
current_flow = out_dir+'average_current_flow_'+file_label+'.npy'
interactor_atom_inds_file = out_dir+'cofactor_interactor_atom_indices_'+file_label+'.npy'
kwargs={
'out_directory': out_dir+'PDBs/',
'file_label': file_label,
'interactor_atom_inds_file': interactor_atom_inds_file
}
# Create PDB with current flow values on the beta column
allopath.make_pdb.project_current_flow(structure[0], current_flow, **kwargs)
```
---
_You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-machine-learning/resources/bANLa) course resource._
---
## Applied Machine Learning, Module 1: A simple classification task
### Import required modules and load data file
```
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
fruits = pd.read_table('fruit_data_with_colors.txt')
fruits.head()
# create a mapping from fruit label value to fruit name to make results easier to interpret
lookup_fruit_name = dict(zip(fruits.fruit_label.unique(), fruits.fruit_name.unique()))
lookup_fruit_name
```
The file contains the mass, height, and width of a selection of oranges, lemons and apples. The heights were measured along the core of the fruit. The widths were the widest width perpendicular to the height.
### Examining the data
```
# plotting a scatter matrix
from matplotlib import cm
X = fruits[['height', 'width', 'mass', 'color_score']]
y = fruits['fruit_label']
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
cmap = cm.get_cmap('gnuplot')
scatter = pd.plotting.scatter_matrix(X_train, c=y_train, marker='o', s=40, hist_kwds={'bins':15}, figsize=(9,9), cmap=cmap)
# plotting a 3D scatter plot
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection = '3d')
ax.scatter(X_train['width'], X_train['height'], X_train['color_score'], c = y_train, marker = 'o', s=100)
ax.set_xlabel('width')
ax.set_ylabel('height')
ax.set_zlabel('color_score')
plt.show()
```
### Create train-test split
```
# For this example, we use the mass, width, and height features of each fruit instance
X = fruits[['mass', 'width', 'height']]
y = fruits['fruit_label']
# default is 75% / 25% train-test split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
```
### Create classifier object
```
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors = 5)
```
### Train the classifier (fit the estimator) using the training data
```
knn.fit(X_train, y_train)
```
### Estimate the accuracy of the classifier on future data, using the test data
```
knn.score(X_test, y_test)
```
### Use the trained k-NN classifier model to classify new, previously unseen objects
```
# first example: a small fruit with mass 20g, width 4.3 cm, height 5.5 cm
fruit_prediction = knn.predict([[20, 4.3, 5.5]])
lookup_fruit_name[fruit_prediction[0]]
# second example: a larger, elongated fruit with mass 100g, width 6.3 cm, height 8.5 cm
fruit_prediction = knn.predict([[100, 6.3, 8.5]])
lookup_fruit_name[fruit_prediction[0]]
```
### Plot the decision boundaries of the k-NN classifier
```
from adspy_shared_utilities import plot_fruit_knn
plot_fruit_knn(X_train, y_train, 5, 'uniform') # we choose 5 nearest neighbors
```
### How sensitive is k-NN classification accuracy to the choice of the 'k' parameter?
```
k_range = range(1,20)
scores = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors = k)
knn.fit(X_train, y_train)
scores.append(knn.score(X_test, y_test))
plt.figure()
plt.xlabel('k')
plt.ylabel('accuracy')
plt.scatter(k_range, scores)
plt.xticks([0,5,10,15,20]);
```
### How sensitive is k-NN classification accuracy to the train/test split proportion?
```
t = [0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2]
knn = KNeighborsClassifier(n_neighbors = 5)
plt.figure()
for s in t:
scores = []
for i in range(1,1000):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 1-s)
knn.fit(X_train, y_train)
scores.append(knn.score(X_test, y_test))
plt.plot(s, np.mean(scores), 'bo')
plt.xlabel('Training set proportion')
plt.ylabel('accuracy');
```
# Data pre-processing steps
1. Remove columns that contain "Call" data
2. Transpose the dataframe so that each row is a patient and each column is a gene
3. Remove gene description and set the gene accession numbers as the column headers
4. Merge the data (expression values) with the class labels (patient numbers)
```
import itertools
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
testfile='../input/data_set_ALL_AML_independent.csv'
trainfile='../input/data_set_ALL_AML_train.csv'
patient_cancer='../input/actual.csv'
train = pd.read_csv(trainfile)
test = pd.read_csv(testfile)
patient_cancer = pd.read_csv(patient_cancer)
train.head()
# Remove "call" columns from training a test dataframes
train_keepers = [col for col in train.columns if "call" not in col]
test_keepers = [col for col in test.columns if "call" not in col]
train = train[train_keepers]
test = test[test_keepers]
train.head()
# Transpose the columns and rows so that genes become features and rows become observations
train = train.T
test = test.T
train.head()
# Clean up the column names for training data
train.columns = train.iloc[1]
train = train.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric)
# Clean up the column names for test data
test.columns = test.iloc[1]
test = test.drop(["Gene Description", "Gene Accession Number"]).apply(pd.to_numeric)
train.head()
```
### Combine the data (gene expression) with class labels (patient numbers)
```
# Reset the index. The indexes of two dataframes need to be the same before you combine them
train = train.reset_index(drop=True)
# Subset the first 38 patients' cancer types
pc_train = patient_cancer[patient_cancer.patient <= 38].reset_index(drop=True)
# Combine dataframes for first 38 patients: Patient number + cancer type + gene expression values
train = pd.concat([pc_train,train], axis=1)
# Handle the test data for patients 38 through 72
# Clean up the index
test = test.reset_index(drop=True)
# Subset the remaining patients' cancer types for the test set
pc_test = patient_cancer[patient_cancer.patient > 38].reset_index(drop=True)
# Combine dataframes for last patients: Patient number + cancer type + gene expression values
test = pd.concat([pc_test,test], axis=1)
```
# EDA
---
There's a lot of data, so to speed things up we only use a small random sample of the training data for the EDA.
```
sample = train.iloc[:,2:].sample(n=100, axis=1)
sample["cancer"] = train.cancer
sample.describe().round()
from sklearn import preprocessing
```
### Distribution of the random sample before standardizing
---
```
sample = sample.drop("cancer", axis=1)
sample.plot(kind="hist", legend=None, bins=20, color='k')
sample.plot(kind="kde", legend=None);
```
### Distribution of the random sample after standardizing
---
```
sample_scaled = pd.DataFrame(preprocessing.scale(sample))
sample_scaled.plot(kind="hist", normed=True, legend=None, bins=10, color='k')
sample_scaled.plot(kind="kde", legend=None);
```
# Process the full set
---
```
# StandardScaler to remove mean and scale to unit variance
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(train.iloc[:,2:])
scaled_train = scaler.transform(train.iloc[:,2:])
scaled_test = scaler.transform(test.iloc[:,2:])
x_train = train.iloc[:,2:]
y_train = train.iloc[:,1]
x_test = test.iloc[:,2:]
y_test = test.iloc[:,1]
```
# Classifiers
---
```
# Grid Search for tuning parameters
from sklearn.model_selection import GridSearchCV
# RandomizedSearch for tuning (possibly faster than GridSearch)
from sklearn.model_selection import RandomizedSearchCV
# Bayesian optimization, supposedly faster than GridSearch
from bayes_opt import BayesianOptimization
# Metrics
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, log_loss
## Models
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
```
# Helper functions
```
# CHERCHEZ FOR PARAMETERS
def cherchez(estimator, param_grid, search):
"""
    This is a helper function for tuning hyperparameters using the two search methods.
Methods must be GridSearchCV or RandomizedSearchCV.
Inputs:
estimator: Logistic regression, SVM, KNN, etc
param_grid: Range of parameters to search
search: Grid search or Randomized search
Output:
Returns the estimator instance, clf
"""
try:
if search == "grid":
clf = GridSearchCV(
estimator=estimator,
param_grid=param_grid,
scoring=None,
n_jobs=-1,
cv=10,
verbose=0,
return_train_score=True
)
elif search == "random":
clf = RandomizedSearchCV(
estimator=estimator,
param_distributions=param_grid,
n_iter=10,
n_jobs=-1,
cv=10,
verbose=0,
random_state=1,
return_train_score=True
)
        else:
            # Fail early and clearly if an unsupported search method is requested
            raise ValueError('Search argument has to be "grid" or "random"')
    except ValueError as err:
        print(err)
        raise
# Fit the model
clf.fit(X=scaled_train, y=y_train)
return clf
# Function for plotting the confusion matrices
def plot_confusion_matrix(cm, title="Confusion Matrix"):
"""
Plots the confusion matrix. Modified verison from
http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
Inputs:
cm: confusion matrix
title: Title of plot
"""
classes=["AML", "ALL"]
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.bone)
plt.title(title)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=0)
plt.yticks(tick_marks, classes)
plt.ylabel('Actual')
plt.xlabel('Predicted')
thresh = cm.mean()
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] < thresh else "black")
```
# Models being tested
1. Logistic Regression
- Using Grid search and Randomized search for tuning hyperparameters
2. C-Support Vector Classification (SVM)
- Using Grid search and Randomized search for tuning hyperparameters
3. K-Nearest Neighbors Classifier
- Using Grid search and Randomized search for tuning hyperparameters
4. Decision Tree Classifier
- Using only Grid search
```
# Logistic Regression
# Paramaters
logreg_params = {}
logreg_params["C"] = [0.01, 0.1, 10, 100]
logreg_params["fit_intercept"] = [True, False]
logreg_params["warm_start"] = [True,False]
logreg_params["random_state"] = [1]
lr_dist = {}
lr_dist["C"] = scipy.stats.expon(scale=.01)
lr_dist["fit_intercept"] = [True, False]
lr_dist["warm_start"] = [True,False]
lr_dist["random_state"] = [1]
logregression_grid = cherchez(LogisticRegression(), logreg_params, search="grid")
acc = accuracy_score(y_true=y_test, y_pred=logregression_grid.predict(scaled_test))
cfmatrix_grid = confusion_matrix(y_true=y_test, y_pred=logregression_grid.predict(scaled_test))
print("**Grid search results**")
print("Best training accuracy:\t", logregression_grid.best_score_)
print("Test accuracy:\t", acc)
logregression_random = cherchez(LogisticRegression(), lr_dist, search="random")
acc = accuracy_score(y_true=y_test, y_pred=logregression_random.predict(scaled_test))
cfmatrix_rand = confusion_matrix(y_true=y_test, y_pred=logregression_random.predict(scaled_test))
print("**Random search results**")
print("Best training accuracy:\t", logregression_random.best_score_)
print("Test accuracy:\t", acc)
plt.subplots(1,2)
plt.subplots_adjust(left=-0.5, bottom=None, right=None, top=None, wspace=0.5, hspace=None)
plot_confusion_matrix(cfmatrix_rand, title="Random Search Confusion Matrix")
plt.subplot(121)
plot_confusion_matrix(cfmatrix_grid, title="Grid Search Confusion Matrix")
# SVM
svm_param = {
"C": [.01, .1, 1, 5, 10, 100],
"gamma": [0, .01, .1, 1, 5, 10, 100],
"kernel": ["rbf"],
"random_state": [1]
}
svm_dist = {
"C": scipy.stats.expon(scale=.01),
"gamma": scipy.stats.expon(scale=.01),
"kernel": ["rbf"],
"random_state": [1]
}
svm_grid = cherchez(SVC(), svm_param, "grid")
acc = accuracy_score(y_true=y_test, y_pred=svm_grid.predict(scaled_test))
cfmatrix_grid = confusion_matrix(y_true=y_test, y_pred=svm_grid.predict(scaled_test))
print("**Grid search results**")
print("Best training accuracy:\t", svm_grid.best_score_)
print("Test accuracy:\t", acc)
svm_random = cherchez(SVC(), svm_dist, "random")
acc = accuracy_score(y_true=y_test, y_pred=svm_random.predict(scaled_test))
cfmatrix_rand = confusion_matrix(y_true=y_test, y_pred=svm_random.predict(scaled_test))
print("**Random search results**")
print("Best training accuracy:\t", svm_random.best_score_)
print("Test accuracy:\t", acc)
plt.subplots(1,2)
plt.subplots_adjust(left=-0.5, bottom=None, right=None, top=None, wspace=0.5, hspace=None)
plot_confusion_matrix(cfmatrix_rand, title="Random Search Confusion Matrix")
plt.subplot(121)
plot_confusion_matrix(cfmatrix_grid, title="Grid Search Confusion Matrix")
# KNN
knn_param = {
"n_neighbors": [i for i in range(1,30,5)],
"weights": ["uniform", "distance"],
"algorithm": ["ball_tree", "kd_tree", "brute"],
"leaf_size": [1, 10, 30],
"p": [1,2]
}
knn_dist = {
"n_neighbors": scipy.stats.randint(1,33),
"weights": ["uniform", "distance"],
"algorithm": ["ball_tree", "kd_tree", "brute"],
"leaf_size": scipy.stats.randint(1,1000),
"p": [1,2]
}
knn_grid = cherchez(KNeighborsClassifier(), knn_param, "grid")
acc = accuracy_score(y_true=y_test, y_pred=knn_grid.predict(scaled_test))
cfmatrix_grid = confusion_matrix(y_true=y_test, y_pred=knn_grid.predict(scaled_test))
print("**Grid search results**")
print("Best training accuracy:\t", knn_grid.best_score_)
print("Test accuracy:\t", acc)
knn_random = cherchez(KNeighborsClassifier(), knn_dist, "random")
acc = accuracy_score(y_true=y_test, y_pred=knn_random.predict(scaled_test))
cfmatrix_rand = confusion_matrix(y_true=y_test, y_pred=knn_random.predict(scaled_test))
print("**Random search results**")
print("Best training accuracy:\t", knn_random.best_score_)
print("Test accuracy:\t", acc)
plt.subplots(1,2)
plt.subplots_adjust(left=-0.5, bottom=None, right=None, top=None, wspace=0.5, hspace=None)
plot_confusion_matrix(cfmatrix_rand, title="Random Search Confusion Matrix")
plt.subplot(121)
plot_confusion_matrix(cfmatrix_grid, title="Grid Search Confusion Matrix")
# Decision tree classifier
dtc_param = {
"max_depth": [None],
"min_samples_split": [2],
"min_samples_leaf": [1],
"min_weight_fraction_leaf": [0.],
"max_features": [None],
"random_state": [4],
"max_leaf_nodes": [None], # None = infinity or int
"presort": [True, False]
}
dtc_grid = cherchez(DecisionTreeClassifier(), dtc_param, "grid")
acc = accuracy_score(y_true=y_test, y_pred=dtc_grid.predict(scaled_test))
cfmatrix_grid = confusion_matrix(y_true=y_test, y_pred=dtc_grid.predict(scaled_test))
print("**Grid search results**")
print("Best training accuracy:\t", dtc_grid.best_score_)
print("Test accuracy:\t", acc)
plot_confusion_matrix(cfmatrix_grid, title="Decision Tree Confusion Matrix")
```
# Math Part 1
```
from __future__ import print_function
import tensorflow as tf
import numpy as np
from datetime import date
date.today()
author = "kyubyong. https://github.com/Kyubyong/tensorflow-exercises"
tf.__version__
np.__version__
sess = tf.InteractiveSession()
```
NOTE on notation
* _x, _y, _z, ...: NumPy 0-d or 1-d arrays
* _X, _Y, _Z, ...: NumPy 2-d or higher dimensional arrays
* x, y, z, ...: 0-d or 1-d tensors
* X, Y, Z, ...: 2-d or higher dimensional tensors
## Arithmetic Operators
Q1. Add x and y element-wise.
```
_x = np.array([1, 2, 3])
_y = np.array([-1, -2, -3])
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
Q2. Subtract y from x element-wise.
```
_x = np.array([3, 4, 5])
_y = np.array(3)
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
Q3. Multiply x by y element-wise.
```
_x = np.array([3, 4, 5])
_y = np.array([1, 0, -1])
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
Q4. Multiply x by 5 element-wise.
```
_x = np.array([1, 2, 3])
x = tf.convert_to_tensor(_x)
```
Q5. Predict the result of this.
```
_x = np.array([10, 20, 30], np.int32)
_y = np.array([2, 3, 5], np.int32)
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
out1 = tf.div(x, y)
out2 = tf.truediv(x, y)
print(np.array_equal(out1.eval(), out2.eval()))
print(out1.eval(), out1.eval().dtype)  # tf.div() keeps the dtype of the input tensors (integer division here).
print(out2.eval(), out2.eval().dtype)  # tf.truediv() always returns floating point results.
```
Q6. Get the remainder of x / y element-wise.
```
_x = np.array([10, 20, 30], np.int32)
_y = np.array([2, 3, 7], np.int32)
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
Q7. Compute the pairwise cross product of x and y.
```
_x = np.array([1, 2, 3], np.int32)
_y = np.array([4, 5, 6], np.int32)
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
## Basic Math Functions
Q8. Add x, y, and z element-wise.
```
_x = np.array([1, 2, 3], np.int32)
_y = np.array([4, 5, 6], np.int32)
_z = np.array([7, 8, 9], np.int32)
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
z = tf.convert_to_tensor(_z)
```
Q9. Compute the absolute value of X element-wise.
```
_X = np.array([[1, -1], [3, -3]])
X = tf.convert_to_tensor(_X)
```
Q10. Compute the numerical negative value of x, element-wise.
```
_x = np.array([1, -1])
x = tf.convert_to_tensor(_x)
```
Q11. Compute an element-wise indication of the sign of x, element-wise.
```
_x = np.array([1, 3, 0, -1, -3])
x = tf.convert_to_tensor(_x)
```
Q12. Compute the reciprocal of x, element-wise.
```
_x = np.array([1, 2, 2/10])
x = tf.convert_to_tensor(_x)
```
Q13. Compute the square of x, element-wise.
```
_x = np.array([1, 2, -1])
x = tf.convert_to_tensor(_x)
```
Q14. Predict the results of this, paying attention to the differences among this family of functions.
```
_x = np.array([2.1, 1.5, 2.5, 2.9, -2.1, -2.5, -2.9])
x = tf.convert_to_tensor(_x)
```
Q15. Compute square root of x element-wise.
```
_x = np.array([1, 4, 9], dtype=np.float32)
x = tf.convert_to_tensor(_x)
```
Q16. Compute the reciprocal of square root of x element-wise.
```
_x = np.array([1., 4., 9.])
x = tf.convert_to_tensor(_x)
```
Q17. Compute $x^y$, element-wise.
```
_x = np.array([[1, 2], [3, 4]])
_y = np.array([[1, 2], [1, 2]])
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
Q18. Compute $e^x$, element-wise.
```
_x = np.array([1., 2., 3.], np.float32)
x = tf.convert_to_tensor(_x)
```
Q19. Compute the natural logarithm of x element-wise.
```
_x = np.array([1, np.e, np.e**2])
x = tf.convert_to_tensor(_x)
```
Q20. Compute the max of x and y element-wise.
```
_x = np.array([2, 3, 4])
_y = np.array([1, 5, 2])
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
Q21. Compute the min of x and y element-wise.
```
_x = np.array([2, 3, 4])
_y = np.array([1, 5, 2])
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
Q22. Compute the sine, cosine, and tangent of x, element-wise.
```
_x = np.array([-np.pi, np.pi, np.pi/2])
x = tf.convert_to_tensor(_x)
```
Q23. Compute (x - y)(x - y) element-wise.
```
_x = np.array([2, 3, 4])
_y = np.array([1, 5, 1])
x = tf.convert_to_tensor(_x)
y = tf.convert_to_tensor(_y)
```
# Dask pipeline
## Example: Tracking the International Space Station with Dask
In this notebook we will be using two APIs:
1. [Google Maps Geocoder](https://developers.google.com/maps/documentation/geocoding/overview)
2. [Open Notify API for ISS location](http://api.open-notify.org/)
We will use them to keep track of the ISS location and the lead time until its next pass over a list of cities. To build and visualize our task graphs and parallelise the work intelligently, we use Dask, especially [Dask Delayed](../refactoring/performance/dask.html#Dask-Delayed).
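If you have not used Dask Delayed before: `delayed` wraps ordinary Python calls into a lazy task graph that only runs when you call `compute()`. A tiny, self-contained warm-up example (unrelated to the ISS data below):
```
from dask import delayed

@delayed
def inc(x):
    return x + 1

# Nothing is executed yet; we are only building a task graph
total = delayed(sum)([inc(1), inc(2), inc(3)])
print(total.compute())  # 9
```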
### 1. Imports
```
import requests
import logging
import sys
import numpy as np
from time import sleep
from datetime import datetime
from math import radians
from dask import delayed
from operator import itemgetter
from sklearn.neighbors import DistanceMetric
```
### 2. Logger
```
logger = logging.getLogger()
logger.setLevel(logging.INFO)
```
### 3. Latitude and longitude pairs from a list of cities
see also [Location APIs](https://locationiq.com/)
```
def get_lat_long(address):
resp = requests.get(
'https://eu1.locationiq.org/v1/search.php',
params={'key': '92e7ba84cf3465',
'q': address,
'format': 'json'}
)
if resp.status_code != 200:
print('There was a problem with your request!')
print(resp.content)
return
data = resp.json()[0]
return {
'name': data.get('display_name'),
'lat': float(data.get('lat')),
'long': float(data.get('lon')),
}
get_lat_long('Berlin, Germany')
locations = []
for city in ['Seattle, Washington',
'Miami, Florida',
'Berlin, Germany',
'Singapore',
'Wellington, New Zealand',
'Beirut, Lebanon',
'Beijing, China',
'Nairobi, Kenya',
'Cape Town, South Africa',
'Buenos Aires, Argentina']:
locations.append(get_lat_long(city))
sleep(2)
locations
```
## 4. Retrieve ISS data and determine lead times for cities
```
def get_spaceship_location():
resp = requests.get('http://api.open-notify.org/iss-now.json')
location = resp.json()['iss_position']
return {'lat': float(location.get('latitude')),
'long': float(location.get('longitude'))}
def great_circle_dist(lon1, lat1, lon2, lat2):
"Found on SO: http://stackoverflow.com/a/41858332/380442"
dist = DistanceMetric.get_metric('haversine')
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
X = [[lat1, lon1], [lat2, lon2]]
kms = 6367
return (kms * dist.pairwise(X)).max()
def iss_dist_from_loc(issloc, loc):
distance = great_circle_dist(issloc.get('long'),
issloc.get('lat'),
loc.get('long'), loc.get('lat'))
logging.info('ISS is ~%dkm from %s', int(distance), loc.get('name'))
return distance
def iss_pass_near_loc(loc):
resp = requests.get('http://api.open-notify.org/iss-pass.json',
params={'lat': loc.get('lat'),
'lon': loc.get('long')})
data = resp.json().get('response')[0]
td = datetime.fromtimestamp(data.get('risetime')) - datetime.now()
m, s = divmod(int(td.total_seconds()), 60)
h, m = divmod(m, 60)
logging.info('ISS will pass near %s in %02d:%02d:%02d',loc.get('name'), h, m, s)
return td.total_seconds()
iss_dist_from_loc(get_spaceship_location(), locations[4])
iss_pass_near_loc(locations[4])
```
## 5. Create a `delayed` pipeline
```
output = []
for loc in locations:
issloc = delayed(get_spaceship_location)()
dist = delayed(iss_dist_from_loc)(issloc, loc)
output.append((loc.get('name'), dist))
closest = delayed(lambda x: sorted(x,
key=itemgetter(1))[0])(output)
closest
```
## 6. Show DAG
```
closest.visualize()
```
## 7. `compute()`
```
closest.compute()
```
# Multi-Timescale Prediction
This notebook showcases some ways to use the **MTS-LSTM** from our recent publication to generate predictions at multiple timescales: [**"Rainfall-Runoff Prediction at Multiple Timescales with a Single Long Short-Term Memory Network"**](https://arxiv.org/abs/2010.07921).
Let's assume we have a set of daily meteorological forcing variables and a set of hourly variables, and we want to generate daily and hourly discharge predictions.
Now, we could just go and train two separate LSTMs: One on the daily forcings to generate daily predictions, and one on the hourly forcings to generate hourly ones.
One problem with this approach: It takes a _lot_ of time, even if you run it on a GPU.
The reason is that the hourly model would crunch through a year's worth of hourly data to predict a single hour (assuming we provide the model input sequences with the same look-back that we usually use with daily data).
That's $365 \times 24 = 8760$ time steps to process for each prediction.
Not only does this take ages to train and evaluate, but also the training procedure becomes quite unstable and it is theoretically really hard for the model to learn dependencies over that many time steps.
What's more, the daily and hourly predictions might end up being inconsistent, because the two models are entirely unrelated.
## MTS-LSTM
MTS-LSTM solves these issues: We can use a single model to predict both hourly and daily discharge, and with some tricks, we can push the model toward predictions that are consistent across timescales.
### The Intuition
The basic idea of MTS-LSTM is this: we can process time steps that are far in the past at lower temporal resolution.
As an example, to predict discharge of September 10 9:00am, we'll certainly need fine-grained data for the previous few days or weeks.
We might also need information from several months ago, but we probably _don't_ need to know if it rained at 6:00am or 7:00am on May 15.
It's just so long ago that the fine resolution doesn't matter anymore.
### How it's Implemented
The MTS-LSTM architecture follows this principle: To predict today's daily and hourly discharge, we start by feeding daily meteorological information from up to a year ago into the LSTM.
At some point, say 14 days before today, we split our processing into two branches:
1. The first branch just keeps going with daily inputs until it outputs today's daily prediction.
So far, there's no difference to normal daily-only prediction.
2. The second branch is where it gets interesting: We take the LSTM state from 14 days before today, apply a linear transformation to it, and then use the resulting states as the starting point for another LSTM, which we feed with the 14 days of _hourly_ data until it generates today's 24 hourly predictions.
Thus, in a single forward pass through the MTS-LSTM, we've generated both daily and hourly predictions.
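To make the state hand-over concrete, here is a heavily simplified PyTorch sketch of the idea. This is *not* the neuralhydrology implementation: the class name, layer sizes, branching step, and output heads are illustrative assumptions only.
```
import torch
import torch.nn as nn

class ToyMTSLSTM(nn.Module):
    """Minimal two-timescale LSTM sketch mirroring the description above."""
    def __init__(self, n_daily_inputs, n_hourly_inputs, hidden_size=64):
        super().__init__()
        self.lstm_daily = nn.LSTM(n_daily_inputs, hidden_size, batch_first=True)
        self.lstm_hourly = nn.LSTM(n_hourly_inputs, hidden_size, batch_first=True)
        # linear transfer of hidden and cell state at the branching point
        self.transfer_h = nn.Linear(hidden_size, hidden_size)
        self.transfer_c = nn.Linear(hidden_size, hidden_size)
        self.head_daily = nn.Linear(hidden_size, 1)
        self.head_hourly = nn.Linear(hidden_size, 1)

    def forward(self, x_daily, x_hourly, split_step):
        # daily branch up to the branching point (e.g. 14 days before today)
        _, (h, c) = self.lstm_daily(x_daily[:, :split_step])
        # the daily branch keeps going on daily inputs from the same states
        out_d, _ = self.lstm_daily(x_daily[:, split_step:], (h, c))
        y_daily = self.head_daily(out_d[:, -1])
        # the hourly branch starts from a linear transformation of the daily states
        h0, c0 = self.transfer_h(h), self.transfer_c(c)
        out_h, _ = self.lstm_hourly(x_hourly, (h0, c0))
        y_hourly = self.head_hourly(out_h[:, -24:])  # today's 24 hourly predictions
        return y_daily, y_hourly
```
In neuralhydrology you never have to write this yourself; the model configured below takes care of it, and the sketch only mirrors the two-branch description.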
If you prefer visualizations, here's what the architecture looks like:

You can see how the first 362 input steps are done at the daily timescale (the visualization uses 362 days, but in reality this is a tunable hyperparameter).
Starting with day 363, two things happen:
- The _daily_ LSTM just keeps going with daily inputs.
- We take the hidden and cell states from day 362 and pass them through a linear layer. Starting with these new states, the _hourly_ LSTM begins processing hourly inputs.
Finally, we pass the LSTMs' outputs through a linear output layer ($\text{FC}^H$ and $\text{FC}^D$) and get our predictions.
### Some Variations
Now that we have this model, we can think of a few variations:
1. Because the MTS-LSTM has an individual branch for each timescale, we can actually use a different forcings product at each timescale (e.g., daily Daymet and hourly NLDAS). Going even further, we can use _multiple_ sets of forcings at each timescale (e.g., daily Daymet and Maurer, but only hourly NLDAS). This can improve predictions a lot (see [Kratzert et al., 2020](https://hess.copernicus.org/preprints/hess-2020-221/)).
2. We could also use the same LSTM weights in all timescales' branches. We call this model the shared MTS-LSTM (sMTS-LSTM). In our results, the shared version generated slightly better predictions if all we have is one forcings dataset. The drawback is that the model doesn't support per-timescale forcings. Thus, if you have several forcings datasets, you'll most likely get better predictions if you use MTS-LSTM (non-shared) and leverage all your datasets.
3. We can link the daily and hourly predictions during training to nudge the model towards predictions that are consistent across timescales. We do this by means of a regularization of the loss function that increases the loss if the average daily prediction aggregated from the hourly predictions does not match the daily prediction (a sketch of this idea is shown after this list).
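To illustrate the third variation, here is a hedged sketch of such a cross-timescale regularization term. The exact weighting and implementation in neuralhydrology may differ; the shapes are assumed to be `(batch, n_days)` for the daily predictions and `(batch, n_days * 24)` for the hourly ones.
```
import torch

def mts_loss(y_daily_hat, y_daily, y_hourly_hat, y_hourly, reg_weight=1.0):
    # standard per-timescale squared errors
    mse_daily = torch.mean((y_daily_hat - y_daily) ** 2)
    mse_hourly = torch.mean((y_hourly_hat - y_hourly) ** 2)
    # aggregate each day's 24 hourly predictions to a daily mean ...
    daily_from_hourly = y_hourly_hat.reshape(y_hourly_hat.shape[0], -1, 24).mean(dim=-1)
    # ... and penalise disagreement with the daily branch
    consistency = torch.mean((daily_from_hourly - y_daily_hat) ** 2)
    return mse_daily + mse_hourly + reg_weight * consistency
```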
## Using MTS-LSTM
So, let's look at some code to train and evaluate an MTS-LSTM!
The following code uses the `neuralhydrology` package to train an MTS-LSTM on daily and hourly discharge prediction.
For the sake of a quick example, we'll train our model on just a single basin.
When you actually care about the quality of your predictions, you'll generally get much better model performance when training on hundreds of basins.
```
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
from neuralhydrology.evaluation import metrics, get_tester
from neuralhydrology.nh_run import start_run, eval_run
from neuralhydrology.utils.config import Config
```
Every experiment in `neuralhydrology` uses a configuration file that specifies its setup.
Let's look at some of the relevant configuration options:
```
run_config = Config(Path("1_basin.yml"))
print('model:\t\t', run_config.model)
print('use_frequencies:', run_config.use_frequencies)
print('seq_length:\t', run_config.seq_length)
```
`model` is obvious: We want to use the MTS-LSTM. For the sMTS-LSTM, we'd set `run_config.shared_mtslstm = True`.
In `use_frequencies`, we specify the timescales we want to predict.
In `seq_length`, we specify for each timescale the look-back window. Here, we'll start with 365 days look-back, and the hourly LSTM branch will get the last 14 days ($336/24 = 14$) at an hourly resolution.
As we're using the MTS-LSTM (and not sMTS-LSTM), we can use different input variables at each frequency.
Here, we use Maurer and Daymet as daily inputs, while the hourly model component uses NLDAS, Maurer, and Daymet.
Note that even though Daymet and Maurer are daily products, we can use them to support the hourly predictions.
```
print('dynamic_inputs:')
run_config.dynamic_inputs
```
## Training
We start model training of our single-basin toy example with `start_run`.
```
start_run(config_file=Path("1_basin.yml"))
```
## Evaluation
Given the trained model, we can generate and evaluate its predictions.
```
run_dir = Path("runs/test_run_1410_151521") # you'll find this path in the output of the training above.
# create a tester instance and start evaluation
tester = get_tester(cfg=run_config, run_dir=run_dir, period="test", init_model=True)
results = tester.evaluate(save_results=False, metrics=run_config.metrics)
results.keys()
```
Let's take a closer look at the predictions and do some plots, starting with the daily results.
Note that units are mm/h even for daily values, since we predict daily averages.
```
# extract observations and simulations
daily_qobs = results["01022500"]["1D"]["xr"]["qobs_mm_per_hour_obs"]
daily_qsim = results["01022500"]["1D"]["xr"]["qobs_mm_per_hour_sim"]
fig, ax = plt.subplots(figsize=(16,10))
ax.plot(daily_qobs["date"], daily_qobs, label="Observed")
ax.plot(daily_qsim["date"], daily_qsim, label="Simulated")
ax.legend()
ax.set_ylabel("Discharge (mm/h)")
ax.set_title(f"Test period - daily NSE {results['01022500']['1D']['NSE_1D']:.3f}")
# Calculate some metrics
values = metrics.calculate_all_metrics(daily_qobs.isel(time_step=-1), daily_qsim.isel(time_step=-1))
print("Daily metrics:")
for key, val in values.items():
print(f" {key}: {val:.3f}")
```
...and finally, let's look more closely at the last few months' hourly predictions:
```
# extract a date slice of observations and simulations
hourly_xr = results["01022500"]["1H"]["xr"].sel(date=slice("10-1995", None))
# The hourly data is indexed with two indices: The date (in days) and the time_step (the hour within that day).
# As we want to get a continuous plot of several days' hours, we select all 24 hours of each day and then stack
# the two dimensions into one consecutive datetime dimension.
hourly_xr = hourly_xr.isel(time_step=slice(-24, None)).stack(datetime=['date', 'time_step'])
hourly_xr['datetime'] = hourly_xr.coords['date'] + hourly_xr.coords['time_step']
hourly_qobs = hourly_xr["qobs_mm_per_hour_obs"]
hourly_qsim = hourly_xr["qobs_mm_per_hour_sim"]
fig, ax = plt.subplots(figsize=(16,10))
ax.plot(hourly_qobs["datetime"], hourly_qobs, label="Observation")
ax.plot(hourly_qsim["datetime"], hourly_qsim, label="Simulation")
ax.set_ylabel("Discharge (mm/h)")
ax.set_title(f"Test period - hourly NSE {results['01022500']['1H']['NSE_1H']:.3f}")
_ = ax.legend()
```
# Helium Hydride (Tapered HeH+) Exemplar
## Step 0: Import various libraries
```
# Imports for QSCOUT
import jaqalpaq
from jaqalpaq.core import circuitbuilder
from jaqalpaq.core.circuit import normalize_native_gates
from jaqalpaq import pygsti
from qscout.v1 import native_gates
# Imports for basic mathematical functionality
from math import pi
import numpy as np
# Imports for OpenFermion(-PySCF)
import openfermion as of
from openfermion.hamiltonians import MolecularData
from openfermionpyscf import run_pyscf
# Import for VQE optimizer
from scipy import optimize
```
## Step 1: SCF calculation to assemble the second-quantized Hamiltonian
```
# Set the basis set, spin, and charge of the HeH+ molecule
basis = 'sto-3g'
multiplicity = 1
charge = 1 #Charge is 1 for HeH+
# Set calculation parameters
run_scf = 1
run_fci = 1
delete_input = True
# Note: this option is critical as it ensures that the integrals are written out to an HDF5 file
delete_output = False
# Generate molecule at some bond length (0.8 Angstroms here)
geometry = [('He', (0., 0., 0.)), ('H', (0., 0., 0.8))]
molecule = MolecularData(
geometry, basis, multiplicity, charge,
filename='./HeH+_2_sto-3g_single_0.8') #Set file location of data
# Run pyscf to generate new molecular data for sto-3g HeH+
molecule = run_pyscf(molecule,
run_scf=run_scf,
run_fci=run_fci,
verbose=False)
print("Bond Length in Angstroms: {}".format(0.8))
print("FCI (Exact) energy in Hartrees: {}".format(molecule.fci_energy))
```
## Step 2: Convert the fermionic Hamiltonian to a qubit Hamiltonian
```
#Get the Hamiltonian for HeH+
hamiltonian = molecule.get_molecular_hamiltonian()
hamiltonian_ferm = of.get_fermion_operator(hamiltonian)
hamiltonian_bk = of.symmetry_conserving_bravyi_kitaev(hamiltonian_ferm, active_orbitals=4, active_fermions=2)
#Define terms and coefficients of our Hamiltonian
terms = []
cs = [] #Coefficients
for term in hamiltonian_bk.terms:
paulis = [None, None]
for pauli in term:
paulis[pauli[0]] = pauli[1]
terms += [paulis]
cs += [hamiltonian_bk.terms[term]]
```
## Step 3: Define UCC Ansatz circuit in JaqalPaq
```
def ansatz(theta):
term_probs = []
for i in range(len(terms)):
sexpr = [
'circuit',
#Define constants +-pi/2
('let', 'pi2', pi/2),
('let', 'npi2', -pi/2),
#Create a qubit register
('register', 'q', 2),
('map', 'q0', 'q', 0),
('map', 'q1', 'q', 1),
#Define a hadamard macro
('macro',
'hadamard',
'a',
('sequential_block',
('gate', 'Sy', 'a'),
('gate', 'Px', 'a'),
),
),
#Prepare the state |11>
('gate', 'prepare_all'),
('gate', 'Px', 'q0'),
('gate', 'Px', 'q1'),
#Apply the UCC Ansatz exp[-i*theta(X1 Y0)]
('gate', 'MS', 'q1', 'q0', 'npi2', 0),
('gate', 'Rz', 'q1', theta),
('gate', 'MS', 'q1', 'q0', 'pi2', 0),
]
#Change basis for measurement depending on term
for j, qubit in enumerate(terms[i]):
if qubit == 'X':
sexpr+=('gate', 'hadamard', ('array_item', 'q', j)),
if qubit == 'Y':
sexpr+=('gate', 'Sxd', ('array_item', 'q', j)),
sexpr+=('gate', 'measure_all'),
circuit=circuitbuilder.build(sexpr, native_gates=normalize_native_gates(native_gates.NATIVE_GATES))
#Format results of simulation as a list of lists
sim_result = pygsti.forward_simulate_circuit(circuit)
probs = []
for state in sim_result:
probs += [sim_result[state]] #Append probabilities of each state for a particular term
term_probs += [probs] #Combine lists of probabilities of each term in Hamiltonian
return term_probs
```
## Step 4: Define functions to calculate energy expectation value of Ansatz state
```
#Calculate energy of one term of the Hamiltonian for one possible state
def term_energy(term, state, coefficient, prob):
parity = 1
for i in range(len(term)):
#Change parity if state is occupied and is acted on by a pauli operator
if term[i] != None and state[i] == '1':
parity = -1*parity
return coefficient*prob*parity
#Calculate energy of the molecule for a given value of theta
def calculate_energy(theta):
energy = 0
probs = ansatz(theta[0]) #Convert tuple (from optimization) to float for circuit
for i in range(len(terms)): #For each term in the hamiltonian
for j in range(len(probs[0])): #For each possible state
term = terms[i]
state = '{0:02b}'.format(j) #convert state to binary (# of qubits)
coefficient = cs[i].real
prob = probs[i][j]
energy += term_energy(term, state, coefficient, prob)
return energy
```
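As a quick sanity check of the sign convention (the term, state, and values below are arbitrary): a Pauli operator acting on an occupied qubit flips the parity, so that contribution picks up a minus sign.
```
# Hypothetical inputs, just to illustrate the parity bookkeeping in term_energy
print(term_energy(['Z', None], '10', 1.0, 0.5))  # qubit 0 occupied and acted on -> -0.5
print(term_energy(['Z', None], '00', 1.0, 0.5))  # qubit 0 unoccupied -> +0.5
```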
## Step 5: Minimize the energy expectation value in 𝜃
```
#Minimize the energy using classical optimization
optimize.minimize(fun=calculate_energy, x0=[0.01], method="COBYLA") #Can use "L-BFGS-B" instead
```
## Step 6: Loop over previous steps to calculate ground state energy at different bond lengths
```
# Set the basis set, spin, and charge of the HeH+ molecule
basis = 'sto-3g'
multiplicity = 1
charge = 1
# Set calculation parameters
run_scf = 1
run_fci = 1
delete_input = True
# Note: this option is critical as it ensures that the integrals are written out to an HDF5 file
delete_output = False
optimized_energies = []
exact_energies = []
#Loop over bond lengths from 0.5 to 2.0 angstroms
n_pts = 16 #Number of points
bond_lengths = np.linspace(0.5,2.0,n_pts)
for diatomic_bond_length in bond_lengths:
# Generate molecule at some bond length
geometry = [('He', (0., 0., 0.)), ('H', (0., 0., diatomic_bond_length))]
molecule = MolecularData(
geometry, basis, multiplicity, charge,
description=str(round(diatomic_bond_length, 2)),
filename='./HeH+_2_sto-3g_single_dissociation')
# Run pyscf
molecule = run_pyscf(molecule,
run_scf=run_scf,
run_fci=run_fci,
verbose=False)
# Get the fermionic Hamiltonian for HeH+ and map it into qubits using the Bravyi-Kitaev encoding
hamiltonian = molecule.get_molecular_hamiltonian()
hamiltonian_ferm = of.get_fermion_operator(hamiltonian)
hamiltonian_bk = of.symmetry_conserving_bravyi_kitaev(hamiltonian_ferm, active_orbitals=4, active_fermions=2)
#Define terms and coefficients of our Hamiltonian
terms = []
cs = [] #Coefficients
for term in hamiltonian_bk.terms:
paulis = [None, None]
for pauli in term:
paulis[pauli[0]] = pauli[1]
terms += [paulis]
cs += [hamiltonian_bk.terms[term]]
# Minimize the expectation value of the energy using a classical optimizer (COBYLA)
result = optimize.minimize(fun=calculate_energy, x0=[0.01], method="COBYLA")
optimized_energies.append(result.fun)
exact_energies.append(molecule.fci_energy)
print("R={}\t Optimized Energy: {}".format(str(round(diatomic_bond_length, 2)), result.fun))
```
## Step 7: Plot the dissociation curve
```
import matplotlib
import matplotlib.pyplot as pyplot
# Plot the various energies for different bond lengths
fig = pyplot.figure(figsize=(10,7))
pyplot.rcParams['font.size']=18
bkcolor = '#ffffff'
ax = fig.add_subplot(1, 1, 1)
pyplot.subplots_adjust(left=.2)
ax.set_xlabel('R (Angstroms)')
ax.set_ylabel(r'E (Hartrees)')
ax.set_title(r'HeH+ 2-qubit bond dissociation curve')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
bond_lengths = [float(x) for x in bond_lengths]
ax.plot(bond_lengths, optimized_energies, 'o', label='UCCSD', color='red')
ax.plot(bond_lengths, exact_energies, '-', label='Full-CI', color='black')
ax.legend(frameon=False)
pyplot.show()
fig.savefig("HeH+ Bond Dissociation Curve.pdf")
```
# Ungraded Lab: Walkthrough of ML Metadata
Keeping records at each stage of the project is an important aspect of machine learning pipelines. Especially in production models which involve many iterations of datasets and re-training, having these records will help in maintaining or debugging the deployed system. [ML Metadata](https://www.tensorflow.org/tfx/guide/mlmd) addresses this need by having an API suited specifically for keeping track of any progress made in ML projects.
As mentioned in earlier labs, you have already used ML Metadata when you ran your TFX pipelines. Each component automatically records information to a metadata store as you go through each stage. It allowed you to retrieve information such as the name of the training splits or the location of an inferred schema.
In this notebook, you will look more closely at how ML Metadata can be used directly for recording and retrieving metadata independent from a TFX pipeline (i.e. without using TFX components). You will use TFDV to infer a schema and record all information about this process. These will show how the different components are related to each other so you can better interact with the database when you go back to using TFX in the next labs. Moreover, knowing the inner workings of the library will help you adapt it for other platforms if needed.
Let's get to it!
## Imports
```
from ml_metadata.metadata_store import metadata_store
from ml_metadata.proto import metadata_store_pb2
import tensorflow as tf
print('TF version: {}'.format(tf.__version__))
import tensorflow_data_validation as tfdv
print('TFDV version: {}'.format(tfdv.version.__version__))
import urllib.request
import zipfile
```
## Download dataset
You will be using the [Chicago Taxi](https://data.cityofchicago.org/Transportation/Taxi-Trips/wrvz-psew) dataset for this lab. Let's download the CSVs into your workspace.
```
# Download the zip file from GCP and unzip it
url = 'https://storage.googleapis.com/artifacts.tfx-oss-public.appspot.com/datasets/chicago_data.zip'
zip, headers = urllib.request.urlretrieve(url)
zipfile.ZipFile(zip).extractall()
zipfile.ZipFile(zip).close()
print("Here's what we downloaded:")
!ls -R data
```
## Process Outline
Here is the figure shown in class that describes the different components in an ML Metadata store:
<img src='images/mlmd_overview.png' alt='image of mlmd overview'>
The green box in the middle shows the data model followed by ML Metadata. The [official documentation](https://www.tensorflow.org/tfx/guide/mlmd#data_model) describes each of these, and we'll show them here as well for easy reference:
* `ArtifactType` describes an artifact's type and its properties that are stored in the metadata store. You can register these types on-the-fly with the metadata store in code, or you can load them in the store from a serialized format. Once you register a type, its definition is available throughout the lifetime of the store.
* An `Artifact` describes a specific instance of an ArtifactType, and its properties that are written to the metadata store.
* An `ExecutionType` describes a type of component or step in a workflow, and its runtime parameters.
* An `Execution` is a record of a component run or a step in an ML workflow and the runtime parameters. An execution can be thought of as an instance of an ExecutionType. Executions are recorded when you run an ML pipeline or step.
* An `Event` is a record of the relationship between artifacts and executions. When an execution happens, events record every artifact that was used by the execution, and every artifact that was produced. These records allow for lineage tracking throughout a workflow. By looking at all events, MLMD knows what executions happened and what artifacts were created as a result. MLMD can then recurse back from any artifact to all of its upstream inputs.
* A `ContextType` describes a type of conceptual group of artifacts and executions in a workflow, and its structural properties. For example: projects, pipeline runs, experiments, owners etc.
* A `Context` is an instance of a ContextType. It captures the shared information within the group. For example: project name, changelist commit id, experiment annotations etc. It has a user-defined unique name within its ContextType.
* An `Attribution` is a record of the relationship between artifacts and contexts.
* An `Association` is a record of the relationship between executions and contexts.
As mentioned earlier, you will use TFDV to generate a schema and record this process in the ML Metadata store. You will be starting from scratch so you will be defining each component of the data model. The outline of steps involve:
1. Defining the ML Metadata's storage database
1. Setting up the necessary artifact types
1. Setting up the execution types
1. Generating an input artifact unit
1. Generating an execution unit
1. Registering an input event
1. Running the TFDV component
1. Generating an output artifact unit
1. Registering an output event
1. Updating the execution unit
1. Setting up and generating a context unit
1. Generating attributions and associations
You can then retrieve information from the database to investigate aspects of your project. For example, you can find which dataset was used to generate a particular schema. You will also do that in this exercise.
For each of these steps, you may want to have the [MetadataStore API documentation](https://www.tensorflow.org/tfx/ml_metadata/api_docs/python/mlmd/MetadataStore) open so you can lookup any of the methods you will be using to interact with the metadata store. You can also look at the `metadata_store` protocol buffer [here](https://github.com/google/ml-metadata/blob/r0.24.0/ml_metadata/proto/metadata_store.proto) to see descriptions of each data type covered in this tutorial.
## Define ML Metadata's Storage Database
The first step would be to instantiate your storage backend. As mentioned in class, there are several types supported such as fake (temporary) database, SQLite, MySQL, and even cloud-based storage. For this demo, you will just be using a fake database for quick experimentation.
```
# Instantiate a connection config
connection_config = metadata_store_pb2.ConnectionConfig()
# Set an empty fake database proto
connection_config.fake_database.SetInParent()
# Setup the metadata store
store = metadata_store.MetadataStore(connection_config)
```
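For reference, if you wanted the records to outlive the notebook session, the same API accepts a SQLite-backed config. A minimal sketch (the file path is an arbitrary example and is not used in the rest of this lab):
```
# Alternative (not used below): persist the store in a local SQLite file
sqlite_config = metadata_store_pb2.ConnectionConfig()
sqlite_config.sqlite.filename_uri = './mlmd.sqlite'
sqlite_config.sqlite.connection_mode = 3  # READWRITE_OPENCREATE
# persistent_store = metadata_store.MetadataStore(sqlite_config)
```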
## Register ArtifactTypes
Next, you will create the artifact types needed and register them to the store. Since our simple exercise will just involve generating a schema using TFDV, you will only create two artifact types: one for the **input dataset** and another for the **output schema**. The main steps will be to:
* Declare an `ArtifactType()`
* Define the name of the artifact type
* Define the necessary properties within these artifact types. For example, it is important to know the data split name so you may want to have a `split` property for the artifact type that holds datasets.
* Use `put_artifact_type()` to register them to the metadata store. This generates an `id` that you can use later to refer to a particular artifact type.
*Bonus: For practice, you can also extend the code below to create an artifact type for the statistics.*
```
# Create ArtifactType for the input dataset
data_artifact_type = metadata_store_pb2.ArtifactType()
data_artifact_type.name = 'DataSet'
data_artifact_type.properties['name'] = metadata_store_pb2.STRING
data_artifact_type.properties['split'] = metadata_store_pb2.STRING
data_artifact_type.properties['version'] = metadata_store_pb2.INT
# Register artifact type to the Metadata Store
data_artifact_type_id = store.put_artifact_type(data_artifact_type)
# Create ArtifactType for Schema
schema_artifact_type = metadata_store_pb2.ArtifactType()
schema_artifact_type.name = 'Schema'
schema_artifact_type.properties['name'] = metadata_store_pb2.STRING
schema_artifact_type.properties['version'] = metadata_store_pb2.INT
# Register artifact type to the Metadata Store
schema_artifact_type_id = store.put_artifact_type(schema_artifact_type)
print('Data artifact type:\n', data_artifact_type)
print('Schema artifact type:\n', schema_artifact_type)
print('Data artifact type ID:', data_artifact_type_id)
print('Schema artifact type ID:', schema_artifact_type_id)
```
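For the bonus above, here is one possible sketch of an additional artifact type for the statistics. The property names are just a reasonable choice, not part of the lab:
```
# (Bonus) ArtifactType for dataset statistics -- property names are illustrative
stats_artifact_type = metadata_store_pb2.ArtifactType()
stats_artifact_type.name = 'Statistics'
stats_artifact_type.properties['name'] = metadata_store_pb2.STRING
stats_artifact_type.properties['split'] = metadata_store_pb2.STRING
stats_artifact_type.properties['version'] = metadata_store_pb2.INT
# Register artifact type to the Metadata Store
stats_artifact_type_id = store.put_artifact_type(stats_artifact_type)
print('Statistics artifact type ID:', stats_artifact_type_id)
```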
## Register ExecutionType
You will then create the execution types needed. For the simple setup, you will just declare one for the data validation component with a `state` property so you can record if the process is running or already completed.
```
# Create ExecutionType for Data Validation component
dv_execution_type = metadata_store_pb2.ExecutionType()
dv_execution_type.name = 'Data Validation'
dv_execution_type.properties['state'] = metadata_store_pb2.STRING
# Register execution type to the Metadata Store
dv_execution_type_id = store.put_execution_type(dv_execution_type)
print('Data validation execution type:\n', dv_execution_type)
print('Data validation execution type ID:', dv_execution_type_id)
```
## Generate input artifact unit
With the artifact types created, you can now create instances of those types. The cell below creates the artifact for the input dataset. This artifact is recorded in the metadata store through the `put_artifacts()` function. Again, it generates an `id` that can be used for reference.
```
# Declare input artifact of type DataSet
data_artifact = metadata_store_pb2.Artifact()
data_artifact.uri = './data/train/data.csv'
data_artifact.type_id = data_artifact_type_id
data_artifact.properties['name'].string_value = 'Chicago Taxi dataset'
data_artifact.properties['split'].string_value = 'train'
data_artifact.properties['version'].int_value = 1
# Submit input artifact to the Metadata Store
data_artifact_id = store.put_artifacts([data_artifact])[0]
print('Data artifact:\n', data_artifact)
print('Data artifact ID:', data_artifact_id)
```
## Generate execution unit
Next, you will create an instance of the `Data Validation` execution type you registered earlier. You will set the state to `RUNNING` to signify that you are about to run the TFDV function. This is recorded with the `put_executions()` function.
```
# Register the Execution of a Data Validation run
dv_execution = metadata_store_pb2.Execution()
dv_execution.type_id = dv_execution_type_id
dv_execution.properties['state'].string_value = 'RUNNING'
# Submit execution unit to the Metadata Store
dv_execution_id = store.put_executions([dv_execution])[0]
print('Data validation execution:\n', dv_execution)
print('Data validation execution ID:', dv_execution_id)
```
## Register input event
An event defines a relationship between artifacts and executions. You will generate the input event relationship for dataset artifact and data validation execution units. The list of event types are shown [here](https://github.com/google/ml-metadata/blob/master/ml_metadata/proto/metadata_store.proto#L187) and the event is recorded with the `put_events()` function.
```
# Declare the input event
input_event = metadata_store_pb2.Event()
input_event.artifact_id = data_artifact_id
input_event.execution_id = dv_execution_id
input_event.type = metadata_store_pb2.Event.DECLARED_INPUT
# Submit input event to the Metadata Store
store.put_events([input_event])
print('Input event:\n', input_event)
```
## Run the TFDV component
You will now run the TFDV component to generate the schema of the dataset. This should look familiar since you've done this already in Week 1.
```
# Infer a schema by passing statistics to `infer_schema()`
train_data = './data/train/data.csv'
train_stats = tfdv.generate_statistics_from_csv(data_location=train_data)
schema = tfdv.infer_schema(statistics=train_stats)
schema_file = './schema.pbtxt'
tfdv.write_schema_text(schema, schema_file)
print("Dataset's Schema has been generated at:", schema_file)
```
## Generate output artifact unit
Now that the TFDV component has finished running and schema has been generated, you can create the artifact for the generated schema.
```
# Declare output artifact of type Schema_artifact
schema_artifact = metadata_store_pb2.Artifact()
schema_artifact.uri = schema_file
schema_artifact.type_id = schema_artifact_type_id
schema_artifact.properties['version'].int_value = 1
schema_artifact.properties['name'].string_value = 'Chicago Taxi Schema'
# Submit output artifact to the Metadata Store
schema_artifact_id = store.put_artifacts([schema_artifact])[0]
print('Schema artifact:\n', schema_artifact)
print('Schema artifact ID:', schema_artifact_id)
```
## Register output event
Analogous to the input event earlier, you also want to define an output event to record the output artifact of a particular execution unit.
```
# Declare the output event
output_event = metadata_store_pb2.Event()
output_event.artifact_id = schema_artifact_id
output_event.execution_id = dv_execution_id
output_event.type = metadata_store_pb2.Event.DECLARED_OUTPUT
# Submit output event to the Metadata Store
store.put_events([output_event])
print('Output event:\n', output_event)
```
## Update the execution unit
As the TFDV component has finished running successfully, you need to update the `state` of the execution unit and record it again to the store.
```
# Mark the `state` as `COMPLETED`
dv_execution.id = dv_execution_id
dv_execution.properties['state'].string_value = 'COMPLETED'
# Update execution unit in the Metadata Store
store.put_executions([dv_execution])
print('Data validation execution:\n', dv_execution)
```
## Setting up Context Types and Generating a Context Unit
You can group the artifacts and execution units into a `Context`. First, you need to define a `ContextType` which defines the required context. It follows a similar format as artifact and event types. You can register this with the `put_context_type()` function.
```
# Create a ContextType
expt_context_type = metadata_store_pb2.ContextType()
expt_context_type.name = 'Experiment'
expt_context_type.properties['note'] = metadata_store_pb2.STRING
# Register context type to the Metadata Store
expt_context_type_id = store.put_context_type(expt_context_type)
```
Similarly, you can create an instance of this context type and use the `put_contexts()` method to register to the store.
```
# Generate the context
expt_context = metadata_store_pb2.Context()
expt_context.type_id = expt_context_type_id
# Give the experiment a name
expt_context.name = 'Demo'
expt_context.properties['note'].string_value = 'Walkthrough of metadata'
# Submit context to the Metadata Store
expt_context_id = store.put_contexts([expt_context])[0]
print('Experiment Context type:\n', expt_context_type)
print('Experiment Context type ID: ', expt_context_type_id)
print('Experiment Context:\n', expt_context)
print('Experiment Context ID: ', expt_context_id)
```
## Generate attribution and association relationships
With the `Context` defined, you can now create its relationship with the artifact and executions you previously used. You will create the relationship between schema artifact unit and experiment context unit to form an `Attribution`.
Similarly, you will create the relationship between data validation execution unit and experiment context unit to form an `Association`. These are registered with the `put_attributions_and_associations()` method.
```
# Generate the attribution
expt_attribution = metadata_store_pb2.Attribution()
expt_attribution.artifact_id = schema_artifact_id
expt_attribution.context_id = expt_context_id
# Generate the association
expt_association = metadata_store_pb2.Association()
expt_association.execution_id = dv_execution_id
expt_association.context_id = expt_context_id
# Submit attribution and association to the Metadata Store
store.put_attributions_and_associations([expt_attribution], [expt_association])
print('Experiment Attribution:\n', expt_attribution)
print('Experiment Association:\n', expt_association)
```
## Retrieving Information from the Metadata Store
You've now recorded the needed information to the metadata store. If this were a persistent database, you could track which artifacts and events are related to each other even without seeing the code used to generate them. See a sample run below where you investigate which dataset was used to generate the schema. (*It would be obvious which dataset is used in our simple demo because we only have two artifacts registered. Thus, assume that you have thousands of entries in the metadata store.*)
```
# Get artifact types
store.get_artifact_types()
# Get 1st element in the list of `Schema` artifacts.
# You will investigate which dataset was used to generate it.
schema_to_inv = store.get_artifacts_by_type('Schema')[0]
# print output
print(schema_to_inv)
# Get events related to the schema id
schema_events = store.get_events_by_artifact_ids([schema_to_inv.id])
print(schema_events)
```
You see that it is an output of an execution so you can look up the execution id to see related artifacts.
```
# Get events related to the output above
execution_events = store.get_events_by_execution_ids([schema_events[0].execution_id])
print(execution_events)
```
You see the declared input of this execution so you can select that from the list and lookup the details of the artifact.
```
# Look up the artifact that is a declared input
artifact_input = execution_events[0]
store.get_artifacts_by_id([artifact_input.artifact_id])
```
Great! Now you've fetched the dataset artifact that was used to generate the schema. You can approach this differently and we urge you to practice using the different methods of the [MetadataStore API](https://www.tensorflow.org/tfx/ml_metadata/api_docs/python/mlmd/MetadataStore) to get more familiar with interacting with the database.
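For example, since the schema artifact and the data validation execution were both grouped under the `Demo` context, you can also query by context. A short sketch using the same store:
```
# Fetch everything attributed/associated to the experiment context
print(store.get_artifacts_by_context(expt_context_id))
print(store.get_executions_by_context(expt_context_id))
```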
### Wrap Up
In this notebook, you got to practice using ML Metadata outside of TFX. This should help you understand its inner workings so you will know better how to query ML Metadata stores or even set it up for your own use cases. TFX leverages this library to keep records of pipeline runs and you will get to see more of that in the next labs. Next up, you will review how to work with schemas and in the next notebook, you will see how it can be implemented in a TFX pipeline.
## Exercise: Pricing a European Call Option under Risk Neutrality
#### John Stachurski
Let's price a European option under the assumption of risk neutrality (for simplicity).
Suppose that the current time is $t=0$ and the expiry date is $n$.
We need to evaluate
$$ P_0 = \beta^n \mathbb E_0 \max\{ S_n - K, 0 \} $$
given
* the discount factor $\beta$
* the strike price $K$
* the stochastic process $\{S_t\}$
A common model for $\{S_t\}$ is
$$ \ln \frac{S_{t+1}}{S_t} = \mu + \sigma \xi_{t+1} $$
where $\{ \xi_t \}$ is IID and standard normal. However, its predictions are in some ways counterfactual. For example, volatility is not stationary but rather changes over time. Here's an improved version:
$$ \ln \frac{S_{t+1}}{S_t} = \mu + \sigma_t \xi_{t+1} $$
where
$$
\sigma_t = \exp(h_t),
\quad
h_{t+1} = \rho h_t + \nu \eta_{t+1}
$$
Compute the price of the option $P_0$ by Monte Carlo, averaging over realizations $S_n^1, \ldots, S_n^M$ of $S_n$ and appealing to the law of large numbers:
$$ \mathbb E_0 \max\{ S_n - K, 0 \}
\approx
\frac{1}{M} \sum_{m=1}^M \max \{S_n^m - K, 0 \}
$$
Use the following parameters:
```
β = 0.96
μ = 0.005
S0 = 10
h0 = 0
K = 100
n = 10
ρ = 0.5
ν = 0.01
M = 5_000_000
```
**Suggestion**: Start without jitting your functions, as jitted functions are harder to debug. Choose a smaller value for `M` and try to get your code to run. Then think about jitting.
The distribution of prices is heavy tailed, so the result has high variance even for large `M`. My best estimate is around $1,530.
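Before looking at the solution, here is a minimal un-jitted sketch that simulates a single realization of $S_n$. It assumes the parameter cell above has been run and is a convenient starting point for debugging:
```
import numpy as np

# One realization of S_n under the stochastic-volatility model above
rng = np.random.default_rng(1234)
s, h = np.log(S0), h0
for t in range(n):
    s = s + μ + np.exp(h) * rng.standard_normal()
    h = ρ * h + ν * rng.standard_normal()
print("One simulated value of S_n:", np.exp(s))
```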
### Solution
```
import numpy as np
from numpy.random import randn
from numba import jit, prange
from quantecon import tic, toc
```
Here's a solution that's jitted but not parallelized. A parallelized solution is below.
```
@jit(nopython=True)
def compute_call_price(β=0.96,
μ=0.005,
S0=10,
h0=0,
K=100,
n=10,
ρ=0.5,
ν=0.01,
M=5_000_000):
current_sum = 0.0
for m in range(M):
s = np.log(S0)
h = h0
for t in range(n):
s = s + μ + np.exp(h) * randn()
h = ρ * h + ν * randn()
current_sum += np.maximum(np.exp(s) - K, 0)
return β**n * current_sum / M  # discount the MC estimate of the expected payoff
tic()
price = compute_call_price()
toc()
tic()
price = compute_call_price()
toc()
price
```
Let's try to parallelize this task.
```
@jit(nopython=True, parallel=True)
def compute_call_price_parallel(β=0.96,
μ=0.005,
S0=10,
h0=0,
K=100,
n=10,
ρ=0.5,
ν=0.01,
M=50_000_000):
current_sum = 0.0
for m in prange(M):
s = np.log(S0)
h = h0
for t in range(n):
s = s + μ + np.exp(h) * randn()
h = ρ * h + ν * randn()
current_sum += np.maximum(np.exp(s) - K, 0)
return β**n * current_sum / M  # discount the MC estimate of the expected payoff
tic()
price = compute_call_price_parallel()
toc()
tic()
price = compute_call_price_parallel()
toc()
price
```
Objective
------------------------
Try out different hypotheses to investigate the effect of lockdown measures on mobility
- Assume that mobility is affected by weather, lockdown and miscellaneous factors
- Treat the miscellaneous information as a single feature, such as week information (whether it is a holiday week, etc.)
- Assume mobility follows a weekly pattern (people tend to spend less time in parks Mo-Fr, for example). Exploit assumptions about human routines here
- Consider every day independent of one another
Methodology
----------------------------------
Consider
- Derive features for weather (initially consider simply the median temperature; see the sketch after this list)
- Lockdown index (some number)
- Mobility value
- is_weekend
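A minimal sketch of the feature derivation described above. It assumes a hypothetical hourly weather DataFrame `hourly_weather` indexed by timestamp with an `air_temperature` column; the actual data loading for each hypothesis is shown further below.
```
# Illustrative only: `hourly_weather` is a hypothetical hourly DataFrame indexed by timestamp
import numpy as np
daily = hourly_weather[['air_temperature']].resample('1D').median()
daily['is_weekend'] = np.where(daily.index.weekday >= 5, 1, 0)
```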
# Data Sources
In order to run the cells the data has to be downloaded manually from these sources. Special thanks to the following sources for providing an open source license to access the data.
* Apple mobility data: https://covid19.apple.com/mobility
* Oxford stringency: https://github.com/OxCGRT/covid-policy-tracker
* Weather forecast from Yr, delivered by the Norwegian Meteorological Institute and NRK: https://api.met.no/weatherapi/locationforecast/2.0/
* Historical weather data from https://mesonet.agron.iastate.edu/ASOS/
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from ipywidgets import Dropdown,IntSlider
from IPython.display import display
import os
%matplotlib inline
from functools import reduce
try:
import graphviz
except:
!pip install graphviz
import graphviz
try:
import pydotplus
except:
!pip install pydotplus
from IPython.display import display
import networkx as nx
try:
import pydot
except:
!pip install pydot
try:
from dowhy import CausalModel
except:
#!pip install sympy
!pip install -I dowhy
from dowhy import CausalModel
```
Hypothesis I
------------------
Consider daily data for Berlin
Weather: historical air temperature
Mobility: Apple (Transit)
Stringency: OXCGRT
```
from project_lib import Project
project = Project.access()
Oxford_Stringency_Index_credentials = project.get_connected_data(name="Oxford Stringency Index")
import dsx_core_utils, os, io
import pandas as pd
from sqlalchemy import create_engine
import sqlalchemy
sqla_url= "db2+ibm_db://" + Oxford_Stringency_Index_credentials['username']+ ':' + Oxford_Stringency_Index_credentials['password'] + "@"+ Oxford_Stringency_Index_credentials['host'] + ":50001/BLUDB;Security=ssl;"
#sqlalchemy
engine = create_engine(sqla_url, pool_size=10, max_overflow=20)
conn = engine.connect()
# @hidden_cell
# The following code contains the credentials for a connection in your Project.
# You might want to remove those credentials before you share your notebook.
from project_lib import Project
project = Project.access()
Apple_transit_mobility_credentials = project.get_connected_data(name="Apple Transit Mobility")
apple_sqla_url= "db2+ibm_db://" + Apple_transit_mobility_credentials['username']+ ':' + Apple_transit_mobility_credentials['password'] + "@"+ Apple_transit_mobility_credentials['host'] + ":50001/BLUDB;Security=ssl;"
#sqlalchemy
apple_engine = create_engine(apple_sqla_url, pool_size=10, max_overflow=20)
apple_conn = apple_engine.connect()
app_mob_df = pd.read_sql_table(Apple_transit_mobility_credentials['datapath'].split("/")[-1].lower(), apple_conn,index_col=['Timestamp'])
be_app_trans_df = app_mob_df[app_mob_df.region=='Berlin']
be_app_trans_df.drop(columns=['region'],inplace=True)
ox_df = pd.read_sql_table("oxford_stringency_index", conn)
#ox_df.rename({'datetime_date':'date'},axis=1,inplace=True)
# Stringency Germany
#ox_df = pd.read_csv("/project_data/data_asset/sun/oxcgrt/OxCGRT_latest.csv")
ox_df["date"] = pd.to_datetime(ox_df["date"],format="%Y%m%d")
be_ox_df = ox_df[ox_df.countrycode=="DEU"]
be_ox_df.index= be_ox_df['date']
be_ox_df = be_ox_df[['stringencyindex']]
be_ox_df.rename({'stringencyindex':'lockdown'},axis=1,inplace=True)
# Max temperature
be_weather_df = pd.read_csv("/project_data/data_asset/mercury/weather/berlin_historical_weather.csv",index_col=[0])
be_weather_df.index = pd.to_datetime(be_weather_df.index)
dfs = [be_ox_df,be_app_trans_df,be_weather_df]
df_final = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs)
df_final['is_weekend'] = np.where((df_final.index.weekday == 5)|(df_final.index.weekday == 6),1,0)
#df_final.rename({'stringencyindex':'lockdown'},axis=1,inplace=True)
#df_final.to_csv('/project_data/data_asset/mercury/germany_daily_asset_with_other_weather_params.csv')
df_final.head()
fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8))
axs[0].plot(df_final['mobility'])
axs[1].plot(df_final['lockdown'])
axs[2].plot(df_final['air_temperature'])
```
Why do I think day-of-week information is useful? The graph above suggests that there is a strong periodic component in the mobility data.
Let me plot the power spectral density and check whether there is any periodicity in the data.
```
plt.figure(figsize=(16,8))
plt.stem(np.abs(np.fft.fft(df_final[df_final.index<=pd.to_datetime('2020-03-15')]['mobility'].values-np.mean(df_final[df_final.index<=pd.to_datetime('2020-03-15')]['mobility'].values))))
```
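The stem plot above is indexed by FFT bin, which makes the period hard to read off. A sketch of the same check on a cycles-per-day axis, assuming `df_final` from above (daily sampling, pre-lockdown window):
```
# Periodogram of pre-lockdown mobility; fs = 1 sample/day, so a weekly cycle sits at 1/7 cycles/day
from scipy.signal import periodogram
pre = df_final[df_final.index <= pd.to_datetime('2020-03-15')]['mobility'].dropna()
freqs, psd = periodogram(pre - pre.mean(), fs=1.0)
plt.figure(figsize=(10, 4))
plt.plot(freqs[1:], psd[1:])
plt.axvline(1/7, color='r', linestyle='--', label='1/7 cycles per day (weekly)')
plt.xlabel('Frequency (cycles/day)')
plt.ylabel('Power spectral density')
plt.legend()
```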
Let me consider the day of the week as a feature for causal inference and add it as a column in the data source.
```
df_final.dropna()
h1_causal_graph = nx.DiGraph()
h1_causal_graph.add_edge('is_weekend','mobility')
h1_causal_graph.add_edge('lockdown','mobility')
h1_causal_graph.add_edge('air_temperature','lockdown')
h1_causal_graph.add_edge('air_temperature','mobility')
graph_filename_h1='causal_mobility_weather_h1.dot'
nx.drawing.nx_pydot.write_dot(h1_causal_graph,graph_filename_h1)
with open(graph_filename_h1) as f:
dot_graph = f.read()
graphviz.Source(dot_graph)
h1_model = CausalModel(data=df_final.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_h1,proceed_when_unidentifiable=True)
print(h1_model)
h1_estimand = h1_model.identify_effect()
print(h1_estimand)
h1_estimate = h1_model.estimate_effect(h1_estimand,method_name='backdoor.linear_regression',test_significance=True)
print(h1_estimate)
```
Validate the causal effect estimate
```
h1_ref1 = h1_model.refute_estimate(estimand=h1_estimand, estimate=h1_estimate,method_name='placebo_treatment_refuter')
print(h1_ref1)
h1_ref2 = h1_model.refute_estimate(estimand=h1_estimand, estimate=h1_estimate,method_name='random_common_cause')
print(h1_ref2)
```
Hypothesis II
------------------
Using google mobility instead of Apple transit mobility
Consider daily data for Berlin
Weather: historical air temperature
Mobility: Google mobility data - transit station
Stringency: OXCGRT data
```
# @hidden_cell
# The following code contains the credentials for a connection in your Project.
# You might want to remove those credentials before you share your notebook.
Google_mobility_credentials = project.get_connected_data(name="Google mobility")
Google_mobility_df = pd.read_sql_table(Google_mobility_credentials['datapath'].split("/")[-1].lower(),conn)
be_google_mobility_df = Google_mobility_df[Google_mobility_df.sub_region_1=="Berlin"][['transit_stations_percent_change_from_baseline']]
be_google_mobility_df.index = pd.to_datetime(Google_mobility_df[Google_mobility_df.sub_region_1=="Berlin"]['date'])
dfs2 = [be_ox_df,be_google_mobility_df,be_weather_df]
df_final2 = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs2)
df_final2.rename({'transit_stations_percent_change_from_baseline':'mobility','StringencyIndex':'lockdown'},axis=1,inplace=True)
df_final2['is_weekend'] = np.where((df_final2.index.weekday == 5)|(df_final2.index.weekday == 6),1,0)
fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8))
axs[0].plot(df_final2['mobility'])
axs[1].plot(df_final2['lockdown'])
axs[2].plot(df_final2['air_temperature'])
h2_causal_graph = nx.DiGraph()
h2_causal_graph.add_edge('is_weekend','mobility')
h2_causal_graph.add_edge('lockdown','mobility')
h2_causal_graph.add_edge('air_temperature','lockdown')
h2_causal_graph.add_edge('air_temperature','mobility')
graph_filename_h2='causal_mobility_weather_h2.dot'
nx.drawing.nx_pydot.write_dot(h2_causal_graph,graph_filename_h2)
with open(graph_filename_h2) as f:
dot_graph = f.read()
graphviz.Source(dot_graph)
h2_model = CausalModel(data=df_final2.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_h2,proceed_when_unidentifiable=True)
print(h2_model)
h2_estimand = h2_model.identify_effect()
print(h2_estimand)
h2_estimate = h2_model.estimate_effect(h2_estimand,method_name='backdoor.linear_regression',test_significance=True)
print(h2_estimate)
h2_ref1 = h2_model.refute_estimate(estimand=h2_estimand, estimate=h2_estimate,method_name='placebo_treatment_refuter')
print(h2_ref1)
h2_ref2 = h2_model.refute_estimate(estimand=h2_estimand, estimate=h2_estimate,method_name='random_common_cause')
print(h2_ref2)
```
**Remark**
Google data is available only from mid-February, whereas Apple mobility data is available since mid-January, so we use the Apple mobility data.
Hypothesis III
------------------
Consider daily data for Berlin
Weather: historical air temperature
Mobility: Apple (Transit)
Stringency: OXCGRT Clustering data
```
# @hidden_cell
# The following code contains the credentials for a connection in your Project.
# You might want to remove those credentials before you share your notebook.
from project_lib import Project
project = Project.access()
Emergent_DB2_Warehouse_credentials = project.get_connection(name="db2 Warehouse ealuser")
import dsx_core_utils, os, io
import pandas as pd
from sqlalchemy import create_engine
import sqlalchemy
sqla_url= "db2+ibm_db://" + Emergent_DB2_Warehouse_credentials['username']+ ':' + Emergent_DB2_Warehouse_credentials['password'] + "@"+ Emergent_DB2_Warehouse_credentials['host'] + ":50001/BLUDB;Security=ssl;"
#sqlalchemy
engine = create_engine(sqla_url, pool_size=10, max_overflow=20)
stringency_clustering_df = pd.read_sql_query('SELECT * FROM "EALUSER"."STRINGENCY_INDEX_CLUSTERING"',engine)
be_stringency_clustering_df = stringency_clustering_df[stringency_clustering_df.country=="Germany"]
be_stringency_clustering_df.index = pd.to_datetime(be_stringency_clustering_df['state_date'])
be_stringency_clustering_df = be_stringency_clustering_df.rename({'state_value':'lockdown'},axis=1)[['lockdown']]
dfs3 = [be_stringency_clustering_df,be_app_trans_df,be_weather_df]
df_final3 = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs3)
df_final3.rename({'change':'mobility'},axis=1,inplace=True)
df_final3['is_weekend'] = np.where((df_final3.index.weekday == 5)|(df_final3.index.weekday == 6),1,0)
#df_final.to_csv('/project_data/data_asset/mercury/germany_daily_asset_with_other_weather_params.csv')
fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8))
axs[0].plot(df_final3['mobility'])
axs[1].plot(df_final3['lockdown'])
axs[2].plot(df_final3['air_temperature'])
h3_causal_graph = nx.DiGraph()
h3_causal_graph.add_edge('is_weekend','mobility')
h3_causal_graph.add_edge('lockdown','mobility')
h3_causal_graph.add_edge('air_temperature','lockdown')
h3_causal_graph.add_edge('air_temperature','mobility')
graph_filename_h3='causal_mobility_weather_h3.dot'
nx.drawing.nx_pydot.write_dot(h3_causal_graph,graph_filename_h3)
with open(graph_filename_h3) as f:
dot_graph = f.read()
graphviz.Source(dot_graph)
h3_model = CausalModel(data=df_final3.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_h3,proceed_when_unidentifiable=True)
print(h3_model)
h3_estimand = h3_model.identify_effect()
print(h3_estimand)
h3_estimate = h3_model.estimate_effect(h3_estimand,method_name='backdoor.linear_regression',test_significance=True)
print(h3_estimate)
h3_ref1 = h3_model.refute_estimate(estimand=h3_estimand, estimate=h3_estimate,method_name='placebo_treatment_refuter')
print(h3_ref1)
h3_ref2 = h3_model.refute_estimate(estimand=h3_estimand, estimate=h3_estimate,method_name='random_common_cause')
print(h3_ref2)
```
**Remark**
The causal estimate has a very low p-value when we use the stringency clustering data, so we can also replace the raw Oxford stringency data with the stringency clustering data.
Hypothesis IV
------------------
Consider daily data for Berlin
Weather: historical air temperature
Mobility: Waze mobility data - Source: https://raw.githubusercontent.com/ActiveConclusion/COVID19_mobility/master/waze_reports/Waze_City-Level_Data.csv
Stringency: OXCGRT data
```
waze_df = pd.read_csv("https://raw.githubusercontent.com/ActiveConclusion/COVID19_mobility/master/waze_reports/Waze_City-Level_Data.csv")
waze_df['Date'] = pd.to_datetime(waze_df['Date'])
be_waze_df = waze_df[waze_df.City=="Berlin"]
be_waze_df.index = be_waze_df['Date']
be_waze_df = be_waze_df[['% Change In Waze Driven Miles/KMs']]
be_waze_df.columns = ['mobility']
dfs4 = [be_ox_df,be_waze_df,be_weather_df]
df_final4 = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs4)
df_final4['is_weekend'] = np.where((df_final4.index.weekday == 5)|(df_final4.index.weekday == 6),1,0)
#df_final4.rename({'StringencyIndex':'lockdown'},axis=1,inplace=True)
df_final4
fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8))
axs[0].plot(df_final4['mobility'])
axs[1].plot(df_final4['lockdown'])
axs[2].plot(df_final4['air_temperature'])
h4_causal_graph = nx.DiGraph()
h4_causal_graph.add_edge('is_weekend','mobility')
h4_causal_graph.add_edge('lockdown','mobility')
h4_causal_graph.add_edge('air_temperature','lockdown')
h4_causal_graph.add_edge('air_temperature','mobility')
graph_filename_h4='causal_mobility_weather_h4.dot'
nx.drawing.nx_pydot.write_dot(h4_causal_graph,graph_filename_h4)
with open(graph_filename_h4) as f:
dot_graph = f.read()
graphviz.Source(dot_graph)
h4_model = CausalModel(data=df_final4.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_h4,proceed_when_unidentifiable=True)
print(h4_model)
h4_estimand = h4_model.identify_effect()
print(h4_estimand)
h4_estimate = h4_model.estimate_effect(h4_estimand,method_name='backdoor.linear_regression',test_significance=True)
print(h4_estimate)
h4_ref1 = h4_model.refute_estimate(estimand=h4_estimand, estimate=h4_estimate,method_name='placebo_treatment_refuter')
print(h4_ref1)
h4_ref2 = h4_model.refute_estimate(estimand=h4_estimand, estimate=h4_estimate,method_name='random_common_cause')
print(h4_ref2)
```
**Comments**
Since this source covers only driving data, the plot shows that it is not strongly affected by the lockdown measures. Moreover, the driving mobility data is available only from 01.03.2020.
Hypothesis V
------------------
Consider daily data for other cities/country such as London, New york and Singapore
Weather: historical air temperature
Mobility: Apple mobility (transit)
Stringency: OXCGRT data
1. London - EGLL, GBR
2. New York - NYC, USA
3. Singapore - WSAP, SGP
```
app_df = pd.read_csv("/project_data/data_asset/sun/apple_mobility/applemobilitytrends-2020-10-14.csv")
def region_specific_data(mobility_region,weather_station,stringency_country_code):
cs_app_trans_df = app_df[(app_df.region==mobility_region)&
(app_df.transportation_type=="transit")].drop(['geo_type','region','transportation_type',
'alternative_name','sub-region','country'],axis=1).transpose()
cs_app_trans_df.columns= ['mobility']
# Stringency Germany
if stringency_country_code == "GBR":
# Consider only England
cs_ox_df = ox_df[ox_df.regionname=="England"]
cs_ox_df.index= cs_ox_df['date']
cs_ox_df = cs_ox_df[['stringencyindex']]
elif stringency_country_code == "USA":
# Consider only New York
cs_ox_df = ox_df[ox_df.regionname=="New York"]
cs_ox_df.index= cs_ox_df['date']
cs_ox_df = cs_ox_df[['stringencyindex']]
else:
cs_ox_df = ox_df[ox_df.countrycode==stringency_country_code]
cs_ox_df.index= cs_ox_df['date']
cs_ox_df = cs_ox_df[['stringencyindex']]
# Max temperature
historical_url = "https://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?station={}&data=tmpc&year1=2020&month1=1&day1=1&year2=2020&month2=10&day2=28&tz=Etc%2FUTC&format=onlycomma&latlon=no&missing=M&trace=T&direct=no&report_type=1&report_type=2".format(weather_station)
hist_weather_df = pd.read_csv(historical_url)
# Replace missing and trace as na
hist_weather_df.replace("M",np.nan,inplace=True)
hist_weather_df.replace("T",np.nan,inplace=True)
#Convert to float
hist_weather_df['tmpc'] = hist_weather_df['tmpc'].astype(np.float64)
hist_weather_df['valid'] = pd.to_datetime(hist_weather_df['valid'])
hist_weather_df.rename({'valid':'time','tmpc':'air_temperature'},axis=1, inplace=True)
hist_weather_df.index = hist_weather_df['time']
hist_weather_df = hist_weather_df.resample("1D").median()
dfs = [cs_ox_df,cs_app_trans_df,hist_weather_df]
df_final = reduce(lambda left,right: pd.merge(left,right,left_index=True,right_index=True,how='inner'), dfs)
df_final.rename({'stringencyindex':'lockdown'},axis=1,inplace=True)
df_final['is_weekend'] = np.where((df_final.index.weekday == 5)|(df_final.index.weekday == 6),1,0)
#return df_final
fig,axs = plt.subplots(nrows=3,ncols=1,figsize=(12,8))
axs[0].plot(df_final['mobility'])
axs[1].plot(df_final['lockdown'])
axs[2].plot(df_final['air_temperature'])
fig.suptitle(mobility_region)
plt.show()
causal_graph = nx.DiGraph()
causal_graph.add_edge('is_weekend','mobility')
causal_graph.add_edge('lockdown','mobility')
causal_graph.add_edge('air_temperature','lockdown')
causal_graph.add_edge('air_temperature','mobility')
graph_filename_='causal_mobility_weather_.dot'
nx.drawing.nx_pydot.write_dot(causal_graph,graph_filename_)
with open(graph_filename_) as f:
dot_graph = f.read()
graphviz.Source(dot_graph)
_model = CausalModel(data=df_final.dropna(),treatment=['lockdown'],outcome='mobility',instruments=[],graph=graph_filename_,proceed_when_unidentifiable=True)
print(_model)
_estimand = _model.identify_effect()
print(_estimand)
_estimate = _model.estimate_effect(_estimand,method_name='backdoor.linear_regression',test_significance=True)
print(_estimate)
_ref1 = _model.refute_estimate(estimand=_estimand, estimate=_estimate,method_name='placebo_treatment_refuter')
print(_ref1)
_ref2 = _model.refute_estimate(estimand=_estimand, estimate=_estimate,method_name='random_common_cause')
print(_ref2)
return 1
region_specific_data('London','EGLL', 'GBR')
region_specific_data('New York','NYC', 'USA')
region_specific_data('Singapore','WSAP', 'SGP')
```
**Comments**
* For all three cities the estimator parameters given by the dowhy model are the same: "mobility ~ lockdown+is_weekend+air_temperature+lockdown*is_weekend"
**Author**
* Shri Nishanth Rajendran - AI Development Specialist, R² Data Labs, Rolls Royce
Special thanks to Deepak Srinivasan and Alvaro Corrales Cano
# Studying avoided crossing for a 1 cavity-2 qubit system, <mark>with and without thermal losses</mark>
1. **Introduction**
2. **Problem parameters**
3. **Setting up operators, Hamiltonian's, and the initial state**
4. **Demonstrating avoided crossing**
* Plotting the ramp pulse generated
* Solving the Master equation and plotting the results (without thermal losses)
5. **Studying the effect of various ramp times on avoided crossing**
* { Case I } <u>No thermal losses</u>
* { Case II } <u>Thermal losses</u>
* Plotting the results
6. Calculating the Fidelity and Concurrence
**Author** : Soumya Shreeram ([email protected])<br>
**Supervisor** : Yu-Chin Chao ([email protected]) <br>
**Date**: 9th August 2019<br>
This script was coded as part of the Helen Edwards Summer Internship program at Fermilab. The code studies the effect of avoided crossing when loading a photon from a qubit into the cavity. This is done by generating pulses with varying ramp times and raising one of the qubit frequencies above the cavity frequency.
## 1. Introduction
The Jaynes-Cumming model is used to explain light-matter interaction in a system with a qubit and a single cavity mode. The Hamiltonian $H$ can be extended to describe a 2-qubit and cavity system as,
$$ H = \hbar \omega_c a^{\dagger}a+ \sum_{i=1}^2\frac{1}{2}\hbar \omega_{qi}\ \sigma_i^z + \sum_{i=1}^2\frac{1}{2} \hbar g(a^{\dagger} + a)(\sigma_i^-+\sigma_i^+)$$
which simplifies under the rotating-wave approximation as
$$ H_{\rm RWA} = \hbar \omega_c a^{\dagger}a+ \sum_{i=1}^2\frac{1}{2}\hbar \omega_{qi}\ \sigma_i^z + \sum_{i=1}^2\frac{1}{2} \ \hbar g\ (a^{\dagger}\sigma_i^- + a\ \sigma_i^+)$$
where $\omega_c$ and $\omega_{qi}$ are the cavity and qubit frequencies, while $a$ and $\sigma_i^-$ are the annihalation operators for the cavity and qubit respectively. Note that $i=1,2$ represents the 2 qubits.
```
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
plt.rcParams.update({'font.size': 16})
import numpy as np
from numpy import ones,vstack
from numpy.linalg import lstsq
from math import pi
from scipy.signal import find_peaks
from time import sleep
import sys
from qutip import *
```
## 2. Problem parameters
Here we use $\hbar=1$; the coupling terms are redefined with a multiple of $2\pi$ before them for convenience.
```
def generateTimePulse(tr, th):
"""
Function that generates the pulse based on the input parameters
@param tr :: ramp up/down time for the pulse
@param th :: hold time for the pulse
@return t_pulse :: np array with 4 times that define the pulse
"""
t_pulse = [0, 0, 0, 0]
t_pulse[0] = 0
t_pulse[1] = tr + t_pulse[0]
t_pulse[2] = t_pulse[1] + th
t_pulse[3] = t_pulse[2] + tr
print("The time pulse is: ", t_pulse)
return t_pulse
"""------------- FREQUENCIES -----------------"""
w_q1 = 2*pi*6.5; # Qubit 1 frequency
w_q2 = 2*pi*6.8; # Qubit 2 frequency: range from 1-9 GHz
w_f = 2*pi*7.1; # Resonator/ Filter frequency
"""------------- COUPLING --------------------"""
g_q1f = 2*pi*0.135 # qubit 1-filter coupling
#g_q2f = 2*pi*0.415 # qubit 2-filter coupling
numF = 1 # number of filters
N = 2 # number of fock states
times = np.linspace(0,200,1500)
"""------------- DISSIPATION PARAMETERS -----"""
kappa = 5*10**-3 # cavity dissipation rate
n_th_a = 3*10**-3 # avg. no. of thermal bath excitation
r1 = 5*10**-6 # qubit relaxation rate
r2 = 1*10**-5 # qubit dephasing rate
"""------------- PULSE CONTROL PARAMETERS -----"""
tr = 0 # ramp up and ramp down times
th = 110 # hold time
t_pulse = generateTimePulse(tr, th)
# amplitude to raise pulse above cavity frequency (optional)
d = 0.25
w_top = w_f + 2*pi*d
no_ramps = 800 # number of ramps pulses sent into the Hamiltonian
```
## 3. Setting up the operators, Hamiltonian's, and Initial state
For every qubit: <br> <br>
**sm** $\ \rightarrow \ \hat{\sigma}^{+(-)}$ is the raising and lowering operator of the *qubit* <br>
**sz** $\ \ \rightarrow \ \sigma_z $ is the Pauli-z matrix of the *qubit* <br>
**n** $\ \ \ \rightarrow \ n$ is the number operator
```
def numOp(m):
"""
Computes the number operator
@param loweringMat :: lowering matrix operator for a system
"""
return m.dag()*m
def rwaCoupling(m1, m2):
return m1.dag()*m2 + m2.dag()*m1
def setXYlabel(ax, x, y, req_title, title_):
"""
Generic function to set labels for plots
"""
ax.set_xlabel(x)
ax.set_ylabel(y)
if req_title == True:
ax.set_title(title_)
return
```
### 3.1 Operators
```
# cavity
a = tensor(destroy(N), qeye(2), qeye(2))
nc = numOp(a)
# qubit 1
sm1 = tensor(qeye(N), sigmam(), qeye(2))
sz1 = tensor(qeye(N), sigmaz(), qeye(2))
n1 = numOp(sm1)
# qubit 2
sm2 = tensor(qeye(N), qeye(2), sigmam())
sz2 = tensor(qeye(N), qeye(2), sigmaz())
n2 = numOp(sm2)
# collapse operators
c_ops = []
# cavity relaxation
rate = kappa * (1 + n_th_a)
c_ops.append(np.sqrt(rate) * a)
# cavity excitation
# qubit 1 relaxation
c_ops.append(np.sqrt(r1 * (1+n_th_a)) * sm1)
c_ops.append(np.sqrt(r1 * n_th_a) * sm1.dag())
c_ops.append(np.sqrt(r2) * sz1)
# qubit 2 relaxation
c_ops.append(np.sqrt(r1 * (1+n_th_a)) * sm2)
c_ops.append(np.sqrt(r1 * n_th_a) * sm2.dag())
c_ops.append(np.sqrt(r2) * sz2)
```
### 3.2 Hamiltonian's and initial state
```
# Qubit Hamiltonians (Hq1+Hq2)
Hq1 = 0.5*sz1
Hq2 = 0.5*sz2
# Filter Hamiltonians (refer formula in the Introduction)
Hf = numOp(a)
# Qubit-Filter Hamiltonian
Hqf = g_q1f*(rwaCoupling(a, sm1) + rwaCoupling(a, sm2))
# time-independent Hamiltonian (see later)
H0 = w_f*Hf + w_q2*Hq2 + Hqf
H = H0 + w_q1*Hq1 # Resultant Hamiltonian
```
### 3.3 Initial State
```
# initial state of the system. Qubit 1: excited, Qubit 2: ground st.
psi0 = tensor(basis(N,0), basis(2,0), basis(2,1))
```
## 4. Demonstrating avoided crossing
In this section the qubit frequency is raised above the cavity frequency with a linear ramp of duration $t$ (ns). The signal is held for a time $T-2t$ before being ramped down again.
Transformations of closed quantum states can be modelled by unitary operators. The combined time-dependent Hamiltonian of a system undergoing such a transformation can be represented as,
$$ H(t) = H_0 + \sum_{i=0}^n c_i(t)H_i$$
where $H_0$ is called the time-independent drift Hamiltonian and $H_i$ are the control Hamiltonians with a time varying amplitude $c_i(t)$.
Here we write the Hamiltonian in a function-based time-dependent way (see other formats [here](http://qutip.org/docs/latest/guide/dynamics/dynamics-time.html)). The time-dependent coefficients $c_i(t)$ of the Hamiltonian (e.g. `wf_t`, `wq1_t`, `wq2_t` below) are expressed as Python functions.
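As a minimal standalone sketch of this list format (a driven two-level system, unrelated to the circuit studied below):
```
# Toy example of the [H0, [H1, f(t)]] format: a qubit with a Gaussian drive pulse
def drive_coeff(t, args=None):
    return np.exp(-((t - 10.0) / 3.0)**2)
H0_demo = 0.5 * 2*pi * 5.0 * sigmaz()
H1_demo = 2*pi * 0.05 * sigmax()
tlist_demo = np.linspace(0, 20, 200)
res_demo = mesolve([H0_demo, [H1_demo, drive_coeff]], basis(2, 0), tlist_demo, [], [sigmaz()])
plt.plot(tlist_demo, res_demo.expect[0])
```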
### 4.1 Functions
```
"""----------------------------------------
PULSE FUNCTIONS
------------------------------------------"""
def fitLine(t_pulse, i, j, w1, w2, t):
"""
Function generates a best fit line between [x1, y1] ->[x2, y2]
Input:
@param t_pulse :: np array containing the 4 points parameterizing the pulse
@param i,j :: indicies of t_pulse determining the start-stop times
@param w1, w2 :: lower and higher frequencies of the ramp pulse
@param t :: interable time variable
Returns:
@polynomial(t) :: best-fit y value at t
"""
# compute coefficients
coefficients = np.polyfit([t_pulse[i], t_pulse[j]], [w1, w2], 1)
# generate best-fit polynmial
polynomial = np.poly1d(coefficients)
return polynomial(t)
def rampUp(t_pulse, w1, w2, t):
"""
Generating a ramp up pulse
Input:
@param t_pulse :: np array containing the 4 points parameterizing the pulse
@param w1, w2 :: lower and higher frequencies of the ramp pulse
@param t :: interable time variable
Returns:
@param w :: int giving the y-value based on t
"""
t0 = t_pulse[0]
t1 = t_pulse[1]
if t0 != t1:
if t < t1:
return w1 + fitLine(t_pulse, 0, 1, 0, (w2-w1), t)*(t>t0)
if t > t1:
return w1 + (w2-w1)*(t>t1)
else:
return w1 + (w2 - w1)*(t > t1)
def rampDown(t_pulse, w1, w2, t):
"""
Generating a ramp Down pulse
Same as the ramp Up pulse given above only with the
"""
t2 = t_pulse[2]
t3 = t_pulse[3]
if t2 != t3:
if t > t2:
return w1 + fitLine(t_pulse, 2, 3, (w2-w1), 0, t)*(t>t2 and t<t3)
if t < t2:
return w1 + (w2-w1)*(t<t2)
else:
return w1 + (w2-w1)*(t<t2)
def wq1_t(t, args=None):
"""
Function defines the time depended co-efficent of the qubit 1
w_q1(t) is a pulse wave going from 0 to height (w_f-w_q1) at T0_1
"""
return (rampUp(t_pulse, w_q1, w_top, t) + rampDown(t_pulse, w_q1, w_top, t)-w_top)
def wq1_tdown(t, args=None):
"""
Function defines the time depended co-efficent of the qubit 1
w_q1(t) is a pulse wave going from 0 to height (w_f-w_q1) at T0_1
"""
return rampDown(t_pulse, w_q1, w_top, t)
def wf_t(t, args=None):
"""
Function defines the time depended co-efficent of the filters
(Although, there is no frequency change of the filters with time)
so w_f(t) = constant
"""
return w_f
def wq2_t(t, args=None):
"""
Function defines the time depended co-efficent of qubit 2
(Although, there is no frequency change of the quibit 2 with time)
so w_q2(t) = constant
"""
return w_q2
"""---------------------------------------------
HAMILTONIAN FUNCTIONS
---------------------------------------------"""
def plotPulse(ax, times, t_pulse, w_q1, w_top, colorCode, label_, ramp):
"""
Plots the required pulse
"""
if ramp == True:
plotting = ax.plot(times, [rampUp(t_pulse, w_q1, w_top, t)/(2*pi) for t in times], colorCode, label=label_)
elif ramp == False:
plotting = ax.plot(times, [rampDown(t_pulse, w_q1, w_top, t)/(2*pi) for t in times], colorCode, label=label_)
if ramp == 'Custom':
plotting = ax.plot(times, [(rampUp(t_pulse, w_q1, w_top, t) + rampDown(t_pulse, w_q1, w_top, t)-w_top)/(2*pi) for t in times], colorCode, label=r"$\Delta$t = %.1f ns"%(t_pulse[1]-t_pulse[0]))
return plotting
def labelTimes(t_r, t_H):
return r"$\Delta t = %.2f {\ \rm ns}, t_{\rm H} = %.2f {\ \rm ns}$"%(t_r, t_H)
def plotFrequencies(ax, times, wf_t, Colour, labels_, linestyle_):
"""
Function plots the frequencies as a function of times
"""
ax.plot(times, np.array(list(map(wf_t, times)))/(2*pi), Colour, linewidth=2, label=labels_, linestyle=linestyle_)
ax.legend(loc = 'center left', bbox_to_anchor = (1.0, 0.5))
return
def setLabels(ax, tr, th, plot_no):
"""
Function sets the labels of the x-y axis in the plot below
"""
if plot_no == 0:
ax.set_ylabel("Frequency (GHz)", fontsize=16)
ax.set_title(labelTimes(tr, th))
else:
ax.set_xlabel("Time (ns)")
ax.set_ylabel("Occupation \n probability")
return
def plotProb(ax, times, component, res, Colour, labels_, linestyle_):
"""
Function plots the occupation probabilities of the components after running mesolve
"""
ax.plot(times, np.real(expect(component, res.states)), Colour, linewidth=1.5, label=labels_, linestyle=linestyle_)
ax.legend(loc = 'center left', bbox_to_anchor = (1.0, 0.5))
return
```
### 4.2 Plotting the ramp pulse generated
The figure below demonstrates how the combination of ramping up and down forms the required pulse.
```
fig, ax = plt.subplots(1, 1, figsize=(7,5))
t_pulse1 = [t_pulse[0], t_pulse[1]+2.5, t_pulse[2]-2.5, t_pulse[3]]
t_pulse2 = [t_pulse[0], (t_pulse[3]-t_pulse[0])/2+t_pulse[0], (t_pulse[3]-t_pulse[0])/2+t_pulse[0], t_pulse[3]]
# plotting the pulses
plotPulse(ax, times, t_pulse, w_q1, w_top, 'g--', r"$\Delta$t = Ramp up", True)
plotPulse(ax, times, t_pulse, w_q1, w_top, 'b--', r"$\Delta$t = Ramp down", False)
plotPulse(ax, times, t_pulse, w_q1, w_top, 'r', ' ', 'Custom')
plotPulse(ax, times, t_pulse1, w_q1, w_top, '#03fcba', ' ', 'Custom')
plotPulse(ax, times, t_pulse2, w_q1, w_top, '#c4f2f1', ' ', 'Custom')
# guide lines
ax.axvline(x=t_pulse[0], color='#f2d4c4', linestyle='--')
ax.axvline(x=t_pulse[3], color='#f2d4c4', linestyle='--')
ax.axvline(x=t_pulse2[2], color='#f2d4c4', linestyle='--')
setXYlabel(ax, 'Time (ns)', 'Frequency (Hz)', False, '')
ax.legend(loc="upper right")
fig.tight_layout()
```
### 4.3 Solving the Master equation and plotting the results (without thermal losses)
```
opts = Options(nsteps = 50000, atol = 1e-30)
# time dependent Hamiltonian
H_t = [H0, [Hq1, wq1_t]]
# Evolving the system
res1 = mesolve(H_t, psi0, times, [], [])
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12,7))
labels_ = ["cavity", "qubit 1", "qubit 2"]
w_list = [wf_t, wq1_t, wq2_t]
colors_ = ['#b4bfbc', 'b', '#b0ed3e']
linestyle_ = ['--', '-', '-']
components_ = [nc, n1, n2]
for i in [0, 1, 2]:
plotFrequencies(axes[0], times, w_list[i], colors_[i], labels_[i], linestyle_[i])
setLabels(axes[0], tr, th, 0)
for i in [0, 1, 2]:
plotProb(axes[1], times, components_[i], res1, colors_[i], labels_[i], linestyle_[i])
setLabels(axes[1], tr, th, 1)
fig.tight_layout()
```
## 5. Studying the effect of various ramp times on avoided crossing
```
def showProgress(idx, n):
"""
Function prints the progress bar for a running function
@param idx :: iterating index
@param n :: total number of iterating variables/ total length
"""
j = (idx+1)/n
sys.stdout.write('\r')
sys.stdout.write("[%-20s] %d%%" % ('='*int(20*j), 100*j))
sys.stdout.flush()
sleep(0.25)
return
def findIndex(times, t4):
"""
Function finds the index in the times array at required point t4
@param times :: np array contains the times at which H is evaluated
@param t4 :: the point at which the pulse ends
@returns param idx_array[0] :: the index of t4 in the times array
"""
idx_array = []
for i, t in enumerate(times):
if t >= t4 and t < t4+1:
idx_array.append(i)
return idx_array[0]
def genTimePulses(rampList):
"""
Generates pulses with variable ramp times
@param rampList :: list of time increments dt (ns) added to the ramp-up and ramp-down durations
@returns ramp_vals :: 2D array of pulse time points, one row per dt
"""
ramp_vals = np.empty((0, 4))
for dt in rampList:
t_new = [t_pulse[0], t_pulse[1]+dt, t_pulse[2]-dt, t_pulse[3]]
ramp_vals = np.append(ramp_vals, [t_new], axis=0)
return ramp_vals
def printShape(ramp_dt_array):
print("\nDimensions of the resultant 2D array:", np.shape(ramp_dt_array))
return
# get the point after the ramp down excitation
t_idx = findIndex(times, t_pulse[3])
# generating a range of pulse with varying ramp times
rampList = np.linspace(t_pulse[1], (t_pulse[3]-t_pulse[0])/2+t_pulse[0], no_ramps)-t_pulse[1]
# generates the pulses
ramp_vals = genTimePulses(rampList)
```
### { Case I } No thermal losses: Evaluating the excited state population at <mark>all times</mark> of the pulse. The excited state population is studied for a range of different ramp pulses.
```
#ramp_dt_array2D = evaluateHam2D(ramp_vals, True, no_ramps, H0, Hq1, wq1_t)
no_loss = True
exp_vals = []
ramp_exp_arr = []
ramp_dt_array2D = np.empty((0, len(times)))
for i in range(no_ramps):
t_pulse = ramp_vals[i][:]
# time dependent Hamiltonian
H_t = [H0, [Hq1, wq1_t]]
# Evolving the system with/without thermal losses
if no_loss == True:
output = mesolve(H_t, psi0, times, [], [])
else:
output = mesolve(H_t, psi0, times, c_ops, [])
exp_vals = np.real(expect(n1, output.states))
exp_val = np.mean(exp_vals[t_idx:-1])
ramp_dt_array2D = np.append(ramp_dt_array2D, [exp_vals], axis=0)
ramp_exp_arr.append(exp_val)
# progress bar
showProgress(i, no_ramps)
printShape(ramp_dt_array2D)
```
### { Case II } <u>Thermal losses</u>: Evaluating the excited state population at the <mark>end of ramp down</mark> of the pulse. The excited state population is studied for a range of different ramp pulses.
```
no_loss = False
exp_valsi = []
ramp_exp_arri = []
ramp_dt_array2Di = np.empty((0, len(times)))
for i in range(no_ramps):
t_pulse = ramp_vals[i][:]
# time dependent Hamiltonian
H_t = [H0, [Hq1, wq1_t]]
# Evolving the system with/without thermal losses
if no_loss == True:
output = mesolve(H_t, psi0, times, [], [])
else:
output = mesolve(H_t, psi0, times, c_ops, [])
exp_valsi = np.real(expect(n1, output.states))
exp_vali = np.mean(exp_valsi[t_idx:-1])
ramp_dt_array2Di = np.append(ramp_dt_array2Di, [exp_valsi], axis=0)
ramp_exp_arri.append(exp_vali)
# progress bar
showProgress(i, no_ramps)
printShape(ramp_dt_array2Di)
```
### 5.1 Plotting the result obtained for different ramp times <mark>without thermal losses</mark>
```
def plotForVariousRamps(rampList, times, ramp_exp_arr, t_eval):
"""
Plots the variation in the excitation probability as a function of times and ramp up/down times
@param rampList :: array of times by which the ramp time is increased
@param times :: array of times at which H is evaluated
@param ramp_exp_arr :: array of mean occupation probabilities, one per ramp time
@param t_eval :: index in the times array marking the end of the pulse
"""
fig, ax = plt.subplots(1, 2, figsize=(11,4))
ax[0].plot(rampList, ramp_exp_arr, 'k.-', markerfacecolor='r', markeredgecolor='r', markersize=8)
setXYlabel(ax[0], r'Ramp times $t$ (ns)', 'Excited population', True, '%d cavity'%(numF) )
Colors_ = ['r', 'b', 'g', '#ffd500']
for i,j in enumerate([0, findIndex(rampList, 18.3), findIndex(rampList, 36.7), findIndex(rampList, 55)]):
ax[1].hlines(ramp_exp_arr[j], times[t_eval], times[-1], color=Colors_[i], linewidth=2.5, label=r'$\Delta t =$ %.2f'%rampList[j])
ax[1].legend()
setXYlabel(ax[1], 'Time (ns)', 'Final occupation probability', False, 'Occupation probability vs time for various ramps\n' )
fig.tight_layout()
return
def plot3Dramps(rampList, times, ramp_dt_array2D):
"""
3D plot of the variation in the excitation probability as a function of times and ramp up/down times
@param rampList :: array of times by which the ramp time is increased
@param times :: array of times at which H is evaluated
@param ramp_dt_array2D :: 2D array of occupation probabilities obtained by evaluating the system at the various ramp times
"""
fig = plt.figure(figsize=(12,7))
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(rampList, times)
surf = ax.plot_surface(X, Y, np.transpose(ramp_dt_array2D), rstride=1, cstride=1, cmap=cm.gist_heat, linewidth=1, antialiased=False)
#surf2 = ax.plot_wireframe(X, Y, np.transpose(ramp_dt_array2D), rstride=40, cstride=40, color='k', linewidth=0.5)
# Add a color bar, axis properties
fig.colorbar(surf, shrink=0.5, aspect=10)
ax.set_xlabel('\nRamp times' + r'$\ \Delta t$ (ns)')
ax.set_ylabel('\nTime (ns)')
ax.set_zlabel('\nOccupation Probabilities');
ax.set_title(labelTimes(tr, th))
ax.view_init(16, 25)
plt.show()
return
def FourierTransformOf(rampList, ramp_exp_arr):
"""
Function calculates the Fourier Transform of the input x-y data
@param rampList :: x-values, e.g. array of times
@param ramp_exp_arr :: real-valued array whose FFT is calculated
@returns freq_arr :: x-values in the frequency domain
power :: power spectrum of the input ramp_exp_arr
"""
# fft of ram_exp_arr
ramp_FFT = np.fft.rfft(ramp_exp_arr)
power = np.real(ramp_FFT)*np.real(ramp_FFT)+np.imag(ramp_FFT)*np.imag(ramp_FFT)
# generating the FFT frequency array
start_pt = 1/rampList[-1]
freq_arr = np.linspace(start_pt, start_pt*len(power), len(power))
return freq_arr, power
def plotFFT(ax, rampList, ramp_exp_arr):
"""
Function finds the peaks in the FFT spectrum and plots the results
@param rampList :: x-values, e.g. array of times
@param ramp_exp_arr :: real valued array whose FFT is calculated
"""
rampList_FFT, ramp_exp_arr_FFT = FourierTransformOf(rampList, ramp_exp_arr)
# find peaks
peak, _ = find_peaks(ramp_exp_arr_FFT, distance=100)
# plot
ax.plot(rampList_FFT[1:], ramp_exp_arr_FFT[1:], color='#d97829', linestyle=':', marker= '.', markersize=8)
ax.plot(rampList_FFT[peak], ramp_exp_arr_FFT[peak], 'ro')
setXYlabel(ax, 'Frequency (GHz)', r'$\mathcal{F}\ [n_1]:$ 1 cavity', True, '(x, y) = (%.2f, %.1f)'%(rampList_FFT[peak], ramp_exp_arr_FFT[peak]))
fig.tight_layout()
return ramp_exp_arr_FFT[peak], rampList_FFT[peak]
def printResults(y, x):
print(' Power value: ', y)
print(' Frequency value: ', x)
return
plotForVariousRamps(rampList, times, ramp_exp_arr, t_idx)
```
Plotting the Fourier transform of the curve above (excited-state population as a function of ramp time, in ns). The plots below help summarize the shift between the slow and fast modes.
```
fig, ax = plt.subplots(1, 2, figsize=(8,4))
br_pt = 20
yf_peak, xf_peak = plotFFT(ax[0], rampList[:findIndex(rampList, br_pt)], ramp_exp_arr[:findIndex(rampList, br_pt)])
ax[0].set_xlim(0.01, 1.5)
yf_peak1, xf_peak1 = plotFFT(ax[1], rampList[findIndex(rampList, br_pt+5):], ramp_exp_arr[findIndex(rampList, br_pt+5):])
ax[1].set_xlim(0, 0.5)
print('Small ramp times (t<%.2f):'%br_pt)
printResults(yf_peak, xf_peak)
print('\nLarge ramp times (t>%.2f):'%(br_pt+5))
printResults(yf_peak1, xf_peak1)
```
A 3D plot summarizing the two plots above.
```
plot3Dramps(rampList, times, ramp_dt_array2D)
```
### 5.2 Plotting the result obtained for different ramp times <mark>with thermal losses</mark>
```
plotForVariousRamps(rampList, times, ramp_exp_arri, t_idx)
plot3Dramps(rampList, times, ramp_dt_array2Di)
```
## 6. Calculating the Fidelity and Concurrence
```
# extract the final state from the result of the simulation
rho_final = res1.states[-1]
# trace out the resonator mode and print the two-qubit density matrix
rho_qubits = ptrace(rho_final, [1, 2])
rho_qubits
# compare to the ideal result of the sqrtiswap gate (plus phase correction) for the current initial state
rho_qubits_ideal = ket2dm(tensor(phasegate(0), phasegate(-pi/2)) * sqrtiswap() * tensor(basis(2,0), basis(2,1)))
rho_qubits_ideal
print('Fidelity = ', fidelity(rho_qubits, rho_qubits_ideal))
print('Concurrence = ', concurrence(rho_qubits))
```
# Think Bayes: Chapter 9
This notebook presents code and exercises from Think Bayes, second edition.
Copyright 2016 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
```
from __future__ import print_function, division
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import math
import numpy as np
from thinkbayes2 import Pmf, Cdf, Suite, Joint
import thinkplot
```
## Improving Reading Ability
From DASL(http://lib.stat.cmu.edu/DASL/Stories/ImprovingReadingAbility.html)
> An educator conducted an experiment to test whether new directed reading activities in the classroom will help elementary school pupils improve some aspects of their reading ability. She arranged for a third grade class of 21 students to follow these activities for an 8-week period. A control classroom of 23 third graders followed the same curriculum without the activities. At the end of the 8 weeks, all students took a Degree of Reading Power (DRP) test, which measures the aspects of reading ability that the treatment is designed to improve.
> Summary statistics on the two groups of children show that the average score of the treatment class was almost ten points higher than the average of the control class. A two-sample t-test is appropriate for testing whether this difference is statistically significant. The t-statistic is 2.31, which is significant at the .05 level.
I'll use Pandas to load the data into a DataFrame.
```
import pandas as pd
df = pd.read_csv('drp_scores.csv', skiprows=21, delimiter='\t')
df.head()
```
And use `groupby` to compute the means for the two groups.
```
grouped = df.groupby('Treatment')
for name, group in grouped:
print(name, group.Response.mean())
```
The `Normal` class provides a `Likelihood` function that computes the likelihood of a sample from a normal distribution.
```
from scipy.stats import norm
class Normal(Suite, Joint):
def Likelihood(self, data, hypo):
"""
data: sequence of test scores
hypo: mu, sigma
"""
mu, sigma = hypo
likes = norm.pdf(data, mu, sigma)
return np.prod(likes)
```
The prior distributions for `mu` and `sigma` are uniform.
```
mus = np.linspace(20, 80, 101)
sigmas = np.linspace(5, 30, 101)
```
I use `itertools.product` to enumerate all pairs of `mu` and `sigma`.
```
from itertools import product
control = Normal(product(mus, sigmas))
data = df[df.Treatment=='Control'].Response
control.Update(data)
```
After the update, we can plot the probability of each `mu`-`sigma` pair as a contour plot.
```
thinkplot.Contour(control, pcolor=True)
thinkplot.Config(xlabel='mu', ylabel='sigma')
```
And then we can extract the marginal distribution of `mu`
```
pmf_mu0 = control.Marginal(0)
thinkplot.Pdf(pmf_mu0)
thinkplot.Config(xlabel='mu', ylabel='Pmf')
```
And the marginal distribution of `sigma`
```
pmf_sigma0 = control.Marginal(1)
thinkplot.Pdf(pmf_sigma0)
thinkplot.Config(xlabel='sigma', ylabel='Pmf')
```
**Exercise:** Run this analysis again for the treatment group. What is the distribution of the difference between the groups? What is the probability that the average "reading power" for the treatment group is higher? What is the probability that the variance of the treatment group is higher?
```
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# It looks like there is a high probability that the mean of
# the treatment group is higher, and the most likely size of
# the effect is 9-10 points.
# It looks like the variance of the treated group is substantially
# smaller, which suggests that the treatment might be helping
# low scorers more than high scorers.
```
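One hedged sketch of how the comparison could be set up (not the book's reference solution): it reuses the `Normal` suite defined above and assumes the treatment rows are labeled `'Treated'`, as shown by the `groupby` output.
```
# A possible outline, not the official solution.
treated = Normal(product(mus, sigmas))
treated.Update(df[df.Treatment == 'Treated'].Response)

pmf_mu1 = treated.Marginal(0)
pmf_sigma1 = treated.Marginal(1)

# Distribution of the difference between the group means
pmf_diff = pmf_mu1 - pmf_mu0
thinkplot.Pdf(pmf_diff)
thinkplot.Config(xlabel='difference in mu', ylabel='Pmf')

# Probability that the treatment mean / spread is higher
print('P(mu_treated > mu_control) =', pmf_mu1.ProbGreater(pmf_mu0))
print('P(sigma_treated > sigma_control) =', pmf_sigma1.ProbGreater(pmf_sigma0))
```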
## Paintball
Suppose you are playing paintball in an indoor arena 30 feet
wide and 50 feet long. You are standing near one of the 30 foot
walls, and you suspect that one of your opponents has taken cover
nearby. Along the wall, you see several paint spatters, all the same
color, that you think your opponent fired recently.
The spatters are at 15, 16, 18, and 21 feet, measured from the
lower-left corner of the room. Based on these data, where do you
think your opponent is hiding?
Here's the Suite that does the update. It uses `MakeLocationPmf`,
defined below.
```
class Paintball(Suite, Joint):
"""Represents hypotheses about the location of an opponent."""
def __init__(self, alphas, betas, locations):
"""Makes a joint suite of parameters alpha and beta.
Enumerates all pairs of alpha and beta.
Stores locations for use in Likelihood.
alphas: possible values for alpha
betas: possible values for beta
locations: possible locations along the wall
"""
self.locations = locations
pairs = [(alpha, beta)
for alpha in alphas
for beta in betas]
Suite.__init__(self, pairs)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: pair of alpha, beta
data: location of a hit
Returns: float likelihood
"""
alpha, beta = hypo
x = data
pmf = MakeLocationPmf(alpha, beta, self.locations)
like = pmf.Prob(x)
return like
def MakeLocationPmf(alpha, beta, locations):
"""Computes the Pmf of the locations, given alpha and beta.
Given that the shooter is at coordinates (alpha, beta),
the probability of hitting any spot is inversely proportional
to the strafe speed.
alpha: x position
beta: y position
locations: x locations where the pmf is evaluated
Returns: Pmf object
"""
pmf = Pmf()
for x in locations:
prob = 1.0 / StrafingSpeed(alpha, beta, x)
pmf.Set(x, prob)
pmf.Normalize()
return pmf
def StrafingSpeed(alpha, beta, x):
"""Computes strafing speed, given location of shooter and impact.
alpha: x location of shooter
beta: y location of shooter
x: location of impact
Returns: derivative of x with respect to theta
"""
theta = math.atan2(x - alpha, beta)
speed = beta / math.cos(theta)**2
return speed
```
The prior probabilities for `alpha` and `beta` are uniform.
```
alphas = range(0, 31)
betas = range(1, 51)
locations = range(0, 31)
suite = Paintball(alphas, betas, locations)
suite.UpdateSet([15, 16, 18, 21])
```
Here's what `MakeLocationPmf` looks like for a fixed `alpha` and a few values of `beta`: the farther the shooter is from the wall (larger `beta`), the more spread out the distribution of impact locations along it.
```
locations = range(0, 31)
alpha = 10
betas = [10, 20, 40]
thinkplot.PrePlot(num=len(betas))
for beta in betas:
pmf = MakeLocationPmf(alpha, beta, locations)
pmf.label = 'beta = %d' % beta
thinkplot.Pdf(pmf)
thinkplot.Config(xlabel='Distance',
ylabel='Prob')
```
Here are the marginal posterior distributions for `alpha` and `beta`.
```
marginal_alpha = suite.Marginal(0, label='alpha')
marginal_beta = suite.Marginal(1, label='beta')
print('alpha CI', marginal_alpha.CredibleInterval(50))
print('beta CI', marginal_beta.CredibleInterval(50))
thinkplot.PrePlot(num=2)
thinkplot.Cdf(Cdf(marginal_alpha))
thinkplot.Cdf(Cdf(marginal_beta))
thinkplot.Config(xlabel='Distance',
ylabel='Prob')
```
To visualize the joint posterior, I take slices for a few values of `beta` and plot the conditional distributions of `alpha`. If the shooter is close to the wall, we can be somewhat confident of his position. The farther away he is, the less certain we are.
```
betas = [10, 20, 40]
thinkplot.PrePlot(num=len(betas))
for beta in betas:
cond = suite.Conditional(0, 1, beta)
cond.label = 'beta = %d' % beta
thinkplot.Pdf(cond)
thinkplot.Config(xlabel='Distance',
ylabel='Prob')
```
Another way to visualize the posterior distribution: a pseudocolor plot of probability as a function of `alpha` and `beta`.
```
thinkplot.Contour(suite.GetDict(), contour=False, pcolor=True)
thinkplot.Config(xlabel='alpha',
ylabel='beta',
axis=[0, 30, 0, 20])
```
Here's another visualization that shows posterior credible regions.
```
d = dict((pair, 0) for pair in suite.Values())
percentages = [75, 50, 25]
for p in percentages:
interval = suite.MaxLikeInterval(p)
for pair in interval:
d[pair] += 1
thinkplot.Contour(d, contour=False, pcolor=True)
thinkplot.Text(17, 4, '25', color='white')
thinkplot.Text(17, 15, '50', color='white')
thinkplot.Text(17, 30, '75')
thinkplot.Config(xlabel='alpha',
ylabel='beta',
legend=False)
```
**Exercise:** From [John D. Cook](http://www.johndcook.com/blog/2010/07/13/lincoln-index/)
"Suppose you have a tester who finds 20 bugs in your program. You want to estimate how many bugs are really in the program. You know there are at least 20 bugs, and if you have supreme confidence in your tester, you may suppose there are around 20 bugs. But maybe your tester isn't very good. Maybe there are hundreds of bugs. How can you have any idea how many bugs there are? There’s no way to know with one tester. But if you have two testers, you can get a good idea, even if you don’t know how skilled the testers are.
Suppose two testers independently search for bugs. Let k1 be the number of errors the first tester finds and k2 the number of errors the second tester finds. Let c be the number of errors both testers find. The Lincoln Index estimates the total number of errors as k1 k2 / c [I changed his notation to be consistent with mine]."
So if the first tester finds 20 bugs, the second finds 15, and they find 3 in common, we estimate that there are about 100 bugs. What is the Bayesian estimate of the number of errors based on this data?
```
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
```
**Exercise:** The GPS problem. According to Wikipedia:

> GPS included a (currently disabled) feature called Selective Availability (SA) that adds intentional, time varying errors of up to 100 meters (328 ft) to the publicly available navigation signals. This was intended to deny an enemy the use of civilian GPS receivers for precision weapon guidance.
> [...]
> Before it was turned off on May 2, 2000, typical SA errors were about 50 m (164 ft) horizontally and about 100 m (328 ft) vertically.[10] Because SA affects every GPS receiver in a given area almost equally, a fixed station with an accurately known position can measure the SA error values and transmit them to the local GPS receivers so they may correct their position fixes. This is called Differential GPS or DGPS. DGPS also corrects for several other important sources of GPS errors, particularly ionospheric delay, so it continues to be widely used even though SA has been turned off. The ineffectiveness of SA in the face of widely available DGPS was a common argument for turning off SA, and this was finally done by order of President Clinton in 2000.
Suppose it is 1 May 2000, and you are standing in a field that is 200m square. You are holding a GPS unit that indicates that your location is 51m north and 15m west of a known reference point in the middle of the field.
However, you know that each of these coordinates has been perturbed by a "feature" that adds random errors with mean 0 and standard deviation 30m.
1) After taking one measurement, what should you believe about your position?
Note: Since the intentional errors are independent, you could solve this problem independently for X and Y. But we'll treat it as a two-dimensional problem, partly for practice and partly to see how we could extend the solution to handle dependent errors.
You can start with the code in gps.py.
2) Suppose that after one second the GPS updates your position and reports coordinates (48, 90). What should you believe now?
3) Suppose you take 8 more measurements and get:
(11.903060613102866, 19.79168669735705)
(77.10743601503178, 39.87062906535289)
(80.16596823095534, -12.797927542984425)
(67.38157493119053, 83.52841028148538)
(89.43965206875271, 20.52141889230797)
(58.794021026248245, 30.23054016065644)
(2.5844401241265302, 51.012041625783766)
(45.58108994142448, 3.5718287379754585)
At this point, how certain are you about your location?
```
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
```
**Exercise:** [The Flea Beetle problem from DASL](http://lib.stat.cmu.edu/DASL/Datafiles/FleaBeetles.html)
Datafile Name: Flea Beetles
Datafile Subjects: Biology
Story Names: Flea Beetles
Reference: Lubischew, A.A. (1962) On the use of discriminant functions in taxonomy. Biometrics, 18, 455-477. Also found in: Hand, D.J., et al. (1994) A Handbook of Small Data Sets, London: Chapman & Hall, 254-255.
Authorization: Contact Authors
Description: Data were collected on the genus of flea beetle Chaetocnema, which contains three species: concinna (Con), heikertingeri (Hei), and heptapotamica (Hep). Measurements were made on the width and angle of the aedeagus of each beetle. The goal of the original study was to form a classification rule to distinguish the three species.
Number of cases: 74
Variable Names:
Width: The maximal width of aedeagus in the forpart (in microns)
Angle: The front angle of the aedeagus (1 unit = 7.5 degrees)
Species: Species of flea beetle from the genus Chaetocnema
Suggestions:
1. Plot CDFs for the width and angle data, broken down by species, to get a visual sense of whether the normal distribution is a good model.
2. Use the data to estimate the mean and standard deviation for each variable, broken down by species.
3. Given a joint posterior distribution for `mu` and `sigma`, what is the likelihood of a given datum?
4. Write a function that takes a measured width and angle and returns a posterior PMF of species.
5. Use the function to classify each of the specimens in the table and see how many you get right.
```
import pandas as pd
df = pd.read_csv('flea_beetles.csv', delimiter='\t')
df.head()
# Solution goes here
```
```
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
#load dataset into the notebook
data = pd.read_csv('titanic.csv')
data.head()
# show all column names in lowercase (note: this does not modify the DataFrame in place)
data.columns.str.lower()
# let's look at the mean survival rate by gender
data.groupby('Sex')[['Survived']].mean()
# here we see that the survival rate for females was higher than that of men:
# about 20% of men survived versus about 75% of women
# let's group them further by class
data.groupby(['Sex','Pclass'])[['Survived']].mean().unstack()
# this shows that females survived at a higher rate than men in the
# first, second and third classes alike
# it also shows that first-class passengers had a higher survival rate than the rest,
# and that survival drops as you move from first class to third class
# the above can also be written using the pivot_table function, as shown below
data.pivot_table('Survived', index='Sex', columns='Pclass')
# let's then check survival by age group
# we will bin the ages into 0 to 18, 18 to 35 and 35 to 80
age = pd.cut(data['Age'], [0, 18,35, 80])
data.pivot_table('Survived', ['Sex', age], 'Pclass').unstack()
# the results are similar: girls in first class had a survival rate of over 90%, unlike boys,
# although boys aged 0-18 survived at a somewhat higher rate than older men
# passengers aged 35 to 80 in third class fared worst; almost all of the men in that group died
# let's now compute the overall survival for each class using the margins keyword of pivot_table
data.pivot_table('Survived', index='Sex', columns='Pclass', margins=True)
# females survived at a much higher rate than males
# first-class passengers survived more than the other classes
# the overall survival rate was fairly low, at about 40%
```
## Analysis of The Dataset
```
import copy
import warnings
warnings.filterwarnings('ignore')
```
## Description for Columns in the dataset
- survival - Survival (0 = No; 1 = Yes)
- class - Passenger Class (1 = 1st; 2 = 2nd; 3 = 3rd)
- name - Name
- sex - Sex
- age - Age
- sibsp - Number of Siblings/Spouses Aboard
- parch - Number of Parents/Children Aboard
- ticket - Ticket Number
- fare - Passenger Fare
- cabin - Cabin
- embarked - Port of Embarkation (C = Cherbourg; Q = Queenstown; S = Southampton)
- boat - Lifeboat (if survived)
- body - Body number (if did not survive and body was recovered)
### This May also help : https://data.world/nrippner/titanic-disaster-dataset
```
# create new df
df = copy.copy(data)
df.shape
#check sample data
df.head()
#checking information from the df
df.info()
```
## Findings
- The Age and Cabin columns contain null values
- The DataFrame has 891 entries
- Some fields have the object data type and need converting to more suitable types
- There are 12 columns in the DataFrame
```
#checking stats information about the numericals
df.describe().T
```
## Findings
- The numerical columns include Survived, Age, SibSp, Parch and Fare
- Judging by their quartiles, Age, SibSp, Parch and Fare appear to be unevenly distributed
## Checking the Data Quality
```
# we are going to check the percentage of nulls in each column
nulls_sums = df.isna().sum()
percent_nulls = nulls_sums /(len(df))
percent_nulls
```
## Findings
- Cabin has 77.1% nulls, Age has 19.7% and Embarked has 0.225%
- Since Cabin has a very high proportion of nulls, I will drop the column from the DataFrame
- For Age, I will replace the nulls with the median, since dropping these rows or the whole column would lose too much data
- For Embarked, I will drop the rows with nulls as there are only a few of them
```
#remove the cabin col
df.drop('Cabin' , axis = 1 , inplace = True)
#fill nulls with median or mean for age
age_median = df['Age'].median(skipna = True)
df['Age'].fillna(age_median, inplace = True)
#drop the rows with nulls for embarked
#will use boolean to filter out nulls
df = df[df['Embarked'].isna() != True]
df.shape
# create a copy of df
df1 = df.copy()
df1.shape , df.shape
```
## Detecting Outliers
- I will use box plots
```
plt.figure(figsize = (14, 7))
# create one row with four columns, one for each plot
plt.subplot(1,4,1)
# Draw for age
sns.boxplot(y= df['Age'])
plt.title("CHeck age outliers")
# for fare
plt.subplot(1,4,2)
sns.boxplot(y= df['Fare'])
plt.title("CHeck Fare outliers")
# siblings
plt.subplot(1,4,3)
sns.boxplot(y= df['SibSp'])
plt.title("CHeck sibsp outliers")
# for childres
plt.subplot(1,4,4)
sns.boxplot(y= df['Parch'])
plt.title("CHeck Parch outliers")
```
## Findings
- All four attributes above have outliers, as many points fall beyond the whiskers
- Fare has the most outliers
```
#Lets now check survival rate with regard to siblings
sns.catplot(x = 'SibSp' , col = "Survived" , data = df , kind = 'count')
```
## Findings
- Most of those who survived had no siblings or spouses aboard
- Of the rest, many did not survive
```
# let's now check the survival rate with regard to parents/children aboard
sns.catplot(x = 'Parch' , col = "Survived" , data = df , kind = 'count')
```
## Findings
- Passengers with no parents or children aboard also make up most of the survivors
- From the above two plots, **we can conclude that Parch and SibSp together indicate whether a passenger was accompanied or travelling alone**
- I will merge the two columns into a single label (1 or 0) showing whether a passenger was accompanied by anyone else
```
#if you add sibsp and parch and is over 0 , return 1 else zero
def checkAccopany(x):
if (x['Parch'] + x['SibSp'] > 0):
return 1
else:
return 0
# create the new merged col
df['is_Accompanied'] = df.apply(checkAccopany , axis = 1)
df.head()
#use survival and new is_accompanied col to check
sns.catplot(x = 'is_Accompanied' , col = "Survived" , data = df , kind = 'count')
```
## Findings
- Unaccompanied passengers perished at a higher rate than accompanied ones
- Accompanied passengers were more likely to survive
```
# now checking the fare
# I will use distplot, which combines a histogram with a density estimate
plt.figure(figsize = (12 , 7))
sns.distplot(df['Fare'])
plt.title("Fare Distribution")
```
## Findings
- The fare distribution is skewed to the right (a long tail of high fares)
- The skewness should be reduced; a log transform can make the distribution more even
```
# using a log transform to reduce the skewness
plt.figure(figsize = (12 , 7))
sns.distplot(df['Fare'].map(lambda x: np.log(x) if x >0 else 0))
plt.title(" Logarithmic Fare Distributiron")
```
## The log transform has made the data less skewed
- The Fare column can now be replaced with its log values, since they are more evenly distributed
```
# apply the log transform to the Fare column
df['Fare'] = df['Fare'].map(lambda x: np.log(x) if x >0 else 0)
## Let's now check survival by sex and passenger class
sns.catplot(x = 'Sex' ,y = "Survived" , data = df , col = 'Pclass', kind = 'bar')
```
## Findings
- Females were the most likely to survive
- Passengers in first class also survived at a higher rate
```
#using embarked
sns.catplot(x = 'Sex' ,y = "Survived" , data = df , col = 'Embarked', kind = 'bar')
```
## Findings
- Passengers who embarked at port **C** (Cherbourg) were more likely to have survived than the others
```
## Checking age survival rates
plt.figure(figsize = (12 , 6))
sns.distplot(df['Age'])
```
## Most of the people aboard were in the 20 to 40 age group
```
#Checking survival based on ages
# 1. Those who survived
plt.figure(figsize = (12 , 6))
sns.distplot(df[df['Survived'] == 1]['Age'])
plt.title("Distribution Of Survived")
```
## Those under 60 years of age were the most likely to survive
- The greatest chance of survival was between 30 and 35 years
```
#Checking survival based on ages
# 2. Those who did not survive
plt.figure(figsize = (12 , 6))
sns.distplot(df[df['Survived'] == 0]['Age'])
plt.title("Distribution Of who did not Survived")
```
## Older passengers were less likely to survive
- The distribution shifts toward older ages; relatively more of the elderly did not survive
```
## Survival based on fare
sns.boxplot(x = 'Survived' , y = 'Fare' , data = df)
```
## Survivors were likely to have paid higher fares
- Those who survived paid a higher fare on average than those who did not
# Read datasets
```
import pandas as pd
countries_of_the_world = pd.read_csv('../datasets/countries-of-the-world.csv')
countries_of_the_world.head()
mpg = pd.read_csv('../datasets/mpg.csv')
mpg.head()
student_data = pd.read_csv('../datasets/student-alcohol-consumption.csv')
student_data.head()
young_people_survey_data = pd.read_csv('../datasets/young-people-survey-responses.csv')
young_people_survey_data.head()
import matplotlib.pyplot as plt
import seaborn as sns
```
# Count plots
In this exercise, we'll return to exploring our dataset that contains the responses to a survey sent out to young people. We might suspect that young people spend a lot of time on the internet, but how much do they report using the internet each day? Let's use a count plot to break down the number of survey responses in each category and then explore whether it changes based on age.
As a reminder, to create a count plot, we'll use the catplot() function and specify the name of the categorical variable to count (x=____), the Pandas DataFrame to use (data=____), and the type of plot (kind="count").
Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt.
```
survey_data = young_people_survey_data
# Create count plot of internet usage
sns.catplot(x="Internet usage", data=survey_data, kind="count")
# Show plot
plt.show()
# Change the orientation of the plot
sns.catplot(y="Internet usage", data=survey_data,
kind="count")
# Show plot
plt.show()
survey_data["Age Category"] = ['Less than 21' if x < 21 else '21+' for x in survey_data['Age']]
# Create column subplots based on age category
sns.catplot(y="Internet usage",
data=survey_data,
kind="count",
col="Age Category")
# Show plot
plt.show()
```
# Bar plots with percentages
Let's continue exploring the responses to a survey sent out to young people. The variable "Interested in Math" is True if the person reported being interested or very interested in mathematics, and False otherwise. What percentage of young people report being interested in math, and does this vary based on gender? Let's use a bar plot to find out.
As a reminder, we'll create a bar plot using the catplot() function, providing the name of categorical variable to put on the x-axis (x=____), the name of the quantitative variable to summarize on the y-axis (y=____), the Pandas DataFrame to use (data=____), and the type of categorical plot (kind="bar").
Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt.
```
survey_data["Interested in Math"] = [True if x > 3 else False for x in survey_data['Mathematics']]
# Create a bar plot of interest in math, separated by gender
sns.catplot(x="Gender",
y="Interested in Math",
data=survey_data,
kind="bar")
# Show plot
plt.show()
```
# Customizing bar plots
In this exercise, we'll explore data from students in secondary school. The "study_time" variable records each student's reported weekly study time as one of the following categories: "<2 hours", "2 to 5 hours", "5 to 10 hours", or ">10 hours". Do students who report higher amounts of studying tend to get better final grades? Let's compare the average final grade among students in each category using a bar plot.
Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt.
```
# Create bar plot of average final grade in each study category
sns.catplot(x="study_time",
y="G3",
data=student_data,
kind="bar")
# Show plot
plt.show()
# Rearrange the categories
sns.catplot(x="study_time", y="G3",
data=student_data,
kind="bar",
order=["<2 hours",
"2 to 5 hours",
"5 to 10 hours",
">10 hours"])
# Show plot
plt.show()
# Turn off the confidence intervals
sns.catplot(x="study_time", y="G3",
data=student_data,
kind="bar",
order=["<2 hours",
"2 to 5 hours",
"5 to 10 hours",
">10 hours"],
ci=None)
# Show plot
plt.show()
```
# Create and interpret a box plot
Let's continue using the student_data dataset. In an earlier exercise, we explored the relationship between studying and final grade by using a bar plot to compare the average final grade ("G3") among students in different categories of "study_time".
In this exercise, we'll try using a box plot to look at this relationship instead. As a reminder, to create a box plot you'll need to use the catplot() function and specify the name of the categorical variable to put on the x-axis (x=____), the name of the quantitative variable to summarize on the y-axis (y=____), the Pandas DataFrame to use (data=____), and the type of plot (kind="box").
We have already imported matplotlib.pyplot as plt and seaborn as sns.
```
# Specify the category ordering
study_time_order = ["<2 hours", "2 to 5 hours",
"5 to 10 hours", ">10 hours"]
# Create a box plot and set the order of the categories
sns.catplot(x="study_time",
y="G3",
data=student_data,
kind='box',
order=study_time_order)
# Show plot
plt.show()
```
## Question
Which of the following is a correct interpretation of this box plot?
Possible Answers: The median grade among students studying less than 2 hours is 10.0.
# Omitting outliers
Now let's use the student_data dataset to compare the distribution of final grades ("G3") between students who have internet access at home and those who don't. To do this, we'll use the "internet" variable, which is a binary (yes/no) indicator of whether the student has internet access at home.
Since internet may be less accessible in rural areas, we'll add subgroups based on where the student lives. For this, we can use the "location" variable, which is an indicator of whether a student lives in an urban ("Urban") or rural ("Rural") location.
Seaborn has already been imported as sns and matplotlib.pyplot has been imported as plt. As a reminder, you can omit outliers in box plots by setting the sym parameter equal to an empty string ("").
```
# Create a box plot with subgroups and omit the outliers
sns.catplot(x="internet",
y="G3",
data=student_data,
kind='box',
hue="location",
sym="")
# Show plot
plt.show()
```
# Adjusting the whiskers
In the lesson we saw that there are multiple ways to define the whiskers in a box plot. In this set of exercises, we'll continue to use the student_data dataset to compare the distribution of final grades ("G3") between students who are in a romantic relationship and those that are not. We'll use the "romantic" variable, which is a yes/no indicator of whether the student is in a romantic relationship.
Let's create a box plot to look at this relationship and try different ways to define the whiskers.
We've already imported Seaborn as sns and matplotlib.pyplot as plt.
```
# Extend the whiskers to 0.5 * IQR
sns.catplot(x="romantic", y="G3",
data=student_data,
kind="box",
whis=0.5)
# Show plot
plt.show()
# Extend the whiskers to the 5th and 95th percentile
sns.catplot(x="romantic", y="G3",
data=student_data,
kind="box",
whis=[5, 95])
# Show plot
plt.show()
# Set the whiskers at the min and max values
sns.catplot(x="romantic", y="G3",
data=student_data,
kind="box",
whis=[0, 100])
# Show plot
plt.show()
```
# Customizing point plots
Let's continue to look at data from students in secondary school, this time using a point plot to answer the question: does the quality of the student's family relationship influence the number of absences the student has in school? Here, we'll use the "famrel" variable, which describes the quality of a student's family relationship from 1 (very bad) to 5 (very good).
As a reminder, to create a point plot, use the catplot() function and specify the name of the categorical variable to put on the x-axis (x=____), the name of the quantitative variable to summarize on the y-axis (y=____), the Pandas DataFrame to use (data=____), and the type of categorical plot (kind="point").
We've already imported Seaborn as sns and matplotlib.pyplot as plt.
```
# Create a point plot of family relationship vs. absences
sns.catplot(x="famrel", y="absences",
data=student_data,
kind="point")
# Show plot
plt.show()
# Add caps to the confidence interval
sns.catplot(x="famrel", y="absences",
data=student_data,
kind="point",
capsize=0.2)
# Show plot
plt.show()
# Remove the lines joining the points
sns.catplot(x="famrel", y="absences",
data=student_data,
kind="point",
capsize=0.2,
join=False)
# Show plot
plt.show()
```
# Point plots with subgroups
Let's continue exploring the dataset of students in secondary school. This time, we'll ask the question: is being in a romantic relationship associated with higher or lower school attendance? And does this association differ by which school the students attend? Let's find out using a point plot.
We've already imported Seaborn as sns and matplotlib.pyplot as plt.
Use sns.catplot() and the student_data DataFrame to create a point plot with relationship status ("romantic") on the x-axis and number of absences ("absences") on the y-axis. Create subgroups based on the school that they attend ("school")
```
# Create a point plot with subgroups
sns.catplot(x="romantic",
y="absences",
data=student_data,
kind="point",
hue="school")
# Show plot
plt.show()
# Turn off the confidence intervals for this plot
sns.catplot(x="romantic", y="absences",
data=student_data,
kind="point",
hue="school",
ci=None)
# Show plot
plt.show()
# Import median function from numpy
from numpy import median
# Plot the median number of absences instead of the mean
sns.catplot(x="romantic", y="absences",
data=student_data,
kind="point",
hue="school",
ci=None, estimator=median)
# Show plot
plt.show()
```
# Preprocessing
To begin the training process, the raw images first had to be preprocessed. For the most part, this meant removing the banners that contained image metadata while retaining as much useful image data as possible. To remove the banners, I used a technique called "reflective padding": the banner region is removed, and the edges are then padded with the image's own reflection. An example of this is shown here:
In order to remove the banners, however, they must first be detected. This was done using kernels in OpenCV to detect vertical and horizontal lines within the image. For instance, let's say you start with this image:
```
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
file = '../data/Raw_Data/Particles/L2_000b4469b73e3fb3558d20b33b91fcb0.jpg'
img = mpimg.imread(file)
fig, ax = plt.subplots(1, 1, figsize=(10,10))
ax.set_axis_off()
ax.imshow(img)
```
The first step would be to create a binary mask of the image where all pixels above a threshold becomes 255 and all pixels below the threshold becomes 0. Since the banners in our images are mostly white, the threshold value chosen was 250. This is to ensure it is mostly only the banner that is left in the mask.
```
import cv2
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # binarization only works if the image is first converted to greyscale
ret, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY) # binarize the image using 250 as the threshold value
fig, ax = plt.subplots(1, 1, figsize=(10,10))
ax.set_axis_off()
ax.imshow(thresh)
```
Next, use [erosion and dilation](https://docs.opencv.org/2.4/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.html) to find where the vertical and horizontal lines are within the image. By successively replacing pixels with the minimum (erosion) and then the maximum value (dilation) over the area of a kernel, a tall, thin kernel preserves the largely vertical regions of the image, while a short, wide kernel preserves the largely horizontal regions.
```
# Find the verticle and horizontal lines in the image
verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 13))
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 1))
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
img_v = cv2.erode(thresh, verticle_kernel, iterations = 3)
vert_lines_img = cv2.dilate(img_v, verticle_kernel, iterations = 3)
img_h = cv2.erode(thresh, horizontal_kernel, iterations = 3)
hori_lines_img = cv2.dilate(img_h, horizontal_kernel, iterations = 3)
fig, ax = plt.subplots(1, 2, figsize=(20,20))
ax[0].set_axis_off()
ax[0].imshow(vert_lines_img)
ax[1].set_axis_off()
ax[1].imshow(hori_lines_img)
```
The two masks are then added together and a final erosion + binarization is performed on the inverted array to ensure we are left with a binary mask where pixel values of 0 indicate the banner region and pixel values of 255 indicate everywhere else.
```
img_add = cv2.addWeighted(vert_lines_img, 0.5, hori_lines_img, 0.5, 0.0)
img_final = cv2.erode(~img_add, kernel, iterations = 3)
ret, thresh2 = cv2.threshold(img_final, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
fig, ax = plt.subplots(1, 2, figsize=(20,20))
ax[0].set_axis_off()
ax[0].imshow(img_add)
ax[1].set_axis_off()
ax[1].imshow(thresh2)
img_final
```
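Once the banner region has been localized from a mask like `thresh2`, the reflective-padding step described at the top of this section can be applied. The snippet below is only a hedged sketch of that idea: it assumes, for illustration, that the banner occupies a horizontal strip whose row range `r0:r1` has already been read off the mask.
```
import numpy as np
import cv2

def remove_banner_with_reflection(image, r0, r1):
    """Drop the banner rows [r0, r1) and restore the original height by
    reflect-padding the remaining image at the bottom edge."""
    banner_height = r1 - r0
    cropped = np.concatenate([image[:r0], image[r1:]], axis=0)
    # BORDER_REFLECT mirrors the rows adjacent to the edge back into the gap
    return cv2.copyMakeBorder(cropped, 0, banner_height, 0, 0, cv2.BORDER_REFLECT)

# Example call with a hypothetical row range for the banner:
# cleaned = remove_banner_with_reflection(img, r0=950, r1=1024)
```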
# Quantum Kernel Alignment with Qiskit Runtime
<br>
**Classification with Support Vector Machines**<br>
Classification problems are widespread in machine learning applications. Examples include credit card risk, handwriting recognition, and medical diagnosis. One approach to tackling classification problems is the support vector machine (SVM) [1,2]. This supervised learning algorithm uses labeled data samples to train a model that can predict to which class a test sample belongs. It does this by finding a separating hyperplane maximizing the margin between data classes. Often, data is not linearly separable in the original space. In these cases, the kernel trick is used to implicitly encode a transformation of the data into a higher-dimensional feature space, through the inner product between pairs of data points, where the data may become separable.
**Quantum Kernels**<br>
Quantum computers can be used to encode classical data in a quantum-enhanced feature space. In 2019, IBM introduced an algorithm called the quantum kernel estimator (QKE) for computing quantum kernels [3]. This algorithm uses quantum circuits with data provided classically and offers an efficient way to evaluate inner products between data in a quantum feature space. For two data samples $\theta$ and $\theta'$, the kernel matrix is given as
$$
K(\theta, \theta') = \lvert\langle 0^n \rvert U^\dagger(\theta) U(\theta') \lvert 0^n \rangle \rvert^2,
$$
where $U(\theta)$ prepares the quantum feature state. Quantum kernels used in a classification framework inherit the convex optimization program of the SVM and avoid common limitations of variational quantum classifiers. A key observation of this paper was that a necessary condition for a computational advantage requires quantum circuits for the kernel that are hard to simulate classically. More recently, IBM proved that quantum kernels can offer superpolynomial speedups over any classical learner on a learning problem based on the hardness of the discrete logarithm problem [4]. This means that quantum kernels can someday offer quantum advantage on suitable problems.
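To make the kernel estimate concrete, here is a small, hedged sketch that evaluates a single kernel entry by composing $U(\theta')$ with $U^\dagger(\theta)$ and reading off the probability of the all-zeros outcome from a statevector simulation. The two-qubit feature map below is a toy encoding chosen purely for illustration, not the covariant feature map used later in this tutorial.
```
import numpy as np
from qiskit import QuantumCircuit
from qiskit.quantum_info import Statevector

def toy_feature_map(theta):
    """A toy two-qubit data-encoding circuit U(theta), for illustration only."""
    qc = QuantumCircuit(2)
    qc.h([0, 1])
    qc.rz(theta[0], 0)
    qc.rz(theta[1], 1)
    qc.cx(0, 1)
    qc.rz(theta[0] * theta[1], 1)
    qc.cx(0, 1)
    return qc

def kernel_entry(theta, theta_prime):
    """K(theta, theta') = |<0..0| U^dag(theta) U(theta') |0..0>|^2."""
    circuit = toy_feature_map(theta_prime).compose(toy_feature_map(theta).inverse())
    amplitude = Statevector.from_instruction(circuit).data[0]
    return np.abs(amplitude) ** 2

print(kernel_entry([0.3, 1.2], [0.3, 1.2]))   # identical samples give 1.0
print(kernel_entry([0.3, 1.2], [0.8, -0.4]))  # different samples give a value below 1.0
```
On hardware, the same quantity is estimated by running the composed circuit and counting the frequency of the all-zeros bitstring rather than computing the statevector.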
**Quantum Kernels that Exploit Structure in Data**<br>
An important approach in the search for practical quantum advantage in machine learning is to identify quantum kernels for learning problems that have underlying structure in the data. We've taken a step in this direction in our recent paper [5], where we introduced a broad class of quantum kernels that exploit group structure in data. Examples of learning problems for data with group structure could include learning permutations or classifying translations. We call this new class of kernels _covariant quantum kernels_ as they are related to covariant quantum measurements. The quantum feature map is defined by a unitary representation $D(\theta)$ of a group $G$ for some element $\theta \in G$, and a fiducial reference state $\lvert\psi\rangle = V\lvert0^n\rangle$ prepared by a unitary circuit $V$. The kernel matrix is given as
$$
K(\theta, \theta') = \vert\langle 0^n \rvert V^\dagger D^\dagger(\theta) D(\theta') V \lvert 0^n \rangle \rvert^2. \qquad (1)
$$
In general, the choice of the fiducial state is not known _a priori_ and can significantly impact the performance of the classifier. Here, we use a method called quantum kernel alignment (QKA) to find a good fiducial state for a given group.
**Aligning Quantum Kernels on a Dataset**<br>
In practice, SVMs require a choice of the kernel function. Sometimes, symmetries in the data can inform this selection, other times it is chosen in an ad hoc manner. Kernel alignment is one approach to learning a kernel on a given dataset by iteratively adapting it to have high similarity to a target kernel informed from the underlying data distribution [6]. As a result, the SVM with an aligned kernel will likely generalize better to new data than with an unaligned kernel. Using this concept, we introduced in [5] an algorithm for quantum kernel alignment, which provides a way to learn a quantum kernel from a family of kernels. Specifically, the algorithm optimizes the parameters in a quantum circuit to maximize the alignment of a kernel while converging to the maximum SVM margin. In the context of covariant quantum kernels, we extend Eq. $(1)$ to
$$
K_\lambda(\theta,\theta') = \lvert\langle 0^n \rvert V^\dagger_\lambda D^\dagger(\theta) D(\theta') V_\lambda \lvert 0^n \rangle \rvert^2, \qquad (2)
$$
and use QKA to learn a good fiducial state parametrized by $\lambda$ for a given group.
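Schematically, the alignment loop looks like the following hedged sketch: for the current $\lambda$, estimate the kernel matrix, solve the SVM to obtain its objective value, and update $\lambda$ with an SPSA-style step. Here `covariant_kernel_matrix` is a hypothetical placeholder for whatever routine evaluates Eq. $(2)$ on hardware or a simulator; the Qiskit Runtime program used below performs this loop server-side.
```
import numpy as np
from sklearn.svm import SVC

def svm_dual_objective(K, y, C=1.0):
    """Train a precomputed-kernel SVM and return the value of its dual objective."""
    svc = SVC(kernel='precomputed', C=C).fit(K, y)
    a = svc.dual_coef_.ravel()          # a_i = y_i * alpha_i for the support vectors
    idx = svc.support_
    return np.sum(np.abs(a)) - 0.5 * a @ K[np.ix_(idx, idx)] @ a

# Schematic SPSA-style outer loop: QKA minimizes the trained SVM objective over lambda.
# `covariant_kernel_matrix(lam, X)` is a hypothetical placeholder; X, y are training
# samples and their +/-1 labels.
lam, eps, lr = np.array([0.1]), 0.1, 0.1
for step in range(10):
    delta = np.random.choice([-1.0, 1.0], size=lam.shape)
    f_plus = svm_dual_objective(covariant_kernel_matrix(lam + eps * delta, X), y)
    f_minus = svm_dual_objective(covariant_kernel_matrix(lam - eps * delta, X), y)
    lam = lam - lr * (f_plus - f_minus) / (2 * eps) * delta
```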
**Covariant Quantum Kernels on a Specific Learning Problem**<br>
Let's try out QKA on a learning problem. In the following, we'll consider a binary classification problem we call _labeling cosets with error_ [5]. In this problem, we will use a group and a subgroup to form two cosets, which will represent our data classes. We take the group $G = SU(2)^{\otimes n}$ for $n$ qubits, which is the special unitary group of $2\times2$ matrices and has wide applicability in nature, for example, the Standard Model of particle physics and in many condensed matter systems. We take the graph-stabilizer subgroup $S_{\mathrm{graph}} \in G$ with $S_{\mathrm{graph}} = \langle \{ X_i \otimes_{k:(k,i) \in \mathcal{E}} Z_k \}_{i \in \mathcal{V}} \rangle$ for a graph $(\mathcal{E},\mathcal{V})$ with edges $\mathcal{E}$ and vertices $\mathcal{V}$. Note that the stabilizers fix a stabilizer state such that $D_s \lvert \psi\rangle = \lvert \psi\rangle$. This observation will be useful a bit later.
To generate the dataset, we write the rotations of the group as $D(\theta_1, \theta_2, 0)=\exp(i \theta_1 X) \exp(i \theta_2 Z) \in SU(2)$, so that each qubit is parametrized by the first two Euler angles (the third we set to zero). Then, we draw randomly two sets of angles $\mathbf{\theta}_\pm \in [-\pi/4, \pi/4]^{2n}$ for the $n$-qubit problem. From these two sets, we construct a binary classification problem by forming two left-cosets (representing the two classes) with those angles, $C_\pm = D(\mathbf{\theta}_\pm) S_{\mathrm{graph}}$ where $D(\mathbf{\theta}_\pm) = \otimes_{k=1}^n D(\theta_\pm^{2k-1}, \theta_\pm^{2k}, 0)$. Note that the elements of the cosets can again be written in terms of Euler angles. We build training and testing sets by randomly drawing elements from $C_\pm$ such that the dataset has samples $i=1,...,m$ containing the first two Euler angles for each qubit $\mathbf{\theta}_{y_i} = (\theta_{y_i}^{1}, \theta_{y_i}^{2}, \theta_{y_i}^{3}, \theta_{y_i}^{4}, ..., \theta_{y_i}^{2n-1}, \theta_{y_i}^{2n})$ and labels $y_i \in \{-1,1\}$ that indicate to which coset a sample belongs.
Next, we select a fiducial state. A natural candidate is the stabilizer state we encountered above. Why? Because this is a subgroup invariant state, $D_s\lvert\psi\rangle = \lvert\psi\rangle$, which causes the data for a given coset to be mapped to a unique state: $D(\mathbf{\theta}_\pm)D_s \lvert\psi\rangle = D(\mathbf{\theta}_\pm) \lvert\psi\rangle$. This means the classifier only needs to distinguish the _two_ states $D(\mathbf{\theta}_\pm) \lvert\psi\rangle \langle \psi\rvert D^\dagger(\mathbf{\theta}_\pm)$ for every element of the coset. In this tutorial, we will add a small Gaussian error with variance $0.01$ to the Euler angles of the dataset. This noise will perturb these two states, but if the variance is sufficiently small, we expect the states will still be classified correctly. Let's consider a parametrized version of the stabilizer state, associated with the coupling graph $(\mathcal{E},\mathcal{V})$ given by the device connectivity, as our fiducial state and then use kernel alignment to find its optimal parameters. Specifically, we'll replace the initial layers of Hadamards in the graph state with $y$-rotations by an angle $\lambda$,
$$
\lvert \psi_\lambda\rangle = V_\lambda \lvert 0^n\rangle = \prod_{(k,t) \in \mathcal{E}} CZ_{k,t} \prod_{k \in \mathcal{V}} \exp\left(i \frac{\lambda}{2} Y_k\right)\lvert 0^n\rangle,
$$
where $CZ=\mathrm{diag}(1,1,1,-1)$. Then, given two samples from our dataset, $\mathbf{\theta}$ and $\mathbf{\theta}'$, the kernel matrix is evaluated as in Eq. $(2)$. If we initialize the kernel with $\lambda \approx 0$, we expect the quantum kernel alignment algorithm to converge towards the optimal $\lambda = \pi/2$ and the classifier to yield 100\% test accuracy.
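As a rough illustration, here is a hedged sketch of the parametrized fiducial state $V_\lambda \lvert 0^n\rangle$ on a hypothetical 3-qubit path graph (not the 7- or 10-qubit subgraphs used below): a layer of $y$-rotations by $\lambda$ followed by $CZ$ gates along the graph edges.
```
from qiskit import QuantumCircuit
from qiskit.circuit import Parameter

lam = Parameter('lambda')
edges = [(0, 1), (1, 2)]   # hypothetical 3-qubit path graph, for illustration only

fiducial = QuantumCircuit(3)
for q in range(3):
    fiducial.ry(lam, q)    # replaces the Hadamard layer of the usual graph state
for k, t in edges:
    fiducial.cz(k, t)      # entangle along the coupling-graph edges

# A y-rotation by lambda = pi/2 maps |0> to an equal-weight superposition, as a
# Hadamard does (up to signs), so this family contains the graph state fixed by
# the stabilizer subgroup.
print(fiducial.draw())
```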
Let's define two specific problem instances to test these ideas out. We'll be using the quantum device `ibmq_montreal`, with coupling map shown below:
<br>
<img src="images/chip.png" width="500">
<br>
We'll pick two different subgraphs, one for 7 qubits and one for 10, to define our problem instances. Using these subgraphs, we'll generate the corresponding datasets as described above, and then align the quantum kernel with QKA to learn a good fiducial state.
<br>
<img src="images/subgraphs.png" width="550">
<br>
**Speeding up Algorithms with Qiskit Runtime**<br>
QKA is an iterative quantum-classical algorithm, in which quantum hardware is used to execute parametrized quantum circuits for evaluating the quantum kernel matrices with QKE, while a classical optimizer tunes the parameters of those circuits to maximize the alignment. Iterative algorithms of this type can be slow due to latency between the quantum and classical calculations. Qiskit Runtime is a new architecture that can speed up iterative algorithms like QKA by co-locating classical computations with the quantum hardware executions. In this tutorial, we'll use QKA with Qiskit Runtime to learn a good quantum kernel for the _labeling cosets with error_ problem defined above.
<br>
**References**<br>
[1] B. E. Boser, I. M. Guyon, and V. N. Vapnik, Proceedings of the Fifth Annual Workshop on Computational Learning Theory, COLT ’92 (Association for Computing Machinery, New York, NY, USA, 1992) pp. 144-152 [link](https://doi.org/10.1145/130385.130401) <br>
[2] V. Vapnik, The Nature of Statistical Learning Theory, Information Science and Statistics (Springer New York, 2013) [link](https://books.google.com/books?id=EqgACAAAQBAJ) <br>
[3] V. Havlíček, A. D. Córcoles, K. Temme, A. W. Harrow, A. Kandala, J. M. Chow, and J. M. Gambetta, Nature 567, 209-212 (2019) [link](https://doi.org/10.1038/s41586-019-0980-2) <br>
[4] Y. Liu, S. Arunachalam, and K. Temme, arXiv:2010.02174 (2020) [link](https://arxiv.org/abs/2010.02174) <br>
[5] J. R. Glick, T. P. Gujarati, A. D. Córcoles, Y. Kim, A. Kandala, J. M. Gambetta, K. Temme, arXiv:2105.03406 (2021) [link](https://arxiv.org/abs/2105.03406)<br>
[6] N. Cristianini, J. Shawe-taylor, A. Elisseeff, and J. Kandola, Advances in Neural Information Processing Systems 14 (2001) [link](https://proceedings.neurips.cc/paper/2001/file/1f71e393b3809197ed66df836fe833e5-Paper.pdf) <br>
# Load your IBM Quantum account and get the quantum backend
We'll be using the 27-qubit device `ibmq_montreal` for this tutorial.
```
import sys
sys.path.insert(0, '..') # Add qiskit_runtime directory to the path
from qiskit import IBMQ
IBMQ.load_account()
provider = IBMQ.get_provider(project='qiskit-runtime') # Change this to your provider.
backend = provider.get_backend('ibmq_montreal')
```
# Invoke the Quantum Kernel Alignment program
Before executing the runtime program for QKA, we need to prepare the dataset and configure the input parameters for the algorithm.
### 1. Prepare the dataset
First, we load the dataset from the `csv` file and then extract the labeled training and test samples. Here, we'll look at the 7-qubit problem, shown above in subfigure a). A second dataset is also available for the 10-qubit problem in b).
```
import pandas as pd
df = pd.read_csv('../qiskit_runtime/qka/aux_file/dataset_graph7.csv',sep=',', header=None) # alternative problem: dataset_graph10.csv
data = df.values
```
Let's take a look at the data to see how it's formatted. Each row of the dataset contains a list of Euler angles, followed by the class label $\pm1$ in the last column. For an $n$-qubit problem, there are $2n$ features corresponding to the first two Euler angles for each qubit (recall discussion above). The rows alternate between class labels.
```
print(df.head(4))
```
Now, let's explicitly construct the training and test samples (denoted `x`) and their labels (denoted `y`).
```
import numpy as np
# choose number of training and test samples per class:
num_train = 10
num_test = 10
# extract training and test sets and sort them by class label
train = data[:2*num_train, :]
test = data[2*num_train:2*(num_train+num_test), :]
ind=np.argsort(train[:,-1])
x_train = train[ind][:,:-1]
y_train = train[ind][:,-1]
ind=np.argsort(test[:,-1])
x_test = test[ind][:,:-1]
y_test = test[ind][:,-1]
```
### 2. Configure the QKA algorithm
The first task is to set up the feature map and its entangler map, which specifies the arrangement of $CZ$ gates in the fiducial state. We will choose this to match the connectivity of the problem subgraph, pictured above. We also initialize the fiducial state parameter $\lambda$ with `initial_point`.
```
from qiskit_runtime.qka import FeatureMap
d = np.shape(data)[1]-1 # feature dimension is twice the qubit number
em = [[0,2],[3,4],[2,5],[1,4],[2,3],[4,6]] # we'll match this to the 7-qubit graph
# em = [[0,1],[2,3],[4,5],[6,7],[8,9],[1,2],[3,4],[5,6],[7,8]] # we'll match this to the 10-qubit graph
fm = FeatureMap(feature_dimension=d, entangler_map=em) # define the feature map
initial_point = [0.1] # set the initial parameter for the feature map
```
Let's print out the circuit for the feature map (the circuit for the kernel will be a feature map for one data sample composed with an inverse feature map for a second sample). The first part of the feature map is the fiducial state, which is prepared with a layer of $y$ rotations followed by $CZ$s. Then, the last two layers of $z$ and $x$ rotations in the circuit denote the group representation $D(\theta)$ for a data sample $\theta$. Note that a single-qubit rotation is defined as $RP(\phi) = \exp(- i [\phi/2] P)$ for $P \in \{X, Y, Z\}$.
```
from qiskit.tools.visualization import circuit_drawer
circuit_drawer(fm.construct_circuit(x=x_train[0], parameters=initial_point),
output='text', fold=200)
```
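As a quick numerical check of the rotation convention above (this check is not part of the tutorial's workflow), the matrix exponential $\exp(-i[\phi/2]Z)$ should reproduce the familiar diagonal $RZ(\phi)$ matrix:
```
# Sanity check of the convention RP(phi) = exp(-i * phi/2 * P), illustrated for P = Z.
import numpy as np
from scipy.linalg import expm

phi = 0.7
Z = np.array([[1, 0], [0, -1]], dtype=complex)
rz = expm(-1j * (phi / 2) * Z)                                     # RZ(phi) under the convention above
expected = np.diag([np.exp(-1j * phi / 2), np.exp(1j * phi / 2)])
print(np.allclose(rz, expected))                                   # True
```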
Next, we set the values for the SVM soft-margin penalty `C` and the number of SPSA iterations `maxiters` we use to align the quantum kernel.
```
C = 1 # SVM soft-margin penalty
maxiters = 10 # number of SPSA iterations
```
Finally, we decide how to map the virtual qubits of our problem graph to the physical qubits of the hardware. For example, in the 7-qubit problem, we can directly map the virtual qubits `[0, 1, 2, 3, 4, 5, 6]` to the physical qubits `[10, 11, 12, 13, 14, 15, 16]` of the device. This allows us to avoid introducing SWAP gates for qubits that are not connected, which can increase the circuit depth.
```
initial_layout = [10, 11, 12, 13, 14, 15, 16] # see figure above for the 7-qubit graph
# initial_layout = [9, 8, 11, 14, 16, 19, 22, 25, 24, 23] # see figure above for the 10-qubit graph
```
### 3. Set up and run the program
We're almost ready to run the program. First, let's take a look at the program metadata, which includes a description of the input parameters and their default values.
```
print(provider.runtime.program('quantum-kernel-alignment'))
```
We see that this program has several input parameters, which we'll configure below. To run the program, we'll set up its two main components: `inputs` (the input parameters from the program metadata) and `options` (the quantum backend). We'll also define a callback function so that the intermediate results of the algorithm will be printed as the program runs. Note that each step of the algorithm for the settings we've selected here takes approximately 11 minutes.
```
def interim_result_callback(job_id, interim_result):
print(f"interim result: {interim_result}\n")
program_inputs = {
'feature_map': fm,
'data': x_train,
'labels': y_train,
'initial_kernel_parameters': initial_point,
'maxiters': maxiters,
'C': C,
'initial_layout': initial_layout
}
options = {'backend_name': backend.name()}
job = provider.runtime.run(program_id="quantum-kernel-alignment",
options=options,
inputs=program_inputs,
callback=interim_result_callback,
)
print(job.job_id())
result = job.result()
```
### 4. Retrieve the results of the program
Now that we've run the program, we can retrieve the output, which is the aligned kernel parameter and the aligned kernel matrix. Let's also plot this kernel matrix (we'll subtract off the diagonal to show the contrast between the remaining entries). The kernel matrix is expected to have a block-diagonal structure. This reflects the fact that the kernel maps the input data effectively to just two states (modulo the small noise we added to the data; recall the discussion above). That is, data in the same coset (same class label) have a larger overlap than do data from different cosets.
```
print(f"aligned_kernel_parameters: {result['aligned_kernel_parameters']}")
from matplotlib import pyplot as plt
from pylab import cm
plt.rcParams['font.size'] = 20
plt.imshow(result['aligned_kernel_matrix']-np.identity(2*num_train), cmap=cm.get_cmap('bwr', 20))
plt.show()
```
# Use the results of the program to test an SVM on new data
Equipped with the aligned kernel and its optimized parameter, we can use the `sklearn` package to train an SVM and then evaluate its classification accuracy on new test points. Note that a second kernel matrix built from the test points is needed for the SVM decision function.
```
from qiskit_runtime.qka import KernelMatrix
from sklearn.svm import SVC
from sklearn import metrics
# train the SVM with the aligned kernel matrix:
kernel_aligned = result['aligned_kernel_matrix']
model = SVC(C=C, kernel='precomputed')
model.fit(X=kernel_aligned, y=y_train)
# test the SVM on new data:
km = KernelMatrix(feature_map=fm, backend=backend, initial_layout=initial_layout)
kernel_test = km.construct_kernel_matrix(x1_vec=x_test, x2_vec=x_train, parameters=result['aligned_kernel_parameters'])
labels_test = model.predict(X=kernel_test)
accuracy_test = metrics.balanced_accuracy_score(y_true=y_test, y_pred=labels_test)
print(f"accuracy test: {accuracy_test}")
import qiskit.tools.jupyter
%qiskit_version_table
%qiskit_copyright
```
# Evaluation metrics for classification models
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
pd.options.mode.chained_assignment = None
%matplotlib inline
```
### Back with the credit card default dataset
```
# Loading the dataset
DATA_DIR = '../data'
FILE_NAME = 'credit_card_default.csv'
data_path = os.path.join(DATA_DIR, FILE_NAME)
ccd = pd.read_csv(data_path, index_col="ID")
ccd.rename(columns=lambda x: x.lower(), inplace=True)
ccd.rename(columns={'default payment next month':'default'}, inplace=True)
# getting the groups of features
bill_amt_features = ['bill_amt'+ str(i) for i in range(1,7)]
pay_amt_features = ['pay_amt'+ str(i) for i in range(1,7)]
numerical_features = ['limit_bal','age'] + bill_amt_features + pay_amt_features
# Creating binary features
ccd['male'] = (ccd['sex'] == 1).astype('int')
ccd['grad_school'] = (ccd['education'] == 1).astype('int')
ccd['university'] = (ccd['education'] == 2).astype('int')
ccd['married'] = (ccd['marriage'] == 1).astype('int')
# simplifying pay features
pay_features= ['pay_' + str(i) for i in range(1,7)]
for x in pay_features:
ccd.loc[ccd[x] <= 0, x] = 0
# simplifying delayed features
delayed_features = ['delayed_' + str(i) for i in range(1,7)]
for pay, delayed in zip(pay_features, delayed_features):
ccd[delayed] = (ccd[pay] > 0).astype(int)
# creating a new feature: months delayed
ccd['months_delayed'] = ccd[delayed_features].sum(axis=1)
```
## Splitting and standardizing the dataset
```
numerical_features = numerical_features + ['months_delayed']
binary_features = ['male','married','grad_school','university']
X = ccd[numerical_features + binary_features]
y = ccd['default'].astype(int)
## Split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=5/30, random_state=25)
## Standardize
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train[numerical_features])
X_train.loc[:, numerical_features] = scaler.transform(X_train[numerical_features])
# Standardize also the testing set
X_test.loc[:, numerical_features] = scaler.transform(X_test[numerical_features])
```
## Performance metrics
```
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=25,
max_features=6,
max_depth=4,
random_state=61)
rf.fit(X_train, y_train)
from sklearn.metrics import confusion_matrix
def CM(y_true, y_pred):
M = confusion_matrix(y_true, y_pred)
out = pd.DataFrame(M, index=["Obs Paid", "Obs Default"], columns=["Pred Paid", "Pred Default"])
return out
threshold = 0.5
y_pred_prob = rf.predict_proba(X_test)[:,1]
y_pred = (y_pred_prob > threshold).astype(int)
CM(y_test, y_pred)
from sklearn.metrics import precision_score, recall_score, accuracy_score
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
accuracy = accuracy_score(y_test, y_pred)
print("Precision: {:0.1f}%, Recall: {:.1f}%, Accuracy: {:0.1f}%".format(100*precision, 100*recall, 100*accuracy))
```
## Visualization methods for evaluating classification models
### Visualizing probabilities
```
plt.hist(y_pred_prob, bins=25, ec='k');
fig, ax = plt.subplots(figsize=(8,5))
sns.kdeplot(y_pred_prob[y_test==1], shade=True, color='red', label="Defaults", ax=ax)
sns.kdeplot(y_pred_prob[y_test==0], shade=True, color='green', label="Paid", ax=ax)
ax.set_title("Distribution of predicted probabilies", fontsize=16)
ax.legend()
plt.grid();
```
### ROC and precision-recall curves
```
threshold = 0.4
y_pred_prob = rf.predict_proba(X_test)[:,1]
y_pred = (y_pred_prob > threshold).astype(int)
CM(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
accuracy = accuracy_score(y_test, y_pred)
print("Precision: {:0.1f}%, Recall: {:.1f}%, Accuracy: {:0.1f}%".format(100*precision, 100*recall, 100*accuracy))
from sklearn.metrics import precision_recall_curve
precs, recs, ths = precision_recall_curve(y_test, y_pred_prob)
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(ths, precs[1:], label='Precision')
ax.plot(ths, recs[1:], label='Recall')
ax.set_title('Precision and recall for different thresholds', fontsize=16)
ax.set_xlabel('Threshold', fontsize=14)
ax.set_ylabel('Precision, Recall', fontsize=14)
ax.set_xlim(0.1,0.7)
ax.legend(); ax.grid();
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(precs, recs)
ax.set_title('Precision-recall curve', fontsize=16)
ax.set_xlabel('Precision', fontsize=14)
ax.set_ylabel('Recall', fontsize=14)
ax.set_xlim(0.3,0.7)
ax.grid();
from sklearn.metrics import roc_curve
fpr, tpr, ths = roc_curve(y_test, y_pred_prob)
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(fpr, tpr)
ax.set_title('ROC curve', fontsize=16)
ax.set_xlabel('False positive rate', fontsize=14)
ax.set_ylabel('Recall (true positive rate)', fontsize=14)
ax.grid();
```
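The area under the ROC curve (AUC) gives a single-number summary of the curve above; a small, optional complement to the plots, using the same `y_test` and `y_pred_prob` as before:
```
# Area under the ROC curve summarizes how well the model ranks defaults above non-defaults.
from sklearn.metrics import roc_auc_score
auc = roc_auc_score(y_test, y_pred_prob)
print("ROC AUC: {:0.3f}".format(auc))
```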
### Defining a custom metric for classification
```
def class_cost(y_true, y_pred, cost_fn=1, cost_fp=1):
M = confusion_matrix(y_true, y_pred)
N = len(y_true)
FN = M[1,0]
FP = M[0,1]
return (cost_fn*FN + cost_fp*FP)/N
class_cost(y_test, y_pred)
thresholds = np.arange(0.05, 0.95, 0.01)
costs = []
for th in thresholds:
y_pred = (y_pred_prob > th).astype(int)
costs.append(class_cost(y_test, y_pred, cost_fn=3, cost_fp=1))
costs = np.array(costs)
fig, ax = plt.subplots(figsize=(8,5))
ax.plot(thresholds, costs)
ax.set_title('Cost vs threshold', fontsize=16)
ax.set_xlabel('Threshold', fontsize=14)
ax.set_ylabel('Cost', fontsize=14)
ax.grid();
min_cost_th = thresholds[costs.argmin()]
min_cost_th
y_pred = (y_pred_prob > min_cost_th).astype(int)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
print("Precision: {:0.1f}%, Recall: {:.1f}%".format(100*precision, 100*recall))
CM(y_test, y_pred)
```
# Building ERDDAP Datasets
This notebook documents the process of creating XML fragments
for nowcast system run results files
for inclusion in `/results/erddap-datasets/datasets.xml`
which is symlinked to `/opt/tomcat/content/erddap/datasets.xml`
on the `skookum` ERDDAP server instance.
The contents are a combination of:
* instructions for using the
`GenerateDatasetsXml.sh` and `DasDds.sh` tools found in the
`/opt/tomcat/webapps/erddap/WEB-INF/` directory
* instructions for forcing the server to update the datasets collection
via the `/results/erddap/flags/` directory
* code and metadata to transform the output of `GenerateDatasetsXml.sh`
into XML fragments that are ready for inclusion in `/results/erddap-datasets/datasets.xml`
This is a snapshot of the `erddap-datasets/ERDDAP_datasets.ipynb` notebook that is used
to maintain the `datasets.xml` file.
Please see https://bitbucket.org/salishsea/erddap-datasets for the active,
version controlled version of this notebook,
and the production `datasets.xml` file.
```
from collections import OrderedDict
from lxml import etree
```
**NOTE**
The next cell mounts the `/results` filesystem on `skookum` locally.
It is intended for use when this notebook is run on a laptop
or other non-Waterhole machine that has `sshfs` installed
and a mount point for `/results` available in its root filesystem.
Don't execute the cell if that doesn't describe your situation.
```
!sshfs skookum:/results /results
```
The `metadata` dictionary below contains information for dataset
attribute tags whose values need to be changed,
or that need to be added for all datasets.
The keys are the dataset attribute names.
The values are dicts containing a required `text` item
and perhaps an optional `after` item.
The value associated with the `text` key is the text content
for the attribute tag.
When present,
the value associated with the `after` key is the name
of the dataset attribute after which a new attribute tag
containing the `text` value is to be inserted.
```
metadata = OrderedDict([
('coverage_content_type', {
'text': 'modelResult',
'after': 'cdm_data_type',
}),
('infoUrl', {
'text':
'https://salishsea-meopar-docs.readthedocs.io/en/latest/results_server/index.html#salish-sea-model-results',
}),
('institution', {'text': 'UBC EOAS'}),
('institution_fullname', {
'text': 'Earth, Ocean & Atmospheric Sciences, University of British Columbia',
'after': 'institution',
}),
('license', {
'text': '''The Salish Sea MEOPAR NEMO model results are copyright 2013-2021
by the Salish Sea MEOPAR Project Contributors and The University of British Columbia.
They are licensed under the Apache License, Version 2.0. http://www.apache.org/licenses/LICENSE-2.0''',
}),
('project', {
'text':'Salish Sea MEOPAR NEMO Model',
'after': 'title',
}),
('creator_name', {
'text': 'Salish Sea MEOPAR Project Contributors',
'after': 'project',
}),
('creator_email', {
'text': '[email protected]',
'after': 'creator_name',
}),
('creator_url', {
'text': 'https://salishsea-meopar-docs.readthedocs.io/',
'after': 'creator_email',
}),
('acknowledgement', {
'text': 'MEOPAR, ONC, Compute Canada',
'after': 'creator_url',
}),
('drawLandMask', {
'text': 'over',
'after': 'acknowledgement',
}),
])
```
The `datasets` dictionary below provides the content
for the dataset `title` and `summary` attributes.
The `title` attribute content appears in the datasets list table
(among other places).
It should be `<`80 characters long,
and note that only the 1st 40 characters will appear in the table.
The `summary` attribute content appears
(among other places)
when a user hovers the cursor over the `?` icon beside the `title`
content in the datasets list table.
The text that is inserted into the `summary` attribute tag
by code later in this notebook is the
`title` content followed by the `summary` content,
separated by a blank line.
The keys of the `datasets` dict are the `datasetID` strings that
are used in many places by the ERDDAP server.
They are structured as follows:
* `ubc` to indicate that the dataset was produced at UBC
* `SS` to indicate that the dataset is a product of the Salish Sea NEMO model
* a few letters to indicate the model runs that produce the dataset:
* `n` to indicate that the dataset is from a nowcast run,
* `f` for forecast,
* `f2` for forecast2 (aka preliminary forecast),
* `hg` for hindcast-green
* `ng` for nowcast-green,
* `a` for atmospheric forcing,
* a description of the dataset variables; e.g. `PointAtkinsonSSH` or `3DuVelocity`
* the time interval of values in the dataset; e.g. `15m`, `1h`, `1d`
* the dataset version; e.g. `V16-10`, or `V1`
Versioning was changed to a [CalVer](http://calver.org/) type scheme in Oct-2016.
Thereafter versions are of the form `Vyymm` and indicate the year and month when the dataset entered production.
So:
* `ubcSSnPointAtkinsonSSH15mV1` is the version 1 dataset of 15 minute averaged sea surface height values at Point Atkinson from `PointAtkinson.nc` output files
* `ubcSSn3DwVelocity1hV2` is the version 2 dataset of 1 hr averaged vertical (w) velocity values over the entire domain from `SalishSea_1h_*_grid_W.nc` output files
* `ubcSSnSurfaceTracers1dV1` is the version 1 dataset of daily averaged surface tracer values over the entire domain from `SalishSea_1d_*_grid_T.nc` output files
* `ubcSSnBathymetry2V16-07` is the version 16-07 dataset of longitude, latitude, and bathymetry of the Salish Sea NEMO model grid that came into use in Jul-2016.
The corresponding NEMO-generated mesh mask variables are in the `ubcSSn2DMeshMaskDbo2V16-07` (y, x variables),
and the `ubcSSn3DMeshMaskDbo2V16-07` (z, y, x variables) datasets.
The dataset version part of the `datasetID` is used to indicate changes in the variables
contained in the dataset.
For example,
the transition from the `ubcSSn3DwVelocity1hV1` to the `ubcSSn3DwVelocity1hV2` dataset
occurred on 24-Jan-2016 when we started to output vertical eddy viscosity and diffusivity
values at the `w` grid points.
All dataset ids end with their version identifier and their `summary` ends with a notation about the variables
that they contain; e.g.
```
v1: wVelocity variable
```
When a dataset version is incremented, a line describing the change is added
to the end of its `summary`; e.g.
```
v1: wVelocity variable
v2: Added eddy viscosity & diffusivity variables ve_eddy_visc & ve_eddy_diff
```
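As an illustration only (this helper is not part of the notebook's workflow), a `datasetID` following the convention above can be assembled from its parts; grid datasets such as the bathymetry simply omit the time-interval part:
```
# Illustrative only: compose a datasetID string following the naming convention described above.
def make_dataset_id(run_type, variables, interval, version, org='ubc', model='SS'):
    """e.g. make_dataset_id('n', '3DwVelocity', '1h', 'V2') -> 'ubcSSn3DwVelocity1hV2'"""
    return '{0}{1}{2}{3}{4}{5}'.format(org, model, run_type, variables, interval, version)

print(make_dataset_id('n', '3DwVelocity', '1h', 'V2'))        # ubcSSn3DwVelocity1hV2
print(make_dataset_id('n', 'PointAtkinsonSSH', '15m', 'V1'))  # ubcSSnPointAtkinsonSSH15mV1
```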
```
datasets = {
'ubcSSnBathymetry2V1' :{
'type': 'geolocation bathymetry',
'title': 'Salish Sea NEMO Model Grid, Geo-location and Bathymetry, v1',
'summary':'''Longitude, latitude, and bathymetry of the Salish Sea NEMO model grid.
The bathymetry values are those calculated by NEMO from the input bathymetry file.
NEMO modifies the input bathymetry to remove isolated holes, and too-small partial steps.
The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
v1: longitude, latitude and bathymetry variables
''',
'fileNameRegex': '.*SalishSea2_NEMO_bathy\.nc$'
},
'ubcSSnBathymetry2V16-07' :{
'type': 'geolocation bathymetry',
'title': 'Salish Sea NEMO Model Grid, Geo-location and Bathymetry, v16-07',
'summary':'''Longitude, latitude, and bathymetry of the Salish Sea NEMO model grid.
The bathymetry values are those calculated by NEMO from the input bathymetry file.
NEMO modifies the input bathymetry to remove isolated holes, and too-small partial steps.
The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
v1: longitude, latitude and bathymetry variables
v16-07: same variables,
bathymetry uniformly deepened by 1 grid level,
smoothed at Juan de Fuca & Johnstone Strait open boundaries,
Fraser River lengthened,
bathymetry deepened near mouth of Fraser River
''',
'fileNameRegex': '.*downbyone2_NEMO_bathy\.nc$'
},
'ubcSSn2DMeshMask2V1': {
'type': 'geolocation bathymetry',
'title': 'Salish Sea NEMO Model Grid, 2D Mesh Mask, v1',
'summary':'''NEMO grid variable value for the u-v plane of the
Salish Sea NEMO model Arakawa-C grid.
The values are those calculated by NEMO from the input coordinates and bathymetry files.
The variable names are those used by NEMO-3.4,
see the NEMO-3.4 book (http://www.nemo-ocean.eu/Media/Files/NEMO_book_V3_4.pdf) for details,
or the long_name attributes of the variables for succinct descriptions of the variables.
The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
v1: e1t, e2t, e1u, e2u, e1v, e2v, e1f, e2f, glamt, gphit, glamu, gphiu, glamv, gphiv,
tmaskutil, umaskutil, vmaskutil, fmaskutil, ff, mbathy variables
''',
'fileNameRegex': '.*mesh_mask_SalishSea2\.nc$',
},
'ubcSSn2DMeshMask2V16-07': {
'type': 'geolocation bathymetry',
'title': 'Salish Sea NEMO Model Grid, 2D Mesh Mask, v16-07',
'summary':'''NEMO grid variable value for the u-v plane of the
Salish Sea NEMO model Arakawa-C grid.
The values are those calculated by NEMO from the input coordinates and bathymetry files.
The variable names are those used by NEMO-3.6,
see the NEMO-3.6 book (http://www.nemo-ocean.eu/Media/Files/NEMO_book_V3_6.pdf) for details,
or the long_name attributes of the variables for succinct descriptions of the variables.
The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
v1: e1t, e2t, e1u, e2u, e1v, e2v, e1f, e2f, glamt, gphit, glamu, gphiu, glamv, gphiv,
tmaskutil, umaskutil, vmaskutil, fmaskutil, ff, mbathy variables
v16-07: e1t, e2t, e1u, e2u, e1v, e2v, e1f, e2f, glamt, gphit, glamu, gphiu, glamv, gphiv,
glamf, gphif, tmaskutil, umaskutil, vmaskutil, fmaskutil, ff, mbathy variables
''',
'fileNameRegex': '.*mesh_mask_downbyone2\.nc$',
},
'ubcSSn3DMeshMask2V1': {
'type': 'geolocation bathymetry',
'title': 'Salish Sea NEMO Model Grid, 3D Mesh Mask, v1',
'summary':'''NEMO grid variable value for the Salish Sea NEMO model Arakawa-C grid.
The values are those calculated by NEMO from the input coordinates and bathymetry files.
The variable names are those used by NEMO-3.4,
see the NEMO-3.4 book (http://www.nemo-ocean.eu/Media/Files/NEMO_book_V3_4.pdf) for details,
or the long_name attributes of the variables for succinct descriptions of the variables.
The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
v1: e3t, e3u, e3v, e3w, gdept, gdepu, gdepv, gdepw, tmask, umask, vmask, fmask variables
''',
'fileNameRegex': '.*mesh_mask_SalishSea2\.nc$'
},
'ubcSSn3DMeshMask2V16-07': {
'type': 'geolocation bathymetry',
'title': 'Salish Sea NEMO Model Grid, 3D Mesh Mask, v16-07',
'summary':'''NEMO grid variable value for the Salish Sea NEMO model Arakawa-C grid.
The values are those calculated by NEMO from the input coordinates and bathymetry files.
The variable names are those used by NEMO-3.6,
see the NEMO-3.6 book (http://www.nemo-ocean.eu/Media/Files/NEMO_book_V3_6.pdf) for details,
or the long_name attributes of the variables for succinct descriptions of the variables.
The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
v1: e3t_0, e3u_0, e3v_0, e3w_0, gdept_0, gdepu, gdepv, gdepw_0, tmask, umask, vmask, fmask variables
v16-07: e3t, e3u, e3v, e3w, gdept, gdepu, gdepv, gdepw, tmask, umask, vmask, fmask variables
''',
'fileNameRegex': '.*mesh_mask_downbyone2\.nc$'
},
'ubcSSnPointAtkinsonSSH15mV1': {
'type': 'tide gauge',
'title': 'Nowcast, Point Atkinson, Sea Surface Height, 15min, v1',
'summary': '''Sea surface height values averaged over 15 minute intervals from
Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point
closest to the Point Atkinson tide gauge station on the north side of English Bay,
near Vancouver, British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: ssh variable
''',
'fileNameRegex': '.*PointAtkinson\.nc$',
},
'ubcSSnCampbellRiverSSH15mV1': {
'type': 'tide gauge',
'title': 'Nowcast, Campbell River, Sea Surface Height, 15min, v1',
'summary': '''Sea surface height values averaged over 15 minutes intervals from
Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point
closest to the Campbell River tide gauge station at the north end of the Strait of Georgia,
near Campbell River, British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: ssh variable
''',
'fileNameRegex': '.*CampbellRiver\.nc$',
},
'ubcSSnCherryPointSSH15mV1': {
'type': 'tide gauge',
'title': 'Nowcast, Cherry Point, Sea Surface Height, 15min, v1',
'summary': '''Sea surface height values averaged over 15 minutes intervals from
Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point
closest to the Cherry Point tide gauge station in the southern Strait of Georgia,
near Birch Bay, Washington.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: ssh variable
''',
'fileNameRegex': '.*CherryPoint\.nc$',
},
'ubcSSnFridayHarborSSH15mV1': {
'type': 'tide gauge',
'title': 'Nowcast, Friday Harbor, Sea Surface Height, 15min, v1',
'summary': '''Sea surface height values averaged over 15 minutes intervals from
Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point
closest to the Friday Harbor tide gauge station at San Juan Island in Haro Strait,
near Friday Harbor, Washington.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: ssh variable
''',
'fileNameRegex': '.*FridayHarbor\.nc$',
},
'ubcSSnNanaimoSSH15mV1': {
'type': 'tide gauge',
'title': 'Nowcast, Nanaimo, Sea Surface Height, 15min, v1',
'summary': '''Sea surface height values averaged over 15 minutes intervals from
Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point
closest to the Nanaimo tide gauge station on the west side of the central Strait of Georgia,
near Nanaimo, British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: ssh variable
''',
'fileNameRegex': '.*Nanaimo\.nc$',
},
'ubcSSnNeahBaySSH15mV1': {
'type': 'tide gauge',
'title': 'Nowcast, Neah Bay, Sea Surface Height, 15min, v1',
'summary': '''Sea surface height values averaged over 15 minutes intervals from
Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point
closest to the Neah Bay tide gauge station on the south side of the west end of the Juan de Fuca Strait,
near Neah Bay, Washington.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: ssh variable
''',
'fileNameRegex': '.*NeahBay\.nc$',
},
'ubcSSnVictoriaSSH15mV1': {
'type': 'tide gauge',
'title': 'Nowcast, Victoria, Sea Surface Height, 15min, v1',
'summary': '''Sea surface height values averaged over 15 minutes intervals from
Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point
closest to the Victoria tide gauge station on the north side of the east end of the Juan de Fuca Strait,
in the Victoria Inner Harbour, near Victoria, British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: ssh variable
''',
'fileNameRegex': '.*Victoria\.nc$',
},
'ubcSSnSandHeadsSSH15mV1': {
'type': 'tide gauge',
'title': 'Nowcast, Sand Heads, Sea Surface Height, 15min, v1',
'summary': '''Sea surface height values averaged over 15 minutes intervals from
Salish Sea NEMO model nowcast runs. The values are calculated at the model grid point
closest to the Sand Heads light station on the east side of the central Strait of Georgia,
near Steveston, British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: ssh variable
''',
'fileNameRegex': '.*Sandheads\.nc$',
},
'ubcSSn3DTracerFields1hV1': {
'type': '3d fields',
'title': 'Nowcast, Salish Sea, 3d Tracer Fields, Hourly, v1',
'summary': '''3d salinity and water temperature field values averaged over 1 hour intervals
from Salish Sea NEMO model nowcast runs. The values are calculated for the entire model grid
that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: salinity (practical) and temperature variables
''',
'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_T\.nc$',
},
'ubcSSnSurfaceTracerFields1hV1': {
'type': 'surface fields',
'title': 'Nowcast, Salish Sea, Surface Tracer Fields, Hourly, v1',
'summary': '''2d sea surface height and rainfall rate field values averaged over 1 hour intervals
from Salish Sea NEMO model nowcast runs. The values are calculated for the surface of the model grid
that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: sea surface height and rainfall rate variables
''',
'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_T\.nc$',
},
'ubcSSn3DuVelocity1hV1': {
'type': '3d fields',
'title': 'Nowcast, Salish Sea, 3d u Velocity Field, Hourly, v1',
'summary': '''3d zonal (u) component velocity field values averaged over 1 hour intervals
from Salish Sea NEMO model nowcast runs. The values are calculated for the entire model grid
that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: uVelocity variable
''',
'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_U\.nc$',
},
'ubcSSn3DvVelocity1hV1': {
'type': '3d fields',
'title': 'Nowcast, Salish Sea, 3d v Velocity Field, Hourly, v1',
'summary': '''3d meridional (v) component velocity field values averaged over 1 hour intervals
from Salish Sea NEMO model nowcast runs. The values are calculated for the entire model grid
that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: vVelocity variable
''',
'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_V\.nc$',
},
'ubcSSn3DwVelocity1hV1': {
'type': '3d fields',
'title': 'Nowcast, Salish Sea, 3d w Velocity Field, Hourly, v1',
'summary': '''3d vertical (w) component velocity field values averaged over 1 hour intervals
from Salish Sea NEMO model nowcast runs. The values are calculated for the entire model grid
that includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
Geo-location and depth data for the Salish Sea NEMO model grid are available in the ubcSSnBathymetry2V1 dataset.
v1: wVelocity variable
''',
'fileNameRegex': '.*SalishSea_1h_\d{8}_\d{8}_grid_W\.nc$',
},
'ubcSSaSurfaceAtmosphereFieldsV1': {
'type': 'surface fields',
'title': 'HRDPS, Salish Sea, Atmospheric Forcing Fields, Hourly, v1',
'summary': '''2d hourly atmospheric field values from the
Environment Canada HRDPS atmospheric forcing model that are used to force the Salish Sea NEMO model.
The model grid includes the Juan de Fuca Strait, the Strait of Georgia, Puget Sound,
and Johnstone Strait on the coasts of Washington State and British Columbia.
Geo-location data for the atmospheric forcing grid are available in the ubcSSaAtmosphereGridV1 dataset.
Atmospheric field values are interpolated on to the Salish Sea NEMO model grid (ubcSSnBathymetry2V1 dataset)
on-the-fly by NEMO.
v1: atmospheric pressure, precipitation rate, 2m specific humidity, 2m air temperature,
short-wave radiation flux, long-wave radiation flux, 10m u wind component, 10m v wind component variables
''',
'fileNameRegex': '.*ops_y\d{4}m\d{2}d\d{2}\.nc$',
},
}
datasets['ubcSSn3DwVelocity1hV2'] = datasets['ubcSSn3DwVelocity1hV1']
datasets['ubcSSn3DwVelocity1hV2'].update({
'title': datasets['ubcSSn3DwVelocity1hV1']['title'].replace(', v1', ', v2'),
'summary': datasets['ubcSSn3DwVelocity1hV1']['summary'] + '''
v2: Added eddy viscosity & diffusivity variables ve_eddy_visc & ve_eddy_diff''',
})
datasets['ubcSSn3DTracerFields1hV16-10'] = datasets['ubcSSn3DTracerFields1hV1']
datasets['ubcSSn3DTracerFields1hV16-10'].update({
'title': datasets['ubcSSn3DTracerFields1hV1']['title'].replace(', v1', ', v16-10'),
'summary': datasets['ubcSSn3DTracerFields1hV1']['summary'] + '''
v16-10: NEMO-3.6; ubcSSnBathymetry2V16-07 bathymetry; see infoUrl link for full details.
Changed salinity variable to reference salinity.
Made temperature variable explicitly potential temperature.
Added squared buoyancy frequency variable.''',
})
datasets['ubcSSnSurfaceTracerFields1hV16-10'] = datasets['ubcSSnSurfaceTracerFields1hV1']
datasets['ubcSSnSurfaceTracerFields1hV16-10'].update({
'title': datasets['ubcSSnSurfaceTracerFields1hV1']['title'].replace(', v1', ', v16-10'),
'summary': datasets['ubcSSnSurfaceTracerFields1hV1']['summary'] + '''
v16-10: NEMO-3.6; ubcSSnBathymetry2V16-07 bathymetry; see infoUrl link for full details.
Added mixed layer thickness defined by sigma theta variable.
Deleted rainfall rate variable.''',
})
datasets['ubcSSn3DuVelocity1hV16-10'] = datasets['ubcSSn3DuVelocity1hV1']
datasets['ubcSSn3DuVelocity1hV16-10'].update({
'title': datasets['ubcSSn3DuVelocity1hV1']['title'].replace(', v1', ', v16-10'),
'summary': datasets['ubcSSn3DuVelocity1hV1']['summary'] + '''
v16-10: NEMO-3.6; ubcSSnBathymetry2V16-07 bathymetry; see infoUrl link for full details.'''
})
datasets['ubcSSn3DvVelocity1hV16-10'] = datasets['ubcSSn3DvVelocity1hV1']
datasets['ubcSSn3DvVelocity1hV16-10'].update({
'title': datasets['ubcSSn3DvVelocity1hV1']['title'].replace(', v1', ', v16-10'),
'summary': datasets['ubcSSn3DvVelocity1hV1']['summary'] + '''
v16-10: NEMO-3.6; ubcSSnBathymetry2V16-07 bathymetry; see infoUrl link for full details.'''
})
```
The `dataset_vars` dictionary below is used to rename
variables from the often cryptic NEMO names to the names
that appear in the ERDDAP generated files and web content.
The keys are the NEMO variable names to replace.
The values are dicts that give the variable name to use in ERDDAP
as the value of the `destinationName` attribute.
```
dataset_vars = {
'sossheig': {'destinationName': 'ssh'},
'vosaline': {'destinationName': 'salinity'},
'votemper': {'destinationName': 'temperature'},
'vozocrtx': {'destinationName': 'uVelocity'},
'vomecrty': {'destinationName': 'vVelocity'},
'vovecrtz': {'destinationName': 'wVelocity'},
}
```
A few convenient functions to reduce code repetition:
```
def print_tree(root):
"""Display an XML tree fragment with indentation.
"""
print(etree.tostring(root, pretty_print=True).decode('ascii'))
def find_att(root, att):
"""Return the dataset attribute element named att
or raise a ValueError exception if it cannot be found.
"""
e = root.find('.//att[@name="{}"]'.format(att))
if e is None:
raise ValueError('{} attribute element not found'.format(att))
return e
def replace_yx_with_lonlat(root):
new_axes = {
'y': {'sourceName': 'nav_lon', 'destinationName': 'longitude'},
'x': {'sourceName': 'nav_lat', 'destinationName': 'latitude'},
}
for axis in root.findall('.//axisVariable'):
if axis.find('.//sourceName').text in new_axes:
key = axis.find('.//sourceName').text
new_axis = etree.Element('axisVariable')
etree.SubElement(new_axis, 'sourceName').text = new_axes[key]['sourceName']
etree.SubElement(new_axis, 'destinationName').text = new_axes[key]['destinationName']
axis.getparent().replace(axis, new_axis)
```
Now we're ready to produce a dataset!!!
Use the `/opt/tomcat/webapps/erddap/WEB-INF/GenerateDatasetsXml.sh` script
to generate the initial version of an XML fragment for a dataset:
```
$ cd /opt/tomcat/webapps/erddap/WEB-INF/
$ bash GenerateDatasetsXml.sh EDDGridFromNcFiles /results/SalishSea/nowcast/
```
The `EDDGridFromNcFiles` and `/results/SalishSea/nowcast/` arguments
tell the script which `EDDType` and what parent directory to use,
avoiding having to type those in answer to prompts.
Answer the remaining prompts,
for example:
```
File name regex (e.g., ".*\.nc") (default="")
? .*SalishSea_1h_\d{8}_\d{8}_grid_W\.nc$
Full file name of one file (default="")
? /results/SalishSea/nowcast/28jan16/SalishSea_1h_20160128_20160128_grid_W.nc
ReloadEveryNMinutes (e.g., 10080) (default="")
? 10080
```
Other examples of file name regex are:
* `.*PointAtkinson.nc$`
* `.*SalishSea_1d_\d{8}_\d{8}_grid_W\.nc$`
The output is written to `/results/erddap/logs/GenerateDatasetsXml.out`
Now, we:
* set the `datasetID` we want to use
* parse the output of `GenerateDatasetsXml.sh` into an XML tree data structure
* set the `datasetID` dataset attribute value
* re-set the `fileNameRegex` dataset attribute value because it loses its `\` characters during parsing(?)
* edit and add dataset attributes from the `metadata` dict
* set the `title` and `summary` dataset attributes from the `datasets` dict
* set the names of the grid `x` and `y` axis variables
* rename data variables as specified in the `dataset_vars` dict
```
def update_xml(root, datasetID, metadata, datasets, dataset_vars):
root.attrib['datasetID'] = datasetID
root.find('.//fileNameRegex').text = datasets[datasetID]['fileNameRegex']
title = datasets[datasetID]['title']
summary = find_att(root, 'summary')
summary.text = '{0}\n\n{1}'.format(title, datasets[datasetID]['summary'])
e = etree.Element('att', name='title')
e.text = title
summary.addnext(e)
for att, info in metadata.items():
e = etree.Element('att', name=att)
e.text = info['text']
try:
root.find('.//att[@name="{}"]'.format(info['after'])).addnext(e)
except KeyError:
find_att(root, att).text = info['text']
for axis_name in root.findall('.//axisVariable/destinationName'):
if axis_name.text in ('x', 'y'):
axis_name.text = 'grid{}'.format(axis_name.text.upper())
if datasets[datasetID]['type'] == 'tide gauge':
replace_yx_with_lonlat(root)
for var_name in root.findall('.//dataVariable/destinationName'):
if var_name.text in dataset_vars:
var_name.text = dataset_vars[var_name.text]['destinationName']
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse('/results/erddap/logs/GenerateDatasetsXml.out', parser)
root = tree.getroot()
datasetID = 'ubcSSn3DvVelocity1hV16-10'
update_xml(root, datasetID, metadata, datasets, dataset_vars)
```
Inspect the resulting dataset XML fragment below and edit the dicts and
code cell above until it is what is required for the dataset:
```
print_tree(root)
```
Extra processing steps are required for some types of datasets.
See:
* [Surface Field Datasets](#Surface-Field-Datasets)
* [Model Grid Geo-location and Bathymetry Datasets](#Model-Grid-Geo-location-and-Bathymetry-Datasets)
* [EC HRDPS Atmospheric Forcing Datasets](#EC-HRDPS-Atmospheric-Forcing-Datasets)
Store the XML fragment for the dataset:
```
with open('/results/erddap-datasets/fragments/{}.xml'.format(datasetID), 'wb') as f:
f.write(etree.tostring(root, pretty_print=True))
```
Edit `/results/erddap-datasets/datasets.xml` to include the
XML fragment for the dataset that was stored by the above cell.
That file is symlinked to `/opt/tomcat/content/erddap/datasets.xml`.
Create a flag file to signal the ERDDAP server process to load the dataset:
```
$ cd /results/erddap/flag/
$ touch <datasetID>
```
If the dataset does not appear on https://salishsea.eos.ubc.ca/erddap/info/,
check `/results/erddap/logs/log.txt` for error messages from the dataset load process
(they may not be at the end of the file because ERDDAP is pretty chatty).
Once the dataset has been successfully loaded and you are happy with the metadata
that ERDDAP is providing for it,
commit the changes in `/results/erddap-datasets/` and push them to GitHub.
## Surface Field Datasets
The `/opt/tomcat/webapps/erddap/WEB-INF/GenerateDatasetsXml.sh` script produces an XML
fragment that uses all of the dimensions that it finds in the sample file it parses,
and includes only the variables that have all of those dimensions.
To produce an XML fragment for surface fields we need to do some additional work:
* Delete the depth axis
* Delete all of the `dataVariable` elements
* Add `dataVariable` elements for the surface variables
```
for axis in root.findall('.//axisVariable'):
if axis.find('.//destinationName').text == 'depth':
axis.getparent().remove(axis)
break
for var in root.findall('.//dataVariable'):
var.getparent().remove(var)
var = etree.SubElement(root, 'dataVariable')
etree.SubElement(var, 'sourceName').text = 'sossheig'
etree.SubElement(var, 'destinationName').text = 'ssh'
etree.SubElement(var, 'dataType').text = 'float'
attrs = etree.SubElement(var, 'addAttributes')
etree.SubElement(attrs, 'att', name='_ChunkSize').text = 'null'
etree.SubElement(attrs, 'att', name='coordinates').text = 'null'
var = etree.SubElement(root, 'dataVariable')
etree.SubElement(var, 'sourceName').text = 'rain_rate'
etree.SubElement(var, 'destinationName').text = 'rain_rate'
etree.SubElement(var, 'dataType').text = 'float'
attrs = etree.SubElement(var, 'addAttributes')
etree.SubElement(attrs, 'att', name='_ChunkSize').text = 'null'
etree.SubElement(attrs, 'att', name='coordinates').text = 'null'
find_att(root, 'keywords').text = (
'model results, height, local, sea, sea surface height, sossheig, source, surface, time_counter')
print_tree(root)
with open('/results/erddap-datasets/fragments/{}.xml'.format(datasetID), 'wb') as f:
f.write(etree.tostring(root, pretty_print=True))
```
## Model Grid Geo-location and Bathymetry Datasets
Model grid geo-location and bathymetry datasets require a lot of hand editing
because they are not model generated.
Here is an example of a finished one:
```
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse('/results/erddap-datasets/fragments/ubcSSnBathymetry2V1.xml', parser)
root = tree.getroot()
print_tree(root)
```
## EC HRDPS Atmospheric Forcing Datasets
### Atmospheric Forcing Grid Geo-location Dataset
Use the `/opt/tomcat/webapps/erddap/WEB-INF/GenerateDatasetsXml.sh` script
to generate the initial version of an XML fragment for the dataset:
```
$ cd /opt/tomcat/webapps/erddap/WEB-INF/
$ bash GenerateDatasetsXml.sh EDDGridFromNcFiles /results/forcing/atmospheric/GEM2.5/operational/ ops_y\d{4}m\d{2}d\d{2}.nc$ /results/forcing/atmospheric/GEM2.5/operational/ops_y2016m03d07.nc 10080
```
Like the model grid geo-location and bathymetry dataset,
the atmospheric forcing grid dataset requires a lot of hand editing.
Here is the finished dataset:
```
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse('/results/erddap-datasets/fragments/ubcSSaAtmosphereGridV1.xml', parser)
root = tree.getroot()
print_tree(root)
```
### Atmospheric Forcing Model Fields
* Change the value of the `recursive` element to `false` so that the `/results/forcing/atmospheric/GEM2.5/operational/fcst/` directory is excluded
* Add Environment Canada acknowledgement and terms & conditions of use to `license` element
* Add Environment Canada to `acknowledgement` element
```
root.find('.//recursive').text = 'false'
find_att(root, 'license').text += '''
This dataset is derived from a product of the Environment Canada HRDPS (High Resolution Deterministic Prediction System)
model. The Terms and conditions of use of Meteorological Data from Environment Canada are available at
http://dd.weather.gc.ca/doc/LICENCE_GENERAL.txt.'''
find_att(root, 'acknowledgement').text += ', Environment Canada'
for axis in root.findall('.//axisVariable'):
axis_name = axis.find('.//sourceName').text
if 'time' not in axis_name:
attrs = axis.find('.//addAttributes')
etree.SubElement(attrs, 'att', name='grid_spacing').text = 'null'
etree.SubElement(attrs, 'att', name='units').text = 'null'
etree.SubElement(attrs, 'att', name='long_name').text = axis_name.upper()
etree.SubElement(attrs, 'att', name='standard_name').text = axis_name
print_tree(root)
with open('/results/erddap-datasets/fragments/{}.xml'.format(datasetID), 'wb') as f:
f.write(etree.tostring(root, pretty_print=True))
```
<a href="https://colab.research.google.com/github/iotanalytics/IoTTutorial/blob/main/code/detection_and_segmentation/Anomaly_Detection_with_Autoencoder_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **Anomaly Detection with Autoencoder**
Autoencoders are neural networks designed to learn a low dimensional representation given some input data. They consist of two components: an encoder (which learns to map input data to a low dimensional representation, termed the bottleneck), and a decoder (which learns to map this low dimensional representation back to the original input data). By structuring the learning problem in this manner, the encoder network learns an efficient “compression” function which maps input data to a salient lower dimension representation, such that the decoder network is able to successfully reconstruct the original input data. The model is trained by minimizing the reconstruction error: the difference (mean squared error) between the original input and the reconstructed output produced by the decoder.

# Use autoencoder to get the threshold for anomaly detection
It is important to note that the mapping function learned by an autoencoder is specific to the training data distribution, i.e., an autoencoder will typically not succeed at reconstructing data which is significantly different from data it has seen during training. This property of learning a distribution specific mapping (as opposed to a generic linear mapping) is particularly useful for the task of anomaly detection.
Applying an autoencoder for anomaly detection follows the general principle of first modeling normal behaviour and subsequently generating an anomaly score for a new data sample. To model normal behaviour we train the autoencoder on a normal data sample. This way, the model learns a mapping function that successfully reconstructs normal data samples with a very small reconstruction error (the difference between the actual sample and the version reconstructed by the model). This behavior is replicated at test time, where the reconstruction error is small for normal data samples, and large for abnormal data samples. To identify anomalies, we use the reconstruction error score as an anomaly score and flag samples with reconstruction errors above a given threshold.
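The thresholding rule itself is simple. A minimal sketch (the array names `x` and `x_pred` below are chosen for illustration, not taken from the tutorial code):
```
# Minimal sketch of reconstruction-error thresholding.
import numpy as np

def anomaly_flags(x, x_pred, threshold):
    # per-sample mean absolute reconstruction error
    errors = np.mean(np.abs(x - x_pred), axis=tuple(range(1, x.ndim)))
    return errors > threshold

# Toy example; in practice the threshold is often the maximum training reconstruction error.
x = np.random.rand(5, 288, 1)
x_pred = x + 0.01 * np.random.rand(5, 288, 1)
print(anomaly_flags(x, x_pred, threshold=0.02))
```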
```
import numpy as np
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers
from matplotlib import pyplot as plt
```
Load the data. Here we use benchmark time series from the Numenta Anomaly Benchmark (NAB).
```
master_url_root = "https://raw.githubusercontent.com/numenta/NAB/master/data/"
df_small_noise_url_suffix = "artificialNoAnomaly/art_daily_small_noise.csv"
df_small_noise_url = master_url_root + df_small_noise_url_suffix
df_small_noise = pd.read_csv(
df_small_noise_url, parse_dates=True, index_col="timestamp"
)
df_daily_jumpsup_url_suffix = "artificialWithAnomaly/art_daily_jumpsup.csv"
df_daily_jumpsup_url = master_url_root + df_daily_jumpsup_url_suffix
df_daily_jumpsup = pd.read_csv(
df_daily_jumpsup_url, parse_dates=True, index_col="timestamp"
)
print(df_small_noise.head())
print(df_daily_jumpsup.head())
```
Visualize the data: time series with anomalies and without anomalies
```
fig, ax = plt.subplots()
df_small_noise.plot(legend=False, ax=ax)
plt.show()
fig, ax = plt.subplots()
df_daily_jumpsup.plot(legend=False, ax=ax)
plt.show()
```
Wrap the preprocessing into functions: create overlapping sequences from the raw data using the `TIME_STEPS` window length set in advance.
```
# Normalize and save the mean and std we get,
# for normalizing test data.
TIME_STEPS = 288
# Generated training sequences for use in the model.
def create_sequences(values, time_steps=TIME_STEPS):
output = []
for i in range(len(values) - time_steps + 1):
output.append(values[i : (i + time_steps)])
return np.stack(output)
def AE_anomaly_detection(x_train, x_test, time_steps=TIME_STEPS):
training_mean = x_train.mean()
training_std = x_train.std()
df_training_value = (x_train - training_mean) / training_std
print("Number of training samples:", len(df_training_value))
x_train = create_sequences(df_training_value.values)
print("Training input shape: ", x_train.shape)
model = keras.Sequential(
[
layers.Input(shape=(x_train.shape[1], x_train.shape[2])),
layers.Conv1D(
filters=32, kernel_size=7, padding="same", strides=2, activation="relu"
),
layers.Dropout(rate=0.2),
layers.Conv1D(
filters=16, kernel_size=7, padding="same", strides=2, activation="relu"
),
layers.Conv1DTranspose(
filters=16, kernel_size=7, padding="same", strides=2, activation="relu"
),
layers.Dropout(rate=0.2),
layers.Conv1DTranspose(
filters=32, kernel_size=7, padding="same", strides=2, activation="relu"
),
layers.Conv1DTranspose(filters=1, kernel_size=7, padding="same"),
]
)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.001), loss="mse")
history = model.fit(
x_train,
x_train,
epochs=50,
batch_size=128,
validation_split=0.1,
callbacks=[
keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, mode="min")
],
)
# Get train MAE loss.
x_train_pred = model.predict(x_train)
train_mae_loss = np.mean(np.abs(x_train_pred - x_train), axis=1)
plt.hist(train_mae_loss, bins=50)
plt.xlabel("Train MAE loss")
plt.ylabel("No of samples")
plt.show()
# Get reconstruction loss threshold.
threshold = np.max(train_mae_loss)
print("Reconstruction error threshold: ", threshold)
##### test ...
test_mean = x_test.mean()
test_std = x_test.std()
####### prepare the test data
df_test_value = (x_test - test_mean) / test_std
#fig, ax = plt.subplots()
#df_test_value.plot(legend=False, ax=ax)
#plt.show()
# Create sequences from test values.
x_test = create_sequences(df_test_value.values)
print("Test input shape: ", x_test.shape)
# Get test MAE loss.
x_test_pred = model.predict(x_test)
test_mae_loss = np.mean(np.abs(x_test_pred - x_test), axis=1)
test_mae_loss = test_mae_loss.reshape((-1))
plt.hist(test_mae_loss, bins=50)
plt.xlabel("test MAE loss")
plt.ylabel("No of samples")
plt.show()
# Detect all the samples which are anomalies.
anomalies = test_mae_loss > threshold
print("Number of anomaly samples: ", np.sum(anomalies))
#print("Indices of anomaly samples: ", np.where(anomalies))
return anomalies
##### plot anomalies
anomalies = AE_anomaly_detection(df_small_noise, df_daily_jumpsup, time_steps=TIME_STEPS)
# data i is an anomaly if samples [(i - timesteps + 1) to (i)] are anomalies
test_mean = df_daily_jumpsup.mean()
test_std = df_daily_jumpsup.std()
df_test_value = (df_daily_jumpsup - test_mean) / test_std
anomalous_data_indices = []
for data_idx in range(TIME_STEPS - 1, len(df_test_value) - TIME_STEPS + 1):
if np.all(anomalies[data_idx - TIME_STEPS + 1 : data_idx]):
anomalous_data_indices.append(data_idx)
df_subset = df_daily_jumpsup.iloc[anomalous_data_indices]
fig, ax = plt.subplots()
df_daily_jumpsup.plot(legend=False, ax=ax)
df_subset.plot(legend=False, ax=ax, color="r")
plt.show()
```
## Pros and Cons of Autoencoder Algorithm for Anomaly Detection
**Pros**:
- An autoencoder can perform tasks that a linear program cannot.
- When an element of the neural network fails, the network can continue without any problem thanks to its parallel nature.
- An autoencoder built from a neural network learns from data and does not need to be reprogrammed.
- It can be implemented in any application.
**Cons**:
- The neural network needs training to operate.
- The architecture of a neural network is different from that of microprocessors and therefore needs to be emulated.
- Requires high processing time for large neural networks.
# 09 - Decision Trees
by [Alejandro Correa Bahnsen](albahnsen.com/)
version 0.2, May 2016
## Part of the class [Machine Learning for Risk Management](https://github.com/albahnsen/ML_RiskManagement)
This notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US). Special thanks goes to [Kevin Markham](https://github.com/justmarkham)
*Adapted from Chapter 8 of [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/)*
Why are we learning about decision trees?
- Can be applied to both regression and classification problems
- Many useful properties
- Very popular
- Basis for more sophisticated models
- Have a different way of "thinking" than the other models we have studied
## Lesson objectives
Students will be able to:
- Explain how a decision tree is created
- Build a decision tree model in scikit-learn
- Tune a decision tree model and explain how tuning impacts the model
- Interpret a tree diagram
- Describe the key differences between regression and classification trees
- Decide whether a decision tree is an appropriate model for a given problem
# Part 1: Regression trees
Major League Baseball player data from 1986-87:
- **Years** (x-axis): number of years playing in the major leagues
- **Hits** (y-axis): number of hits in the previous year
- **Salary** (color): low salary is blue/green, high salary is red/yellow

Group exercise:
- The data above is our **training data**.
- We want to build a model that predicts the Salary of **future players** based on Years and Hits.
- We are going to "segment" the feature space into regions, and then use the **mean Salary in each region** as the predicted Salary for future players.
- Intuitively, you want to **maximize** the similarity (or "homogeneity") within a given region, and **minimize** the similarity between different regions.
Rules for segmenting:
- You can only use **straight lines**, drawn one at a time.
- Your line must either be **vertical or horizontal**.
- Your line **stops** when it hits an existing line.

Above are the regions created by a computer:
- $R_1$: players with **less than 5 years** of experience, mean Salary of **\$166,000**
- $R_2$: players with **5 or more years** of experience and **less than 118 hits**, mean Salary of **\$403,000**
- $R_3$: players with **5 or more years** of experience and **118 hits or more**, mean Salary of **\$846,000**
**Note:** Years and Hits are both integers, but the convention is to use the **midpoint** between adjacent values to label a split.
These regions are used to make predictions on **out-of-sample data**. Thus, there are only three possible predictions! (Is this different from how **linear regression** makes predictions?)
Below is the equivalent regression tree:

The first split is **Years < 4.5**, thus that split goes at the top of the tree. When a splitting rule is **True**, you follow the left branch. When a splitting rule is **False**, you follow the right branch.
For players in the **left branch**, the mean Salary is \$166,000, thus you label it with that value. (Salary has been divided by 1000 and log-transformed to 5.11.)
For players in the **right branch**, there is a further split on **Hits < 117.5**, dividing players into two more Salary regions: \$403,000 (transformed to 6.00), and \$846,000 (transformed to 6.74).
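In code, the prediction rule encoded by this tree is just a pair of nested comparisons. A minimal sketch using the log-transformed leaf values quoted above:
```
# The salary tree's prediction rule: leaf values are the log-salaries quoted above.
def predict_log_salary(years, hits):
    if years < 4.5:
        return 5.11   # ~ $166,000
    if hits < 117.5:
        return 6.00   # ~ $403,000
    return 6.74       # ~ $846,000

print(predict_log_salary(years=3, hits=100))   # 5.11
print(predict_log_salary(years=10, hits=150))  # 6.74
```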

**What does this tree tell you about your data?**
- Years is the most important factor determining Salary, with a lower number of Years corresponding to a lower Salary.
- For a player with a lower number of Years, Hits is not an important factor determining Salary.
- For a player with a higher number of Years, Hits is an important factor determining Salary, with a greater number of Hits corresponding to a higher Salary.
**Question:** What do you like and dislike about decision trees so far?
## Building a regression tree by hand
Your **training data** is a tiny dataset of [used vehicle sale prices](https://raw.githubusercontent.com/justmarkham/DAT8/master/data/vehicles_train.csv). Your goal is to **predict price** for testing data.
1. Read the data into a Pandas DataFrame.
2. Explore the data by sorting, plotting, or split-apply-combine (aka `group_by`).
3. Decide which feature is the most important predictor, and use that to create your first splitting rule.
- Only binary splits are allowed.
4. After making your first split, split your DataFrame into two parts, and then explore each part to figure out what other splits to make.
5. Stop making splits once you are convinced that it strikes a good balance between underfitting and overfitting.
- Your goal is to build a model that generalizes well.
- You are allowed to split on the same variable multiple times!
6. Draw your tree, labeling the leaves with the mean price for the observations in that region.
- Make sure nothing is backwards: You follow the **left branch** if the rule is true, and the **right branch** if the rule is false.
## How does a computer build a regression tree?
**Ideal approach:** Consider every possible partition of the feature space (computationally infeasible)
**"Good enough" approach:** recursive binary splitting
1. Begin at the top of the tree.
2. For **every feature**, examine **every possible cutpoint**, and choose the feature and cutpoint such that the resulting tree has the lowest possible mean squared error (MSE). Make that split.
3. Examine the two resulting regions, and again make a **single split** (in one of the regions) to minimize the MSE.
4. Keep repeating step 3 until a **stopping criterion** is met:
- maximum tree depth (maximum number of splits required to arrive at a leaf)
- minimum number of observations in a leaf
### Demo: Choosing the ideal cutpoint for a given feature
```
# vehicle data
import pandas as pd
import zipfile
with zipfile.ZipFile('../datasets/vehicles_train.csv.zip', 'r') as z:
f = z.open('vehicles_train.csv')
train = pd.io.parsers.read_table(f, index_col=False, sep=',')
# before splitting anything, just predict the mean of the entire dataset
train['prediction'] = train.price.mean()
train
# baseline: a "split" at year=0 sends every vehicle to the right branch,
# so the prediction is simply the overall mean price
year = 0
train['pred'] = train.loc[train.year < year, 'price'].mean()
train.loc[train.year >= year, 'pred'] = train.loc[train.year >= year, 'price'].mean()

# root mean squared error (RMSE) of that baseline prediction
(((train['price'] - train['pred'])**2).mean()) ** 0.5

# left branch of the baseline "split" (empty, since no year is below 0)
train_izq = train.loc[train.year < 0].copy()
train_izq.year.unique()

# RMSE resulting from a candidate split on year
def error_año(train, year):
    train['pred'] = train.loc[train.year < year, 'price'].mean()
    train.loc[train.year >= year, 'pred'] = train.loc[train.year >= year, 'price'].mean()
    return round(((((train['price'] - train['pred'])**2).mean()) ** 0.5), 2)

# RMSE resulting from a candidate split on miles
def error_miles(train, miles):
    train['pred'] = train.loc[train.miles < miles, 'price'].mean()
    train.loc[train.miles >= miles, 'pred'] = train.loc[train.miles >= miles, 'price'].mean()
    return round(((((train['price'] - train['pred'])**2).mean()) ** 0.5), 2)
```
**Recap:** Before every split, this process is repeated for every feature, and the feature and cutpoint that produces the lowest MSE is chosen.
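To make the search concrete, here is a small sketch (reusing the `error_año` and `error_miles` helpers defined above) that evaluates candidate cutpoints for each feature and reports the one with the lowest RMSE. Using the unique observed values as the candidate cutpoints is an assumption for illustration.
```
# evaluate every candidate cutpoint for 'year' and 'miles'
year_results = {y: error_año(train, y) for y in sorted(train.year.unique())}
miles_results = {m: error_miles(train, m) for m in sorted(train.miles.unique())}

best_year = min(year_results, key=year_results.get)
best_miles = min(miles_results, key=miles_results.get)

print('best year cutpoint:', best_year, 'RMSE:', year_results[best_year])
print('best miles cutpoint:', best_miles, 'RMSE:', miles_results[best_miles])
```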
## Building a regression tree in scikit-learn
```
# encode car as 0 and truck as 1
train['vtype'] = train.vtype.map({'car':0, 'truck':1})
# define X and y
feature_cols = ['year', 'miles', 'doors', 'vtype']
X = train[feature_cols]
y = train.price
# instantiate a DecisionTreeRegressor (with random_state=1)
from sklearn.tree import DecisionTreeRegressor
treereg = DecisionTreeRegressor(random_state=1)
treereg
# use leave-one-out cross-validation (LOOCV) to estimate the RMSE for this model
import numpy as np
from sklearn.model_selection import cross_val_score
scores = cross_val_score(treereg, X, y, cv=14, scoring='neg_mean_squared_error')
np.mean(np.sqrt(-scores))
```
## What happens when we grow a tree too deep?
- Left: Regression tree for Salary **grown deeper**
- Right: Comparison of the **training, testing, and cross-validation errors** for trees with different numbers of leaves

The **training error** continues to go down as the tree size increases (due to overfitting), but the lowest **cross-validation error** occurs for a tree with 3 leaves.
## Tuning a regression tree
Let's try to reduce the RMSE by tuning the **max_depth** parameter:
```
# try different values one-by-one
treereg = DecisionTreeRegressor(max_depth=1, random_state=1)
scores = cross_val_score(treereg, X, y, cv=14, scoring='neg_mean_squared_error')
np.mean(np.sqrt(-scores))
```
Or, we could write a loop to try a range of values:
```
# list of values to try
max_depth_range = range(1, 8)
# list to store the average RMSE for each value of max_depth
RMSE_scores = []
# use LOOCV with each value of max_depth
for depth in max_depth_range:
    treereg = DecisionTreeRegressor(max_depth=depth, random_state=1)
    MSE_scores = cross_val_score(treereg, X, y, cv=14, scoring='neg_mean_squared_error')
    RMSE_scores.append(np.mean(np.sqrt(-MSE_scores)))
%matplotlib inline
import matplotlib.pyplot as plt
# plot max_depth (x-axis) versus RMSE (y-axis)
plt.plot(max_depth_range, RMSE_scores)
plt.xlabel('max_depth')
plt.ylabel('RMSE (lower is better)')
# max_depth=3 was best, so fit a tree using that parameter
treereg = DecisionTreeRegressor(max_depth=3, random_state=1)
treereg.fit(X, y)
# "Gini importance" of each feature: the (normalized) total reduction of error brought by that feature
pd.DataFrame({'feature':feature_cols, 'importance':treereg.feature_importances_})
```
## Creating a tree diagram
```
# create a Graphviz file
from sklearn.tree import export_graphviz
export_graphviz(treereg, out_file='tree_vehicles.dot', feature_names=feature_cols)
# At the command line, run this to convert to PNG:
# dot -Tpng tree_vehicles.dot -o tree_vehicles.png
```

Reading the internal nodes:
- **samples:** number of observations in that node before splitting
- **mse:** MSE calculated by comparing the actual response values in that node against the mean response value in that node
- **rule:** rule used to split that node (go left if true, go right if false)
Reading the leaves:
- **samples:** number of observations in that node
- **value:** mean response value in that node
- **mse:** MSE calculated by comparing the actual response values in that node against "value"
## Making predictions for the testing data
```
# read the testing data
with zipfile.ZipFile('../datasets/vehicles_test.csv.zip', 'r') as z:
    f = z.open('vehicles_test.csv')
    test = pd.io.parsers.read_table(f, index_col=False, sep=',')
test['vtype'] = test.vtype.map({'car':0, 'truck':1})
test
```
**Question:** Using the tree diagram above, what predictions will the model make for each observation?
```
# use fitted model to make predictions on testing data
X_test = test[feature_cols]
y_test = test.price
y_pred = treereg.predict(X_test)
y_pred
# calculate RMSE
from sklearn.metrics import mean_squared_error
np.sqrt(mean_squared_error(y_test, y_pred))
```
# Part 2: Classification trees
**Example:** Predict whether Barack Obama or Hillary Clinton will win the Democratic primary in a particular county in 2008:

**Questions:**
- What are the observations? How many observations are there?
- What is the response variable?
- What are the features?
- What is the most predictive feature?
- Why does the tree split on high school graduation rate twice in a row?
- What is the class prediction for the following county: 15% African-American, 90% high school graduation rate, located in the South, high poverty, high population density?
- What is the predicted probability for that same county?
## Comparing regression trees and classification trees
|regression trees|classification trees|
|---|---|
|predict a continuous response|predict a categorical response|
|predict using mean response of each leaf|predict using most commonly occurring class of each leaf|
|splits are chosen to minimize MSE|splits are chosen to minimize Gini index (discussed below)|
## Splitting criteria for classification trees
Common options for the splitting criteria:
- **classification error rate:** fraction of training observations in a region that don't belong to the most common class
- **Gini index:** measure of total variance across classes in a region
### Example of classification error rate
Pretend we are predicting whether someone buys an iPhone or an Android:
- At a particular node, there are **25 observations** (phone buyers), of whom **10 bought iPhones and 15 bought Androids**.
- Since the majority class is **Android**, that's our prediction for all 25 observations, and thus the classification error rate is **10/25 = 40%**.
Our goal in making splits is to **reduce the classification error rate**. Let's try splitting on gender:
- **Males:** 2 iPhones and 12 Androids, thus the predicted class is Android
- **Females:** 8 iPhones and 3 Androids, thus the predicted class is iPhone
- Classification error rate after this split would be **5/25 = 20%**
Compare that with a split on age:
- **30 or younger:** 4 iPhones and 8 Androids, thus the predicted class is Android
- **31 or older:** 6 iPhones and 7 Androids, thus the predicted class is Android
- Classification error rate after this split would be **10/25 = 40%**
The decision tree algorithm will try **every possible split across all features**, and choose the split that **reduces the error rate the most.**
### Example of Gini index
Calculate the Gini index before making a split:
$$1 - \left(\frac {iPhone} {Total}\right)^2 - \left(\frac {Android} {Total}\right)^2 = 1 - \left(\frac {10} {25}\right)^2 - \left(\frac {15} {25}\right)^2 = 0.48$$
- The **maximum value** of the Gini index is 0.5, and occurs when the classes are perfectly balanced in a node.
- The **minimum value** of the Gini index is 0, and occurs when there is only one class represented in a node.
- A node with a lower Gini index is said to be more "pure".
Evaluating the split on **gender** using Gini index:
$$\text{Males: } 1 - \left(\frac {2} {14}\right)^2 - \left(\frac {12} {14}\right)^2 = 0.24$$
$$\text{Females: } 1 - \left(\frac {8} {11}\right)^2 - \left(\frac {3} {11}\right)^2 = 0.40$$
$$\text{Weighted Average: } 0.24 \left(\frac {14} {25}\right) + 0.40 \left(\frac {11} {25}\right) = 0.31$$
Evaluating the split on **age** using Gini index:
$$\text{30 or younger: } 1 - \left(\frac {4} {12}\right)^2 - \left(\frac {8} {12}\right)^2 = 0.44$$
$$\text{31 or older: } 1 - \left(\frac {6} {13}\right)^2 - \left(\frac {7} {13}\right)^2 = 0.50$$
$$\text{Weighted Average: } 0.44 \left(\frac {12} {25}\right) + 0.50 \left(\frac {13} {25}\right) = 0.47$$
Again, the decision tree algorithm will try **every possible split**, and will choose the split that **reduces the Gini index (and thus increases the "node purity") the most.**
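As a quick check of the arithmetic in the two examples above, here is a small sketch that computes the classification error rate and the weighted Gini index for the gender and age splits; the counts are taken directly from the examples.
```
def gini(counts):
    """Gini index for a node, given class counts."""
    total = sum(counts)
    return 1 - sum((c / total) ** 2 for c in counts)

def weighted_gini(groups):
    """Weighted average Gini index over the child nodes of a split."""
    total = sum(sum(g) for g in groups)
    return sum(gini(g) * sum(g) / total for g in groups)

def error_rate(groups):
    """Classification error rate after a split (misclassified / total)."""
    total = sum(sum(g) for g in groups)
    return sum(sum(g) - max(g) for g in groups) / total

# counts are (iPhone, Android) in each child node
gender_split = [(2, 12), (8, 3)]   # males, females
age_split = [(4, 8), (6, 7)]       # 30 or younger, 31 or older

print('gender split: error rate', error_rate(gender_split), 'gini', round(weighted_gini(gender_split), 2))
print('age split:    error rate', error_rate(age_split), 'gini', round(weighted_gini(age_split), 2))
```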
### Comparing classification error rate and Gini index
- Gini index is generally preferred because it will make splits that **increase node purity**, even if that split does not change the classification error rate.
- Node purity is important because we're interested in the **class proportions** in each region, since that's how we calculate the **predicted probability** of each class.
- scikit-learn's default splitting criterion for classification trees is the Gini index.
Note: There is another common splitting criterion called **cross-entropy**. It's numerically similar to the Gini index, but slower to compute, and thus not as popular.
## Building a classification tree in scikit-learn
We'll build a classification tree using the Titanic data:
```
# read in the data
with zipfile.ZipFile('../datasets/titanic.csv.zip', 'r') as z:
    f = z.open('titanic.csv')
    titanic = pd.read_csv(f, sep=',', index_col=0)
# encode female as 0 and male as 1
titanic['Sex'] = titanic.Sex.map({'female':0, 'male':1})
# fill in the missing values for age with the median age
titanic.Age.fillna(titanic.Age.median(), inplace=True)
# create a DataFrame of dummy variables for Embarked
embarked_dummies = pd.get_dummies(titanic.Embarked, prefix='Embarked')
embarked_dummies.drop(embarked_dummies.columns[0], axis=1, inplace=True)
# concatenate the original DataFrame and the dummy DataFrame
titanic = pd.concat([titanic, embarked_dummies], axis=1)
# print the updated DataFrame
titanic.head()
```
- **Survived:** 0=died, 1=survived (response variable)
- **Pclass:** 1=first class, 2=second class, 3=third class
- What will happen if the tree splits on this feature?
- **Sex:** 0=female, 1=male
- **Age:** numeric value
- **Embarked:** C or Q or S
```
# define X and y
feature_cols = ['Pclass', 'Sex', 'Age', 'Embarked_Q', 'Embarked_S']
X = titanic[feature_cols]
y = titanic.Survived
# fit a classification tree with max_depth=3 on all data
from sklearn.tree import DecisionTreeClassifier
treeclf = DecisionTreeClassifier(max_depth=3, random_state=1)
treeclf.fit(X, y)
# create a Graphviz file
export_graphviz(treeclf, out_file='tree_titanic.dot', feature_names=feature_cols)
# At the command line, run this to convert to PNG:
# dot -Tpng tree_titanic.dot -o tree_titanic.png
```

Notice the split in the bottom right: the **same class** is predicted in both of its leaves. That split didn't affect the **classification error rate**, though it did increase the **node purity**, which is important because it increases the accuracy of our predicted probabilities.
```
# compute the feature importances
pd.DataFrame({'feature':feature_cols, 'importance':treeclf.feature_importances_})
```
# Part 3: Comparing decision trees with other models
**Advantages of decision trees:**
- Can be used for regression or classification
- Can be displayed graphically
- Highly interpretable
- Can be specified as a series of rules, and more closely approximate human decision-making than other models
- Prediction is fast
- Features don't need scaling
- Automatically learns feature interactions
- Tends to ignore irrelevant features
- Non-parametric (will outperform linear models if relationship between features and response is highly non-linear)

**Disadvantages of decision trees:**
- Performance is (generally) not competitive with the best supervised learning methods
- Can easily overfit the training data (tuning is required)
- Small variations in the data can result in a completely different tree (high variance)
- Recursive binary splitting makes "locally optimal" decisions that may not result in a globally optimal tree
- Doesn't tend to work well if the classes are highly unbalanced
- Doesn't tend to work well with very small datasets
# Location Set Covering Problem (LSCP)
*Authors:* [Germano Barcelos](https://github.com/gegen07), [James Gaboardi](https://github.com/jGaboardi), [Levi J. Wolf](https://github.com/ljwolf), [Qunshan Zhao](https://github.com/qszhao)
The Location Set Covering Problem (LSCP) was introduced by Toregas, et al. (1971). They observed that emergency services must be sited with a response-time standard in mind, since there is a maximum allowable service time when responding to an emergency. They therefore proposed the LSCP, a model that aims to:
_Minimize the number of facilities needed and locate them so that every demand area is covered within a predefined maximal service distance or time._ Church L., Murray, A. (2018)
**LSCP can be written as:**
$\begin{array}{lrll}
\displaystyle \textbf{Minimize} & \displaystyle \sum_{j=1}^{n}{x_j} && (1) \\
\displaystyle \textbf{Subject to:} & \displaystyle \sum_{j\in N_i}{x_j} \geq 1 & \forall i & (2) \\
& x_j \in \{0,1\} & \forall j & (3) \\
\end{array}$

$\begin{array}{llll}
\textbf{Where:} & i & = & \textrm{index referencing nodes of the network as demand} \\
& j & = & \textrm{index referencing nodes of the network as potential facility sites} \\
& S & = & \textrm{maximal acceptable service distance or time standard} \\
& d_{ij} & = & \textrm{shortest distance or travel time between nodes } i \textrm{ and } j \\
& N_i & = & \{j \,|\, d_{ij} < S\} \\
& x_j & = & \begin{cases}
1, & \textrm{if a facility is located at node } j\\
0, & \textrm{otherwise} \\
\end{cases} \\
\end{array}$
_This excerpt above was quoted from Church L., Murray, A. (2018)_
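For intuition, the formulation above can also be written out directly in PuLP. The following is a minimal, self-contained sketch on a tiny made-up cost matrix; the data and the `MAX_DIST` threshold are invented for illustration, while the tutorial below uses `spopt`, which builds this model for you.
```
import numpy as np
import pulp

# hypothetical costs d_ij between 4 demand points (rows) and 3 candidate sites (columns)
d = np.array([[2.0, 6.0, 9.0],
              [4.0, 3.0, 8.0],
              [9.0, 4.0, 2.0],
              [6.0, 8.0, 3.0]])
MAX_DIST = 5.0  # maximal acceptable service distance S

n_clients, n_sites = d.shape
x = [pulp.LpVariable(f"x{j}", cat="Binary") for j in range(n_sites)]  # x_j in (3)

prob = pulp.LpProblem("LSCP", pulp.LpMinimize)
prob += pulp.lpSum(x)  # objective (1): minimize the number of facilities

for i in range(n_clients):
    N_i = [j for j in range(n_sites) if d[i, j] < MAX_DIST]  # sites that cover demand i
    prob += pulp.lpSum(x[j] for j in N_i) >= 1  # constraint (2): every demand covered

prob.solve(pulp.PULP_CBC_CMD(msg=False))
print("selected sites:", [j for j in range(n_sites) if x[j].value() == 1])
```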
This tutorial solves the LSCP with the `spopt.locate.coverage.LSCP` class, which expects a 2D array of costs between candidate facility sites and demand points. Here, a 10x10 lattice with simulated points is used to calculate those costs.
```
from spopt.locate.coverage import LSCP
from spopt.locate.util import simulated_geo_points
import numpy
import geopandas
import pulp
import spaghetti
from shapely.geometry import Point
import matplotlib.pyplot as plt
```
Since the model needs a distance-based cost matrix, we first define some variables; the comments explain what each one is for. The solver, assigned below as `pulp.PULP_CBC_CMD`, is an interface to the optimization solver developed by [COIN-OR](https://github.com/coin-or/Cbc). If you want to use another optimization interface, such as Gurobi or CPLEX, see this [guide](https://coin-or.github.io/pulp/guides/how_to_configure_solvers.html) that explains how to achieve this.
```
CLIENT_COUNT = 100 # quantity demand points
FACILITY_COUNT = 5 # quantity supply points
MAX_COVERAGE = 8 # maximum service radius in meters
# Random seeds for reproducibility
CLIENT_SEED = 5
FACILITY_SEED = 6
solver = pulp.PULP_CBC_CMD(msg=False)
```
## Lattice 10x10
Create a 10x10 lattice with 9 interior lines, both vertical and horizontal.
```
lattice = spaghetti.regular_lattice((0, 0, 10, 10), 9, exterior=True)
ntw = spaghetti.Network(in_data=lattice)
```
Transform spaghetti instance into geodataframe.
```
street = spaghetti.element_as_gdf(ntw, arcs=True)
street_buffered = geopandas.GeoDataFrame(
geopandas.GeoSeries(street["geometry"].buffer(0.2).unary_union),
crs=street.crs,
columns=["geometry"],
)
```
Plotting the network created by spaghetti, we can see that it resembles a district with blocks and streets.
```
street.plot()
```
## Simulate points in a network
The function `simulated_geo_points` simulates points within a network's buffered area. In this case, it uses the 10x10 lattice network created with the spaghetti package.
Below, we use this function to simulate points inside the lattice bounds.
```
client_points = simulated_geo_points(street_buffered, needed=CLIENT_COUNT, seed=CLIENT_SEED)
facility_points = simulated_geo_points(
street_buffered, needed=FACILITY_COUNT, seed=FACILITY_SEED
)
```
Plotting the 100 client points and 5 facility points, we can see that the function generated dummy points within the 10x10 area covered by the lattice created in the previous cells.
```
fig, ax = plt.subplots(figsize=(6, 6))
street.plot(ax=ax, alpha=0.8, zorder=1, label='streets')
facility_points.plot(ax=ax, color='red', zorder=2, label='facility candidate sites ($n$=5)')
client_points.plot(ax=ax, color='black', label='clients points ($n$=100)')
plt.legend(loc='upper left', bbox_to_anchor=(1.05, 1))
```
## Transform simulated points to real points
Whether we use a cost matrix or geodataframes, we have to pay attention to one detail: the simulated client and facility points do not lie on the network, so calculating distances now would give incorrect results. Before calculating distances, we snap the points to the network.
Below, we snap the points that do not spatially belong to the network and create new geodataframes of the snapped points.
```
ntw.snapobservations(client_points, "clients", attribute=True)
clients_snapped = spaghetti.element_as_gdf(
ntw, pp_name="clients", snapped=True
)
ntw.snapobservations(facility_points, "facilities", attribute=True)
facilities_snapped = spaghetti.element_as_gdf(
ntw, pp_name="facilities", snapped=True
)
```
Now the plot looks more organized, since the points lie on the network.
The network is plotted below with the facility and client points:
```
fig, ax = plt.subplots(figsize=(6, 6))
street.plot(ax=ax, alpha=0.8, zorder=1, label='streets')
facilities_snapped.plot(ax=ax, color='red', zorder=2, label='facility candidate sites ($n$=5)')
clients_snapped.plot(ax=ax, color='black', label='clients points ($n$=100)')
plt.legend(loc='upper left', bbox_to_anchor=(1.05, 1))
```
## Calculating the cost matrix
Calculate distance between clients and facilities.
```
cost_matrix = ntw.allneighbordistances(
sourcepattern=ntw.pointpatterns["clients"],
destpattern=ntw.pointpatterns["facilities"],
)
```
The expected result is a matrix of shortest-path (Dijkstra) distances between client and facility points, so in our case a 100x5 2D array.
```
cost_matrix
```
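As a quick sanity check, the matrix should have one row per client and one column per candidate facility site:
```
# rows are the 100 demand (client) points, columns the 5 candidate facility sites
print(cost_matrix.shape)
```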
With ``LSCP.from_cost_matrix`` we model the LSCP so that all demand points are covered within a `MAX_COVERAGE`-meter service radius, using the cost matrix calculated previously.
```
lscp_from_cost_matrix = LSCP.from_cost_matrix(cost_matrix, MAX_COVERAGE)
lscp_from_cost_matrix = lscp_from_cost_matrix.solve(solver)
```
Expected result is an instance of LSCP.
```
lscp_from_cost_matrix
```
## Using GeoDataFrame
With ``LSCP.from_geodataframe`` we model the LSCP so that all demand points are covered within a `MAX_COVERAGE`-meter service radius, using the geodataframes directly, without calculating a cost matrix first.
```
lscp_from_geodataframe = LSCP.from_geodataframe(
clients_snapped, facilities_snapped, "geometry", "geometry", MAX_COVERAGE, distance_metric="euclidean"
)
lscp_from_geodataframe = lscp_from_geodataframe.solve(solver)
```
Expected result is an instance of LSCP.
```
lscp_from_geodataframe
```
## Plotting the results
The cell below defines the function used to plot the results. For each LSCP constructor (`from_cost_matrix`, `from_geodataframe`) there is a plot displaying each selected facility site as a colored star and the demand points it covers in the same color. Occasionally a demand point appears in an unexpected color; this indicates overlapping coverage.
```
from matplotlib.patches import Patch
import matplotlib.lines as mlines
dv_colors = [
"darkcyan",
"mediumseagreen",
"cyan",
"darkslategray",
"lightskyblue",
"limegreen",
"darkgoldenrod",
"peachpuff",
"coral",
"mediumvioletred",
"blueviolet",
"fuchsia",
"thistle",
"lavender",
"saddlebrown",
]
def plot_results(lscp, facility_points):
arr_points = []
fac_sites = []
for i in range(FACILITY_COUNT):
if lscp.fac2cli[i]:
geom = client_points.iloc[lscp.fac2cli[i]]['geometry']
arr_points.append(geom)
fac_sites.append(i)
fig, ax = plt.subplots(figsize=(6, 6))
legend_elements = []
street.plot(ax=ax, alpha=1, color='black', zorder=1)
legend_elements.append(mlines.Line2D(
[],
[],
color='black',
label='streets',
))
facility_points.plot(ax=ax, color='brown', marker="*", markersize=80, zorder=2)
legend_elements.append(mlines.Line2D(
[],
[],
color='brown',
marker="*",
linewidth=0,
label=f'facility sites ($n$={FACILITY_COUNT})'
))
for i in range(len(arr_points)):
gdf = geopandas.GeoDataFrame(arr_points[i])
label = f"coverage_points by y{fac_sites[i]}"
legend_elements.append(Patch(facecolor=dv_colors[i], edgecolor="k", label=label))
gdf.plot(ax=ax, zorder=3, alpha=0.7, edgecolor="k", color=dv_colors[i], label=label)
facility_points.iloc[[fac_sites[i]]].plot(ax=ax,
marker="*",
markersize=200 * 3.0,
alpha=0.8,
zorder=4,
edgecolor="k",
facecolor=dv_colors[i])
legend_elements.append(mlines.Line2D(
[],
[],
color=dv_colors[i],
marker="*",
ms=20 / 2,
markeredgecolor="k",
linewidth=0,
alpha=0.8,
label=f"y{fac_sites[i]} facility selected",
))
plt.title("LSCP", fontweight="bold")
plt.legend(handles = legend_elements, loc='upper left', bbox_to_anchor=(1.05, 1))
```
### LSCP built from cost matrix
```
lscp_from_cost_matrix.facility_client_array()
plot_results(lscp_from_cost_matrix, facility_points)
```
### LSCP built from geodataframe
```
lscp_from_geodataframe.facility_client_array()
plot_results(lscp_from_geodataframe, facility_points)
```
You may notice that the two models differ. This is expected, because the distances between facility and demand points are calculated with different metrics: the cost matrix uses Dijkstra (network shortest-path) distance, while the geodataframe version uses Euclidean distance.
But why does it need just one facility point to cover all of the demand points? This is explained by the way the problem was configured: the street network is a synthetic 10x10 lattice and the `MAX_COVERAGE` parameter is 8 meters, so this result is not surprising. If you change `MAX_COVERAGE` to 2 meters you will obtain a different result, but pay attention to how many points are actually covered.
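As a quick experiment, the model can be re-solved with a tighter service radius. This is a small sketch reusing the cost matrix, solver, and plotting function from above; the 2-meter radius is simply the value suggested in the previous paragraph.
```
# re-solve the LSCP with a smaller service radius
lscp_tight = LSCP.from_cost_matrix(cost_matrix, 2)
lscp_tight = lscp_tight.solve(solver)
lscp_tight.facility_client_array()
plot_results(lscp_tight, facility_points)
```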
## References
- [Church, R. L., & Murray, A. T. (2018). Location covering models: History, applications and advancements (1st edition 2018). Springer](https://www.springer.com/gb/book/9783319998459)
- [Toregas, C., Swain, R., ReVelle, C., & Bergman, L. (1971). The location of emergency service facilities. Operations Research, 19(6), 1363–1373.](https://pubsonline.informs.org/doi/abs/10.1287/opre.19.6.1363)
# Character Sequence to Sequence
In this notebook, we'll build a model that takes in a sequence of letters, and outputs a sorted version of that sequence. We'll do that using what we've learned so far about Sequence to Sequence models. This notebook was updated to work with TensorFlow 1.1 and builds on the work of Dave Currie. Check out Dave's post [Text Summarization with Amazon Reviews](https://medium.com/towards-data-science/text-summarization-with-amazon-reviews-41801c2210b).
<img src="images/sequence-to-sequence.jpg"/>
## Dataset
The dataset lives in the /data/ folder. At the moment, it is made up of the following files:
* **letters_source.txt**: The list of input letter sequences. Each sequence is its own line.
* **letters_target.txt**: The list of target sequences we'll use in the training process. Each sequence here is a response to the input sequence in letters_source.txt with the same line number.
```
import numpy as np
import time
import helper
source_path = 'data/letters_source.txt'
target_path = 'data/letters_target.txt'
source_sentences = helper.load_data(source_path)
target_sentences = helper.load_data(target_path)
```
Let's start by examining the current state of the dataset. `source_sentences` contains the entire input sequence file as text delimited by newline symbols.
```
source_sentences[:50].split('\n')
```
`target_sentences` contains the entire output sequence file as text delimited by newline symbols. Each line corresponds to the line in `source_sentences` with the same line number and contains that line's characters in sorted order.
```
target_sentences[:50].split('\n')
```
## Preprocess
To do anything useful with it, we'll need to turn each string into a list of characters:
<img src="images/source_and_target_arrays.png"/>
Then convert the characters to their int values as declared in our vocabulary:
```
def extract_character_vocab(data):
    special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']
    set_words = set([character for line in data.split('\n') for character in line])
    int_to_vocab = {word_i: word for word_i, word in enumerate(special_words + list(set_words))}
    vocab_to_int = {word: word_i for word_i, word in int_to_vocab.items()}
    return int_to_vocab, vocab_to_int
# Build int2letter and letter2int dicts
source_int_to_letter, source_letter_to_int = extract_character_vocab(source_sentences)
target_int_to_letter, target_letter_to_int = extract_character_vocab(target_sentences)
# Convert characters to ids
source_letter_ids = [[source_letter_to_int.get(letter, source_letter_to_int['<UNK>']) for letter in line] for line in source_sentences.split('\n')]
target_letter_ids = [[target_letter_to_int.get(letter, target_letter_to_int['<UNK>']) for letter in line] + [target_letter_to_int['<EOS>']] for line in target_sentences.split('\n')]
print("Example source sequence")
print(source_letter_ids[:3])
print("\n")
print("Example target sequence")
print(target_letter_ids[:3])
```
This is the final shape we need them to be in. We can now proceed to building the model.
## Model
#### Check the Version of TensorFlow
This will check to make sure you have the correct version of TensorFlow
```
from distutils.version import LooseVersion
import tensorflow as tf
from tensorflow.python.layers.core import Dense
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.1'), 'Please use TensorFlow version 1.1 or newer'
print('TensorFlow Version: {}'.format(tf.__version__))
```
### Hyperparameters
```
# Number of Epochs
epochs = 60
# Batch Size
batch_size = 128
# RNN Size
rnn_size = 50
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 15
decoding_embedding_size = 15
# Learning Rate
learning_rate = 0.001
```
### Input
```
def get_model_inputs():
    input_data = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='targets')
    lr = tf.placeholder(tf.float32, name='learning_rate')

    target_sequence_length = tf.placeholder(tf.int32, (None,), name='target_sequence_length')
    max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_len')
    source_sequence_length = tf.placeholder(tf.int32, (None,), name='source_sequence_length')

    return input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length
```
### Sequence to Sequence Model
We can now start defining the functions that will build the seq2seq model. We are building it from the bottom up with the following components:
2.1 Encoder
- Embedding
- Encoder cell
2.2 Decoder
1- Process decoder inputs
2- Set up the decoder
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
2.3 Seq2seq model connecting the encoder and decoder
2.4 Build the training graph hooking up the model with the
optimizer
### 2.1 Encoder
The first bit of the model we'll build is the encoder. Here, we'll embed the input data, construct our encoder, then pass the embedded data to the encoder.
- Embed the input data using [`tf.contrib.layers.embed_sequence`](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/embed_sequence)
<img src="images/embed_sequence.png" />
- Pass the embedded input into a stack of RNNs. Save the RNN state and ignore the output.
<img src="images/encoder.png" />
```
def encoding_layer(input_data, rnn_size, num_layers,
                   source_sequence_length, source_vocab_size,
                   encoding_embedding_size):
    # Encoder embedding
    enc_embed_input = tf.contrib.layers.embed_sequence(input_data, source_vocab_size, encoding_embedding_size)

    # RNN cell
    def make_cell(rnn_size):
        enc_cell = tf.contrib.rnn.LSTMCell(rnn_size,
                                           initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
        return enc_cell

    enc_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
    enc_output, enc_state = tf.nn.dynamic_rnn(enc_cell, enc_embed_input, sequence_length=source_sequence_length, dtype=tf.float32)
    return enc_output, enc_state
```
## 2.2 Decoder
The decoder is probably the most involved part of this model. The following steps are needed to create it:
1- Process decoder inputs
2- Set up the decoder components
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
### Process Decoder Input
In the training process, the target sequences will be used in two different places:
1. Using them to calculate the loss
2. Feeding them to the decoder during training to make the model more robust.
Now we need to address the second point. Let's assume our targets look like this in their letter/word form (we're doing this for readability; at this point in the code, these sequences would be in int form):
<img src="images/targets_1.png"/>
We need to do a simple transformation on the tensor before feeding it to the decoder:
1- We will feed an item of the sequence to the decoder at each time step. Think about the last timestep -- where the decoder outputs the final word in its output. The input to that step is the item before last from the target sequence. The decoder has no use for the last item in the target sequence in this scenario. So we'll need to remove the last item.
We do that using tensorflow's tf.strided_slice() method. We hand it the tensor, and the index of where to start and where to end the cutting.
<img src="images/strided_slice_1.png"/>
2- The first item in each sequence we feed to the decoder has to be the `<GO>` symbol, so we'll add that to the beginning.
<img src="images/targets_add_go.png"/>
Now the tensor is ready to be fed to the decoder. It looks like this (if we convert from ints to letters/symbols):
<img src="images/targets_after_processing_1.png"/>
```
# Process the input we'll feed to the decoder
def process_decoder_input(target_data, vocab_to_int, batch_size):
    '''Remove the last word id from each batch and concat the <GO> to the beginning of each batch'''
    ending = tf.strided_slice(target_data, [0, 0], [batch_size, -1], [1, 1])
    dec_input = tf.concat([tf.fill([batch_size, 1], vocab_to_int['<GO>']), ending], 1)
    return dec_input
```
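To see what this transformation does, here is a small NumPy illustration on a made-up batch (the ids are invented; 2 stands for `<GO>` and 3 for `<EOS>`). It mirrors what `tf.strided_slice` plus `tf.concat` do above without needing to run the TensorFlow graph.
```
import numpy as np

GO = 2
batch = np.array([[5, 6, 7, 3],    # e.g. "abc<EOS>"
                  [8, 9, 4, 3]])   # e.g. "def<EOS>"

ending = batch[:, :-1]                                    # drop the last item of each sequence
dec_input = np.concatenate(
    [np.full((batch.shape[0], 1), GO), ending], axis=1)   # prepend <GO>

print(dec_input)
# [[2 5 6 7]
#  [2 8 9 4]]
```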
### Set up the decoder components
- Embedding
- Decoder cell
- Dense output layer
- Training decoder
- Inference decoder
#### 1- Embedding
Now that we have prepared the inputs to the training decoder, we need to embed them so they can be ready to be passed to the decoder.
We'll create an embedding matrix like the following then have tf.nn.embedding_lookup convert our input to its embedded equivalent:
<img src="images/embeddings.png" />
#### 2- Decoder Cell
Then we declare our decoder cell. Just like the encoder, we'll use a tf.contrib.rnn.LSTMCell here as well.
We need to declare a decoder for the training process, and a decoder for the inference/prediction process. These two decoders will share their parameters (so that all the weights and biases that are set during the training phase can be used when we deploy the model).
First, we'll need to define the type of cell we'll be using for our decoder RNNs. We opted for LSTM.
#### 3- Dense output layer
Before we move to declaring our decoders, we'll need to create the output layer, which will be a tensorflow.python.layers.core.Dense layer that translates the outputs of the decoder to logits that tell us which element of the decoder vocabulary the decoder is choosing to output at each time step.
#### 4- Training decoder
Essentially, we'll be creating two decoders which share their parameters. One for training and one for inference. The two are similar in that both are created using tf.contrib.seq2seq.**BasicDecoder** and tf.contrib.seq2seq.**dynamic_decode**. They differ, however, in that we feed the target sequences as inputs to the training decoder at each time step to make it more robust.
We can think of the training decoder as looking like this (except that it works with sequences in batches):
<img src="images/sequence-to-sequence-training-decoder.png"/>
The training decoder **does not** feed the output of each time step to the next. Rather, the inputs to the decoder time steps are the target sequence from the training dataset (the orange letters).
#### 5- Inference decoder
The inference decoder is the one we'll use when we deploy our model to the wild.
<img src="images/sequence-to-sequence-inference-decoder.png"/>
We'll hand our encoder hidden state to both the training and inference decoders and have it process its output. TensorFlow handles most of the logic for us. We just have to use the appropriate methods from tf.contrib.seq2seq and supply them with the appropriate inputs.
```
def decoding_layer(target_letter_to_int, decoding_embedding_size, num_layers, rnn_size,
target_sequence_length, max_target_sequence_length, enc_state, dec_input):
# 1. Decoder Embedding
target_vocab_size = len(target_letter_to_int)
dec_embeddings = tf.Variable(tf.random_uniform([target_vocab_size, decoding_embedding_size]))
dec_embed_input = tf.nn.embedding_lookup(dec_embeddings, dec_input)
# 2. Construct the decoder cell
def make_cell(rnn_size):
dec_cell = tf.contrib.rnn.LSTMCell(rnn_size,
initializer=tf.random_uniform_initializer(-0.1, 0.1, seed=2))
return dec_cell
dec_cell = tf.contrib.rnn.MultiRNNCell([make_cell(rnn_size) for _ in range(num_layers)])
# 3. Dense layer to translate the decoder's output at each time
# step into a choice from the target vocabulary
output_layer = Dense(target_vocab_size,
kernel_initializer = tf.truncated_normal_initializer(mean = 0.0, stddev=0.1))
# 4. Set up a training decoder and an inference decoder
# Training Decoder
with tf.variable_scope("decode"):
# Helper for the training process. Used by BasicDecoder to read inputs.
training_helper = tf.contrib.seq2seq.TrainingHelper(inputs=dec_embed_input,
sequence_length=target_sequence_length,
time_major=False)
# Basic decoder
training_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
training_helper,
enc_state,
output_layer)
# Perform dynamic decoding using the decoder
training_decoder_output = tf.contrib.seq2seq.dynamic_decode(training_decoder,
impute_finished=True,
maximum_iterations=max_target_sequence_length)[0]
# 5. Inference Decoder
# Reuses the same parameters trained by the training process
with tf.variable_scope("decode", reuse=True):
start_tokens = tf.tile(tf.constant([target_letter_to_int['<GO>']], dtype=tf.int32), [batch_size], name='start_tokens')
# Helper for the inference process.
inference_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(dec_embeddings,
start_tokens,
target_letter_to_int['<EOS>'])
# Basic decoder
inference_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell,
inference_helper,
enc_state,
output_layer)
# Perform dynamic decoding using the decoder
inference_decoder_output = tf.contrib.seq2seq.dynamic_decode(inference_decoder,
impute_finished=True,
maximum_iterations=max_target_sequence_length)[0]
return training_decoder_output, inference_decoder_output
```
## 2.3 Seq2seq model
Let's now go a step above, and hook up the encoder and decoder using the methods we just declared
```
def seq2seq_model(input_data, targets, lr, target_sequence_length,
                  max_target_sequence_length, source_sequence_length,
                  source_vocab_size, target_vocab_size,
                  enc_embedding_size, dec_embedding_size,
                  rnn_size, num_layers):

    # Pass the input data through the encoder. We'll ignore the encoder output, but use the state
    _, enc_state = encoding_layer(input_data,
                                  rnn_size,
                                  num_layers,
                                  source_sequence_length,
                                  source_vocab_size,
                                  encoding_embedding_size)

    # Prepare the target sequences we'll feed to the decoder in training mode
    dec_input = process_decoder_input(targets, target_letter_to_int, batch_size)

    # Pass encoder state and decoder inputs to the decoders
    training_decoder_output, inference_decoder_output = decoding_layer(target_letter_to_int,
                                                                       decoding_embedding_size,
                                                                       num_layers,
                                                                       rnn_size,
                                                                       target_sequence_length,
                                                                       max_target_sequence_length,
                                                                       enc_state,
                                                                       dec_input)

    return training_decoder_output, inference_decoder_output
```
Model outputs *training_decoder_output* and *inference_decoder_output* both contain a 'rnn_output' logits tensor that looks like this:
<img src="images/logits.png"/>
The logits we get from the training tensor we'll pass to tf.contrib.seq2seq.**sequence_loss()** to calculate the loss and ultimately the gradient.
```
# Build the graph
train_graph = tf.Graph()
# Set the graph to default to ensure that it is ready for training
with train_graph.as_default():
# Load the model inputs
input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_model_inputs()
# Create the training and inference logits
training_decoder_output, inference_decoder_output = seq2seq_model(input_data,
targets,
lr,
target_sequence_length,
max_target_sequence_length,
source_sequence_length,
len(source_letter_to_int),
len(target_letter_to_int),
encoding_embedding_size,
decoding_embedding_size,
rnn_size,
num_layers)
# Create tensors for the training logits and inference logits
training_logits = tf.identity(training_decoder_output.rnn_output, 'logits')
inference_logits = tf.identity(inference_decoder_output.sample_id, name='predictions')
# Create the weights for sequence_loss
masks = tf.sequence_mask(target_sequence_length, max_target_sequence_length, dtype=tf.float32, name='masks')
with tf.name_scope("optimization"):
# Loss function
cost = tf.contrib.seq2seq.sequence_loss(
training_logits,
targets,
masks)
# Optimizer
optimizer = tf.train.AdamOptimizer(lr)
# Gradient Clipping
gradients = optimizer.compute_gradients(cost)
capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(capped_gradients)
```
## Get Batches
There's little processing involved when we retrieve the batches. Here is a simple example assuming batch_size = 2.
Source sequences (they're actually in int form; we're showing the characters for clarity):
<img src="images/source_batch.png" />
Target sequences (also in int, but showing letters for clarity):
<img src="images/target_batch.png" />
```
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad sentences with <PAD> so that each sentence of a batch has the same length"""
    max_sentence = max([len(sentence) for sentence in sentence_batch])
    return [sentence + [pad_int] * (max_sentence - len(sentence)) for sentence in sentence_batch]

def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
    """Batch targets, sources, and the lengths of their sentences together"""
    for batch_i in range(0, len(sources)//batch_size):
        start_i = batch_i * batch_size
        sources_batch = sources[start_i:start_i + batch_size]
        targets_batch = targets[start_i:start_i + batch_size]
        pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
        pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))

        # Need the lengths for the _lengths parameters
        pad_targets_lengths = []
        for target in pad_targets_batch:
            pad_targets_lengths.append(len(target))

        pad_source_lengths = []
        for source in pad_sources_batch:
            pad_source_lengths.append(len(source))

        yield pad_targets_batch, pad_sources_batch, pad_targets_lengths, pad_source_lengths
```
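Here is a tiny usage sketch of the two helpers above on made-up letter ids (the ids and the pad value 0 are invented for illustration):
```
# pad a toy batch so every sequence has the same length
toy_batch = [[5, 6, 7], [8, 9], [4]]
print(pad_sentence_batch(toy_batch, pad_int=0))
# [[5, 6, 7], [8, 9, 0], [4, 0, 0]]

# pull one padded batch of size 2 from toy sources/targets
toy_sources = [[5, 6], [7, 8, 9], [4, 5, 6, 7]]
toy_targets = [[5, 6, 3], [7, 8, 9, 3], [4, 5, 6, 7, 3]]
targets_b, sources_b, target_lens, source_lens = next(
    get_batches(toy_targets, toy_sources, 2, source_pad_int=0, target_pad_int=0))
print(sources_b, source_lens)
```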
## Train
We're now ready to train our model. If you run into OOM (out of memory) issues during training, try to decrease the batch_size.
```
# Split data to training and validation sets
train_source = source_letter_ids[batch_size:]
train_target = target_letter_ids[batch_size:]
valid_source = source_letter_ids[:batch_size]
valid_target = target_letter_ids[:batch_size]
(valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size,
source_letter_to_int['<PAD>'],
target_letter_to_int['<PAD>']))
display_step = 20 # Check training loss after every 20 batches
checkpoint = "best_model.ckpt"
with tf.Session(graph=train_graph) as sess:
sess.run(tf.global_variables_initializer())
for epoch_i in range(1, epochs+1):
for batch_i, (targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate(
get_batches(train_target, train_source, batch_size,
source_letter_to_int['<PAD>'],
target_letter_to_int['<PAD>'])):
# Training step
_, loss = sess.run(
[train_op, cost],
{input_data: sources_batch,
targets: targets_batch,
lr: learning_rate,
target_sequence_length: targets_lengths,
source_sequence_length: sources_lengths})
# Debug message updating us on the status of the training
if batch_i % display_step == 0 and batch_i > 0:
# Calculate validation cost
validation_loss = sess.run(
[cost],
{input_data: valid_sources_batch,
targets: valid_targets_batch,
lr: learning_rate,
target_sequence_length: valid_targets_lengths,
source_sequence_length: valid_sources_lengths})
print('Epoch {:>3}/{} Batch {:>4}/{} - Loss: {:>6.3f} - Validation loss: {:>6.3f}'
.format(epoch_i,
epochs,
batch_i,
len(train_source) // batch_size,
loss,
validation_loss[0]))
# Save Model
saver = tf.train.Saver()
saver.save(sess, checkpoint)
print('Model Trained and Saved')
```
## Prediction
```
def source_to_seq(text):
    '''Prepare the text for the model'''
    sequence_length = 7
    return [source_letter_to_int.get(word, source_letter_to_int['<UNK>']) for word in text] + [source_letter_to_int['<PAD>']]*(sequence_length-len(text))
input_sentence = 'hello'
text = source_to_seq(input_sentence)
checkpoint = "./best_model.ckpt"
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    # Load saved model
    loader = tf.train.import_meta_graph(checkpoint + '.meta')
    loader.restore(sess, checkpoint)

    input_data = loaded_graph.get_tensor_by_name('input:0')
    logits = loaded_graph.get_tensor_by_name('predictions:0')
    source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
    target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')

    # Multiply by batch_size to match the model's input parameters
    answer_logits = sess.run(logits, {input_data: [text]*batch_size,
                                      target_sequence_length: [len(text)]*batch_size,
                                      source_sequence_length: [len(text)]*batch_size})[0]
pad = source_letter_to_int["<PAD>"]
print('Original Text:', input_sentence)
print('\nSource')
print(' Word Ids: {}'.format([i for i in text]))
print(' Input Words: {}'.format(" ".join([source_int_to_letter[i] for i in text])))
print('\nTarget')
print(' Word Ids: {}'.format([i for i in answer_logits if i != pad]))
print(' Response Words: {}'.format(" ".join([target_int_to_letter[i] for i in answer_logits if i != pad])))
```
# Interpretable forecasting with N-Beats
```
import os
import warnings
warnings.filterwarnings("ignore")
os.chdir("../../..")
import pandas as pd
import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping
from pytorch_forecasting import TimeSeriesDataSet, NBeats, Baseline
from pytorch_forecasting.data import NaNLabelEncoder
from pytorch_forecasting.data.examples import generate_ar_data
from pytorch_forecasting.metrics import SMAPE
```
## Load data
```
data = generate_ar_data(seasonality=10.0, timesteps=400, n_series=100)
data["static"] = 2
data["date"] = pd.Timestamp("2020-01-01") + pd.to_timedelta(data.time_idx, "D")
data.head()
# create dataset and dataloaders
max_encoder_length = 60
max_prediction_length = 20
training_cutoff = data["time_idx"].max() - max_prediction_length
context_length = max_encoder_length
prediction_length = max_prediction_length
training = TimeSeriesDataSet(
data[lambda x: x.time_idx <= training_cutoff],
time_idx="time_idx",
target="value",
categorical_encoders={"series": NaNLabelEncoder().fit(data.series)},
group_ids=["series"],
# only unknown variable is "value" - and N-Beats can also not take any additional variables
time_varying_unknown_reals=["value"],
max_encoder_length=context_length,
max_prediction_length=prediction_length,
)
validation = TimeSeriesDataSet.from_dataset(training, data, min_prediction_idx=training_cutoff+1)
batch_size = 128
train_dataloader = training.to_dataloader(train=True, batch_size=batch_size, num_workers=0)
val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, num_workers=0)
```
## Calculate baseline error
```
# calculate baseline absolute error
actuals = torch.cat([y for x, y in iter(val_dataloader)])
baseline_predictions = Baseline().predict(val_dataloader)
SMAPE()(baseline_predictions, actuals)
```
## Train network
Find optimal learning rate
```
trainer = pl.Trainer(gpus=0, gradient_clip_val=0.1)
net = NBeats.from_dataset(training, learning_rate=3e-2, weight_decay=1e-2)
# find optimal learning rate
res = trainer.lr_find(net, train_dataloader=train_dataloader, val_dataloaders=val_dataloader, min_lr=1e-5, max_lr=1e2)
print(f"suggested learning rate: {res.suggestion()}")
fig = res.plot(show=True, suggest=True)
fig.show()
net.hparams.learning_rate = res.suggestion()
```
Fit model
```
early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=10, verbose=False, mode="min")
trainer = pl.Trainer(
max_epochs=100,
gpus=0,
weights_summary="top",
gradient_clip_val=0.1,
early_stop_callback=early_stop_callback,
limit_train_batches=30,
)
net = NBeats.from_dataset(training, learning_rate=1.2e-2, log_interval=10, log_val_interval=1, weight_decay=1e-2)
trainer.fit(
net, train_dataloader=train_dataloader, val_dataloaders=val_dataloader,
)
```
### Evaluate Results
```
# load the best checkpoint recorded by the trainer
best_model_path = trainer.checkpoint_callback.best_model_path
# best_model_path = "/Users/beitnerjan/Documents/Github/temporal_fusion_transformer_pytorch/lightning_logs/version_212/checkpoints/epoch=19.ckpt"
best_model = NBeats.load_from_checkpoint(best_model_path)
print(best_model_path)
```
We calculate the error, which is approximately half of the baseline error.
```
actuals = torch.cat([y for x, y in iter(val_dataloader)])
predictions = best_model.predict(val_dataloader)
(actuals - predictions).abs().mean()
raw_predictions, x = best_model.predict(val_dataloader, mode="raw", return_x=True)
for idx in range(10):
    best_model.plot_prediction(x, raw_predictions, idx=idx, add_loss_to_title=True);
```
## Interpret model
```
for idx in range(10):
    best_model.plot_interpretation(x, raw_predictions, idx=idx);
```
<a href="https://colab.research.google.com/github/kyle-gao/GRSS_TrackMSD2021/blob/main/MakeTilesDeepGlobe.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
"""
Copyright 2021 Yi Lin(Kyle) Gao
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License."""
```
# DeepGlobe Dataset
https://www.kaggle.com/balraj98/deepglobe-land-cover-classification-dataset#__sid=js0
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_datasets as tfds
import shutil
from PIL import Image
import os
from google.colab import drive
import PIL
drive.mount('/content/drive')
shutil.unpack_archive("/content/drive/MyDrive/DeepGlobeLandCover.zip.zip",'/content/DeepGlobe')
test_x = tf.keras.preprocessing.image.load_img(
"/content/DeepGlobe/train/100694_sat.jpg", grayscale=False, color_mode='rgb', target_size=None,
interpolation='nearest')
test_x
data_dir = "/content/DeepGlobe/train"
list_ds = tf.data.Dataset.list_files(str(data_dir+"/*.png"),shuffle=False) #DO NOT SHUFFLE
#dataset is made up of strings
def to_categorical(tensor, class_dict):
    """
    Converts the last dimension to categorical class indices according to class_dict.
    Expects a NumPy array; the boolean-mask assignment modifies it in place.
    """
    for k, v in class_dict.items():
        tensor[tensor == k] = v
    return tensor
```
##Label
Each satellite image is paired with a mask image for land cover annotation. The mask is a RGB image with 7 classes of labels, using color-coding (R, G, B) as follows.
Urban land: 0,255,255 - Man-made, built up areas with human artifacts (can ignore roads for now which is hard to label)
Agriculture land: 255,255,0 - Farms, any planned (i.e. regular) plantation, cropland, orchards, vineyards, nurseries, and ornamental horticultural areas; confined feeding operations.
Rangeland: 255,0,255 - Any non-forest, non-farm, green land, grass
Forest land: 0,255,0 - Any land with x% tree crown density plus clearcuts.
Water: 0,0,255 - Rivers, oceans, lakes, wetland, ponds.
Barren land: 255,255,255 - Mountain, land, rock, desert, beach, no vegetation
Unknown: 0,0,0 - Clouds and others
File names for satellite images and the corresponding mask image are id _sat.jpg and id _mask.png. id is a randomized integer.
Please note:
The values of the mask image may not be pure 0 and 255. When converting to labels, please binarize them at threshold 128.
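A minimal sketch of that binarization and class encoding on a single mask array. The class ordering below is one possible assignment, matching the commented-out encoding inside the tile-writing function further down; it is not prescribed by the dataset.
```
import numpy as np

def mask_to_classes(mask):
    """mask: (H, W, 3) uint8 RGB label image -> (H, W) integer class map."""
    # binarize each channel at threshold 128
    binary = (mask >= 128).astype(np.uint8)  # values in {0, 1}
    r, g, b = binary[..., 0], binary[..., 1], binary[..., 2]
    # map (R, G, B) bits to the 7 DeepGlobe classes
    lut = {
        (0, 1, 1): 0,  # urban land        (0, 255, 255)
        (1, 1, 0): 1,  # agriculture land  (255, 255, 0)
        (1, 0, 1): 2,  # rangeland         (255, 0, 255)
        (0, 1, 0): 3,  # forest land       (0, 255, 0)
        (0, 0, 1): 4,  # water             (0, 0, 255)
        (1, 1, 1): 5,  # barren land       (255, 255, 255)
        (0, 0, 0): 6,  # unknown           (0, 0, 0)
    }
    classes = np.full(mask.shape[:2], 6, dtype=np.uint8)  # default to "unknown"
    for (rr, gg, bb), idx in lut.items():
        classes[(r == rr) & (g == gg) & (b == bb)] = idx
    return classes
```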
```
size = (512,512)
filenames = list(list_ds)
padding = 'VALID'
def deepglobe_write_tiles(filenames,size=(512,512),padding='VALID',save_dir = "/content/DeepGlobe224/"):
"""
Args-
filenames: tensorflow list_files dataset object
size: tuple of ints
padding=one of "VALID" "SAME"
save_dir-save directory
"""
(h,w) = size
for f in filenames:
fn=tf.strings.split(f,"_")
image_fn = (fn[0]+"_sat.jpg").numpy()
label_fn = (fn[0]+"_mask.png").numpy()
image = tf.keras.preprocessing.image.load_img(image_fn)
image = tf.keras.preprocessing.image.img_to_array(image)
label = tf.keras.preprocessing.image.load_img(label_fn)
label = tf.keras.preprocessing.image.img_to_array(label)
#(H,W,3)
""" - do this step in preprocessing instead since the encoding rescales everything to 255
#binarization
label [label >= 128] = 255
label[label < 128] = 0
labelnew = label[:,:,0]+0.1*label[:,:,1]+0.01*label[:,:,2] #contracts the last dimension without losing class information
class_dict = {(25.5+2.55):0, (255+25.5):1, (255+2.55):2,(25.5):3,2.55:4,(255+25.5+2.55):5,0:6}
labelnew = to_categorical(labelnew,class_dict) #(H,W) """
image = tf.expand_dims(image,axis=0)
image_tiles = tf.image.extract_patches(images=image,
sizes=[1,h, w, 1],
strides=[1,h, w, 1],
rates=[1, 1, 1, 1],
padding=padding)
image_tiles = tf.reshape(image_tiles, [-1,h,w,3])
#label = tf.expand_dims(labelnew,axis=-1)
label = tf.expand_dims(label,axis=0)
label_tiles = tf.image.extract_patches(images=label,
sizes=[1,h, w, 1],
strides=[1,h, w, 1],
rates=[1, 1, 1, 1],
padding=padding)
label_tiles = tf.reshape(label_tiles, [-1,h,w,3])
if not(os.path.isdir(save_dir)):
os.mkdir(save_dir)
for i in range(label_tiles.shape[0]):
imgtile_fn = (save_dir+fn[0].numpy().decode("utf-8").split("/")[-1]+"_"+str(i)+"_sat.jpg")
labeltile_fn = (save_dir+fn[0].numpy().decode("utf-8").split("/")[-1]+"_"+str(i)+"_mask.png")
tf.keras.preprocessing.image.save_img(imgtile_fn,image_tiles[i,:,:,:])
tf.keras.preprocessing.image.save_img(labeltile_fn,label_tiles[i,:,:,:])
print(image_fn)
deepglobe_write_tiles(filenames)
shutil.make_archive("/content/DeepGlobe_512",'zip',"/content/DeepGlobe224")
shutil.copy2("/content/DeepGlobe_512.zip","/content/drive/MyDrive")
```
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_08_5_kaggle_project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 8: Kaggle Data Sets**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 8 Material
* Part 8.1: Introduction to Kaggle [[Video]](https://www.youtube.com/watch?v=v4lJBhdCuCU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_1_kaggle_intro.ipynb)
* Part 8.2: Building Ensembles with Scikit-Learn and Keras [[Video]](https://www.youtube.com/watch?v=LQ-9ZRBLasw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_2_keras_ensembles.ipynb)
* Part 8.3: How Should you Architect Your Keras Neural Network: Hyperparameters [[Video]](https://www.youtube.com/watch?v=1q9klwSoUQw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_3_keras_hyperparameters.ipynb)
* Part 8.4: Bayesian Hyperparameter Optimization for Keras [[Video]](https://www.youtube.com/watch?v=sXdxyUCCm8s&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_4_bayesian_hyperparameter_opt.ipynb)
* **Part 8.5: Current Semester's Kaggle** [[Video]](https://www.youtube.com/watch?v=PHQt0aUasRg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_08_5_kaggle_project.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
# Start CoLab
try:
    %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except:
    print("Note: not using Google CoLab")
    COLAB = False

# Nicely formatted time string
def hms_string(sec_elapsed):
    h = int(sec_elapsed / (60 * 60))
    m = int((sec_elapsed % (60 * 60)) / 60)
    s = sec_elapsed % 60
    return "{}:{:>02}:{:>05.2f}".format(h, m, s)
```
# Part 8.5: Current Semester's Kaggle
Kaggle competition site for the current semester (Spring 2021):
* [Spring 2021 Kaggle Assignment](https://www.kaggle.com/c/applications-of-deep-learning-wustl-spring-2021b)
Previous Kaggle competition sites for this class (NOT this semester's assignment, feel free to use code):
* [Fall 2020 Kaggle Assignment](https://www.kaggle.com/c/applications-of-deep-learning-wustl-fall-2020)
* [Spring 2020 Kaggle Assignment](https://www.kaggle.com/c/applications-of-deep-learningwustl-spring-2020)
* [Fall 2019 Kaggle Assignment](https://kaggle.com/c/applications-of-deep-learningwustl-fall-2019)
* [Spring 2019 Kaggle Assignment](https://www.kaggle.com/c/applications-of-deep-learningwustl-spring-2019)
* [Fall 2018 Kaggle Assignment](https://www.kaggle.com/c/wustl-t81-558-washu-deep-learning-fall-2018)
* [Spring 2018 Kaggle Assignment](https://www.kaggle.com/c/wustl-t81-558-washu-deep-learning-spring-2018)
* [Fall 2017 Kaggle Assignment](https://www.kaggle.com/c/wustl-t81-558-washu-deep-learning-fall-2017)
* [Spring 2017 Kaggle Assignment](https://inclass.kaggle.com/c/applications-of-deep-learning-wustl-spring-2017)
* [Fall 2016 Kaggle Assignment](https://inclass.kaggle.com/c/wustl-t81-558-washu-deep-learning-fall-2016)
# Iris as a Kaggle Competition
If the Iris data were used as a Kaggle, you would be given the following three files:
* [kaggle_iris_test.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_iris_test.csv) - The data that Kaggle will evaluate you on. Contains only input, you must provide answers. (contains x)
* [kaggle_iris_train.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_iris_train.csv) - The data that you will use to train. (contains x and y)
* [kaggle_iris_sample.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_iris_sample.csv) - A sample submission for Kaggle. (contains x and y)
Important features of the Kaggle iris files (that differ from how we've previously seen files):
* The iris species is already index encoded.
* Your training data is in a separate file.
* You will load the test data to generate a submission file.
The following program generates a submission file for "Iris Kaggle". You can use it as a starting point for assignment 3.
```
import os
import pandas as pd
from sklearn.model_selection import train_test_split
import tensorflow as tf
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.callbacks import EarlyStopping
df_train = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/datasets/"+\
"kaggle_iris_train.csv", na_values=['NA','?'])
# Encode feature vector
df_train.drop('id', axis=1, inplace=True)
num_classes = len(df_train.groupby('species').species.nunique())
print("Number of classes: {}".format(num_classes))
# Convert to numpy - Classification
x = df_train[['sepal_l', 'sepal_w', 'petal_l', 'petal_w']].values
dummies = pd.get_dummies(df_train['species']) # Classification
species = dummies.columns
y = dummies.values
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=45)
# Train, with early stopping
model = Sequential()
model.add(Dense(50, input_dim=x.shape[1], activation='relu'))
model.add(Dense(25))
model.add(Dense(y.shape[1],activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3,
patience=5, verbose=1, mode='auto',
restore_best_weights=True)
model.fit(x_train,y_train,validation_data=(x_test,y_test),
callbacks=[monitor],verbose=0,epochs=1000)
```
Now that we've trained the neural network, we can check its log loss.
```
from sklearn import metrics
# Calculate multi log loss error
pred = model.predict(x_test)
score = metrics.log_loss(y_test, pred)
print("Log loss score: {}".format(score))
```
Now we are ready to generate the Kaggle submission file. We will use the iris test data that does not contain a $y$ target value. It is our job to predict this value and submit to Kaggle.
```
# Generate Kaggle submit file
# Encode feature vector
df_test = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/datasets/"+\
"kaggle_iris_test.csv", na_values=['NA','?'])
# Convert to numpy - Classification
ids = df_test['id']
df_test.drop('id', axis=1, inplace=True)
x = df_test[['sepal_l', 'sepal_w', 'petal_l', 'petal_w']].values
y = dummies.values
# Generate predictions
pred = model.predict(x)
#pred
# Create submission data set
df_submit = pd.DataFrame(pred)
df_submit.insert(0,'id',ids)
df_submit.columns = ['id','species-0','species-1','species-2']
# Write submit file locally
df_submit.to_csv("iris_submit.csv", index=False)
print(df_submit)
```
# MPG as a Kaggle Competition (Regression)
If the Auto MPG data were used as a Kaggle competition, you would be given the following three files:
* [kaggle_mpg_test.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_auto_test.csv) - The data that Kaggle will evaluate you on. Contains only input, you must provide answers. (contains x)
* [kaggle_mpg_train.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_auto_train.csv) - The data that you will use to train. (contains x and y)
* [kaggle_mpg_sample.csv](https://data.heatonresearch.com/data/t81-558/datasets/kaggle_auto_sample.csv) - A sample submission for Kaggle. (contains x and y)
These files follow the same pattern as the iris Kaggle files described above.
The following program generates a submission file for "MPG Kaggle".
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping
import pandas as pd
import io
import os
import requests
import numpy as np
from sklearn import metrics
save_path = "."
df = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/datasets/"+\
"kaggle_auto_train.csv",
na_values=['NA', '?'])
cars = df['name']
# Handle missing value
df['horsepower'] = df['horsepower'].fillna(df['horsepower'].median())
# Pandas to Numpy
x = df[['cylinders', 'displacement', 'horsepower', 'weight',
'acceleration', 'year', 'origin']].values
y = df['mpg'].values # regression
# Split into train/test
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42)
# Build the neural network
model = Sequential()
model.add(Dense(25, input_dim=x.shape[1], activation='relu')) # Hidden 1
model.add(Dense(10, activation='relu')) # Hidden 2
model.add(Dense(1)) # Output
model.compile(loss='mean_squared_error', optimizer='adam')
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3, patience=5,
verbose=1, mode='auto', restore_best_weights=True)
model.fit(x_train,y_train,validation_data=(x_test,y_test),
verbose=2,callbacks=[monitor],epochs=1000)
# Predict
pred = model.predict(x_test)
```
Now that we've trained the neural network, we can check its RMSE error.
```
import numpy as np
# Measure RMSE error. RMSE is common for regression.
score = np.sqrt(metrics.mean_squared_error(pred,y_test))
print("Final score (RMSE): {}".format(score))
```
Now we are ready to generate the Kaggle submission file. We will use the MPG test data that does not contain a $y$ target value. It is our job to predict this value and submit to Kaggle.
```
import pandas as pd
# Generate Kaggle submit file
# Encode feature vector
df_test = pd.read_csv(
"https://data.heatonresearch.com/data/t81-558/datasets/"+\
"kaggle_auto_test.csv", na_values=['NA','?'])
# Convert to numpy - regression
ids = df_test['id']
df_test.drop('id', axis=1, inplace=True)
# Handle missing value
df_test['horsepower'] = df_test['horsepower'].\
fillna(df['horsepower'].median())
x = df_test[['cylinders', 'displacement', 'horsepower', 'weight',
'acceleration', 'year', 'origin']].values
# Generate predictions
pred = model.predict(x)
#pred
# Create submission data set
df_submit = pd.DataFrame(pred)
df_submit.insert(0,'id',ids)
df_submit.columns = ['id','mpg']
# Write submit file locally
df_submit.to_csv("auto_submit.csv", index=False)
print(df_submit)
```
# Module 8 Assignment
You can find the first assignment here: [assignment 8](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class8.ipynb)
# Scikit Learn and the K-nearest Neighbor Algorithm
In this notebook we'll introduce the `sklearn` package and a few important concepts in machine learning:
* Splitting data into test, train, and validation sets.
* Fitting models to a dataset.
* And using "Hyperparameters" to tune models.
Let's revisit the example we saw in the first class:
```
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
# Load the data
heart_dataset = pd.read_csv('../../datasets/uci-heart-disease/heart.csv')
# Split the data into input and labels
labels = heart_dataset['target']
input_data = heart_dataset.drop(columns=['target'])
# Split the data into training and test
training_data, test_data, training_labels, test_labels = train_test_split(
input_data,
labels,
test_size=0.20
)
# Build the model
model = KNeighborsClassifier()
model.fit(training_data, training_labels)
# See how it did.
print("Test accuracy: ", model.score(test_data, test_labels))
```
# Test/Train/Validation Splits:
In machine learning, it's important to avoid something called "overfitting" our models. This happens when a model more or less "memorizes" the training data, but performs poorly on data that is "held out" of the training process. A model that is "overfit" won't "generalize" to unseen data — making it useless in the real world!
To avoid and detect overfitting we will commonly split our data into 3 buckets:
* Training data: the data that the model actually learns from.
* Validation data: data that the model doesn't learn from, but that we use to validate the results throughout the process of building our models.
* Test data: data that is held out entirely during the model building process in order to give an unbiased measure of the final model's performance.
* If we use the test data, and then change our model, the test data is no longer "unbiased" as we will have incorporated information from the test data (i.e. our model's poor "test performance") into the next version of the model. In this case we have turned the test data into validation data, and we should get new test data.
In the above example we've only made two buckets of data, which we called training and test... In theory we COULD do this:
```
# Split the data into training+validation and test
training_and_validation_data, test_data, training_and_validation_labels, test_labels = train_test_split(
input_data,
labels,
test_size=0.20
)
# Split the training+validation data into training and validation
training_data, validation_data, training_labels, validation_labels = train_test_split(
training_and_validation_data,
training_and_validation_labels,
test_size=0.20
)
# Then use only training and validation to evaluate our model and make changes to the model's performance...
```
While the above CAN be done, it's much more common to make the test/train split much earlier in the data pipeline. Many datasets for ML tasks come with a test set already prepared and separate from the training set. ML practitioners then perform a training/validation split with all of the training data. The training and test data will be saved separately, in their own folders or CSV files or labeled differently in the database/data warehouse software.
If you've collected your own data and want to do ML with it, I strongly suggest you split out 10%-20% of that data, set it aside, and don't look at it until the very end of your ML pipeline to get an unbiased evaluation once you've built a model you like.
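For example, here is a minimal sketch of carving off a held-out test set at the very start and saving it to disk (the file names are just illustrative):
```
from sklearn.model_selection import train_test_split
import pandas as pd

full_data = pd.read_csv('../../datasets/uci-heart-disease/heart.csv')

# Carve off 20% once, at the very start of the project
working_data, held_out_test = train_test_split(full_data, test_size=0.20, random_state=42)

# Save the held-out rows somewhere you won't touch until the very end
working_data.to_csv('heart_working.csv', index=False)
held_out_test.to_csv('heart_heldout_test.csv', index=False)
```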
# SKLearn's API
Scikit learn has a wonderfully unified API that always follows this pattern:
* Create a model from a class.
* This is where you set the "hyperparameters" of the model.
* Call that model's `.fit` method using the training data to train the model.
* Call that model's `.score` method to evaluate the model against the validation/test data.
For example:
```
# Lets build multiple models using a few different "hyperparameters"
model_one = KNeighborsClassifier()
model_two = KNeighborsClassifier(weights='distance')
model_three = KNeighborsClassifier(n_neighbors=10, weights='distance')
for i, model in enumerate([model_one, model_two, model_three]):
model.fit(training_data, training_labels)
print(f' {i+1} validation accuracy: ', model.score(validation_data, validation_labels))
```
# The K-Nearest Neighbor's Model
So what is the actual difference between these three models? How does KNN actually work?
KNN is a relatively straightforward model. When you want to make a prediction with KNN you simply compare the item you're making a prediction about to the training dataset using a distance function and based on the class of the "nearest" neighbors the model makes a prediction.
K is how many neighbors to look at: if k is 5, the model looks at the 5 nearest neighbors, and whichever class is most common among those 5 neighbors is the one selected. Let's look at some pictures from the pre-reading (https://towardsdatascience.com/laymans-introduction-to-knn-c793ed392bc2):




These examples are all in 2-dimensional space, but the algorithm generalizes to n-dimensions (based on the number of features in our training data).
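To make the voting step concrete, here is a tiny self-contained sketch of the core KNN prediction (a toy illustration of the idea, not how `sklearn` implements it internally):
```
import numpy as np
from collections import Counter

def knn_predict(query_point, train_points, train_labels, k=5):
    # Euclidean distance from the query point to every training point
    distances = np.linalg.norm(train_points - query_point, axis=1)
    # Indices of the k closest training points
    nearest = np.argsort(distances)[:k]
    # Majority vote among their labels
    return Counter(train_labels[nearest]).most_common(1)[0][0]

# Tiny 2-D example: two clusters labelled 0 and 1
points = np.array([[0, 0], [1, 0], [0, 1], [5, 5], [6, 5], [5, 6]])
labels = np.array([0, 0, 0, 1, 1, 1])
print(knn_predict(np.array([0.5, 0.5]), points, labels, k=3))  # -> 0
print(knn_predict(np.array([5.5, 5.5]), points, labels, k=3))  # -> 1
```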
K is controlled in `sklearn` by the `n_neighbors` parameter.
Another hyperparameter in KNN is the `weights` parameter, which has 3 possible values, from the docs (https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsClassifier.html):
* ‘uniform’ : uniform weights. All points in each neighborhood are weighted equally.
* ‘distance’ : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away.
* [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights.
Similarly, the distance metric can be provided:
> metric: str or callable, default=’minkowski’
> the distance metric to use for the tree. The default metric is minkowski, and with p=2 is equivalent to the standard Euclidean metric. See the documentation of DistanceMetric for a list of available metrics. If metric is “precomputed”, X is assumed to be a distance matrix and must be square during fit. X may be a sparse graph, in which case only “nonzero” elements may be considered neighbors.
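For example, a quick sketch of trying a different metric and weighting scheme (the particular values here are just illustrative):
```
# 'manhattan' (L1) distance, with distance-based weighting
manhattan_model = KNeighborsClassifier(n_neighbors=7, metric='manhattan', weights='distance')
manhattan_model.fit(training_data, training_labels)
print("Manhattan-distance validation accuracy:",
      manhattan_model.score(validation_data, validation_labels))
```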
# Submitting and Managing Jobs
Launch this tutorial in a Jupyter Notebook on Binder:
[](https://mybinder.org/v2/gh/htcondor/htcondor-python-bindings-tutorials/master?urlpath=lab/tree/Submitting-and-Managing-Jobs.ipynb)
## What is HTCondor?
An HTCondor pool provides a way for you (as a user) to submit units of work, called **jobs**, to be executed on a distributed network of computing resources.
HTCondor provides tools to monitor your jobs as they run, and make certain kinds of changes to them after submission, which we call "managing" jobs.
In this tutorial, we will learn how to submit and manage jobs *from Python*.
We will see how to submit jobs with various toy executables, how to ask HTCondor for information about them, and how to tell HTCondor to do things with them.
All of these things are possible from the command line as well, using tools like `condor_submit`, `condor_qedit`, and `condor_hold`.
However, working from Python instead of the command line gives us access to the full power of Python to do things like generate jobs programmatically based on user input, pass information consistently from submission to management, or even expose an HTCondor pool to a web application.
We start by importing the HTCondor Python bindings modules, which provide the functions we will need to talk to HTCondor.
```
import htcondor # for submitting jobs, querying HTCondor daemons, etc.
import classad # for interacting with ClassAds, HTCondor's internal data format
```
## Submitting a Simple Job
To submit a job, we must first describe it.
A submit description is held in a `Submit` object.
`Submit` objects consist of key-value pairs, and generally behave like Python dictionaries.
If you're familiar with HTCondor's submit file syntax, you should think of each line in the submit file as a single key-value pair in the `Submit` object.
Let's start by writing a `Submit` object that describes a job that executes the `hostname` command on an execute node, which prints out the "name" of the node.
Since `hostname` prints its results to standard output (stdout), we will capture stdout and bring it back to the submit machine so we can see the name.
```
hostname_job = htcondor.Submit({
"executable": "/bin/hostname", # the program to run on the execute node
"output": "hostname.out", # anything the job prints to standard output will end up in this file
"error": "hostname.err", # anything the job prints to standard error will end up in this file
"log": "hostname.log", # this file will contain a record of what happened to the job
"request_cpus": "1", # how many CPU cores we want
"request_memory": "128MB", # how much memory we want
"request_disk": "128MB", # how much disk space we want
})
print(hostname_job)
```
The available descriptors are documented in the [`condor_submit` manual page](https://htcondor.readthedocs.io/en/latest/man-pages/condor_submit.html).
The keys of the Python dictionary you pass to `htcondor.Submit` should be the same as for the submit descriptors, and the values should be **strings containing exactly what would go on the right-hand side**.
Note that we gave the `Submit` object several relative filepaths.
These paths are relative to the directory containing this Jupyter notebook (or, more generally, the current working directory).
When we run the job, you should see those files appear in the file browser on the left as HTCondor creates them.
Now that we have a job description, let's submit a job.
The `htcondor.Schedd.submit` method returns a `SubmitResult` object that contains information about the job, such as its `ClusterId`.
```
schedd = htcondor.Schedd() # get the Python representation of the scheduler
submit_result = schedd.submit(hostname_job) # submit the job
print(submit_result.cluster()) # print the job's ClusterId
```
The job's `ClusterId` uniquely identifies this submission.
Later in this module, we will use it to ask the HTCondor scheduler for information about our jobs.
For now, our job will hopefully have finished running.
You should be able to see the files in the file browser on the left.
Try opening one of them and seeing what's inside.
We can also look at the output from inside Python:
```
import os
import time
output_path = "hostname.out"
# this is a crude way to wait for the job to finish
# see the Advanced tutorial "Scalable Job Tracking" for better methods!
while not os.path.exists(output_path):
print("Output file doesn't exist yet; sleeping for one second")
time.sleep(1)
with open(output_path, mode = "r") as f:
print(f.read())
```
If you got some text, it worked!
If the file never shows up, it means your job didn't run.
You might try looking at the `log` or `error` files specified in the submit description to see if there is any useful information in them about why the job failed.
## Submitting Multiple Jobs
By default, each `submit` will submit a single job.
A more common use case is to submit many jobs at once, often sharing some base submit description.
Let's write a new submit description which runs `sleep`.
When we have multiple **jobs** in a single **cluster**, each job will be identified not just by its **ClusterId** but also by a **ProcID**.
We can use the ProcID to separate the output and error files for each individual job.
Anything that looks like `$(...)` in a submit description is a **macro**, a placeholder which will be "expanded" later by HTCondor into a real value for that particular job.
The ProcID expands to a series of incrementing integers, starting at 0.
So the first job in a cluster will have ProcID 0, the next will have ProcID 1, etc.
```
sleep_job = htcondor.Submit({
"executable": "/bin/sleep",
"arguments": "10s", # sleep for 10 seconds
"output": "sleep-$(ProcId).out", # output and error for each job, using the $(ProcId) macro
"error": "sleep-$(ProcId).err",
"log": "sleep.log", # we still send all of the HTCondor logs for every job to the same file (not split up!)
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
print(sleep_job)
```
We will submit 10 of these jobs.
All we need to change from our previous `submit` call is to add the `count` keyword argument.
```
schedd = htcondor.Schedd()
submit_result = schedd.submit(sleep_job, count=10) # submit 10 jobs
print(submit_result.cluster())
```
Now that we have a bunch of jobs in flight, we might want to check how they're doing.
We can ask the HTCondor scheduler about jobs by using its `query` method.
We give it a **constraint**, which tells it which jobs to look for, and a **projection**, which tells it what information to return.
```
schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "Out"],
)
```
There are a few things to notice here:
- Depending on how long it took you to run the cell, you may only get a few of your 10 jobs in the query. Jobs that have finished **leave the queue**, and will no longer show up in queries. To see those jobs, you must use the `history` method instead, which behaves like `query`, but **only** looks at jobs that have left the queue (a short sketch of `history` appears right after this list).
- The results may not have come back in ProcID-sorted order. If you want to guarantee the order of the results, you must do so yourself.
- Attributes are often renamed between the submit description and the actual job description in the queue. See [the manual](https://htcondor.readthedocs.io/en/latest/classad-attributes/job-classad-attributes.html) for a description of the job attribute names.
- The objects returned by the query are instances of `ClassAd`. ClassAds are the common data exchange format used by HTCondor. In Python, they mostly behave like dictionaries.
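Since finished jobs leave the queue, here is a short sketch of querying the history instead (the exact signature of `Schedd.history` can vary a little between HTCondor versions, so treat this as a sketch):
```
# Look up jobs from this cluster that have already left the queue
history_ads = schedd.history(
    f"ClusterId == {submit_result.cluster()}",  # constraint, same syntax as for query
    projection=["ClusterId", "ProcId", "JobStatus", "ExitCode"],
    match=10,  # return at most 10 ads
)
for ad in history_ads:
    print(ad.get("ProcId"), ad.get("JobStatus"), ad.get("ExitCode"))
```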
## Using Itemdata to Vary Over Parameters
By varying some part of the submit description using the ProcID, we can change how each individual job behaves.
Perhaps it will use a different input file, or a different argument.
However, we often want more flexibility than that.
Perhaps our input files are named after different cities, or by timestamp, or some other naming scheme that already exists.
To use such information in the submit description, we need to use **itemdata**.
Itemdata lets us pass arbitrary extra information when we queue, which we can reference with macros inside the submit description.
This lets us use the full power of Python to generate the submit descriptions for our jobs.
Let's mock this situation out by generating some files with randomly-chosen names.
We'll also switch to using `pathlib.Path`, Python's more modern file path manipulation library.
```
from pathlib import Path
import random
import string
import shutil
def random_string(length):
"""Produce a random lowercase ASCII string with the given length."""
return "".join(random.choices(string.ascii_lowercase, k = length))
# make a directory to hold the input files, clearing away any existing directory
input_dir = Path.cwd() / "inputs"
shutil.rmtree(input_dir, ignore_errors = True)
input_dir.mkdir()
# make 5 input files
for idx in range(5):
rs = random_string(5)
input_file = input_dir / "{}.txt".format(rs)
input_file.write_text("Hello from job {}".format(rs))
```
Now we'll get a list of all the files we just created in the input directory.
This is precisely the kind of situation where Python affords us a great deal of flexibility over a submit file: we can use Python instead of the HTCondor submit language to generate and inspect the information we're going to put into the submit description.
```
input_files = list(input_dir.glob("*.txt"))
for path in input_files:
print(path)
```
Now we'll make our submit description.
Our goal is just to print out the text held in each file, which we can do using `cat`.
We will tell HTCondor to transfer the input file to the execute location by including it in `transfer_input_files`.
We also need to call `cat` on the right file via `arguments`.
Keep in mind that HTCondor will move the files in `transfer_input_files` directly to the scratch directory on the execute machine, so instead of the full path, we just need the file's "name", the last component of its path.
`pathlib` will make it easy to extract this information.
```
cat_job = htcondor.Submit({
"executable": "/bin/cat",
"arguments": "$(input_file_name)", # we will pass in the value for this macro via itemdata
"transfer_input_files": "$(input_file)", # we also need HTCondor to move the file to the execute node
"should_transfer_files": "yes", # force HTCondor to transfer files even though we're running entirely inside a container (and it normally wouldn't need to)
"output": "cat-$(ProcId).out",
"error": "cat-$(ProcId).err",
"log": "cat.log",
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
print(cat_job)
```
The itemdata should be passed as a list of dictionaries, where the keys are the macro names to replace in the submit description.
In our case, the keys are `input_file` and `input_file_name`, so we should have a list of 5 dictionaries (one per input file), each with two entries.
HTCondor expects the input file list to be a comma-separated list of POSIX-style paths, so we explicitly convert our `Path` to a POSIX string.
```
itemdata = [{"input_file": path.as_posix(), "input_file_name": path.name} for path in input_files]
for item in itemdata:
print(item)
```
Now we'll submit the jobs, adding the `itemdata` parameter to the `submit` call:
```
schedd = htcondor.Schedd()
submit_result = schedd.submit(cat_job, itemdata = iter(itemdata)) # submit one job for each item in the itemdata
print(submit_result.cluster())
```
Let's do a query to make sure we got the itemdata right (these jobs run fast, so you might need to re-run the jobs if your first run has already left the queue):
```
schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "Out", "Args", "TransferInput"],
)
```
And let's take a look at all the output:
```
# again, this is very crude - see the advanced tutorials!
while not len(list(Path.cwd().glob("cat-*.out"))) == len(itemdata):
print("Not all output files exist yet; sleeping for one second")
time.sleep(1)
for output_file in Path.cwd().glob("cat-*.out"):
print(output_file, "->", output_file.read_text())
```
## Managing Jobs
Once a job is in queue, the scheduler will try its best to execute it to completion.
There are several cases where you may want to interrupt the normal flow of jobs.
Perhaps the results are no longer needed; perhaps the job needs to be edited to correct a submission error.
These actions fall under the purview of **job management**.
There are two `Schedd` methods dedicated to job management:
* `edit()`: Change an attribute for a set of jobs.
* `act()`: Change the state of a job (remove it from the queue, hold it, suspend it, etc.).
The `act` method takes an argument from the `JobAction` enum.
Commonly-used values include:
* `Hold`: put a job on hold, vacating a running job if necessary. A job will stay in the hold
state until told otherwise.
* `Release`: Release a job from the hold state, returning it to Idle.
* `Remove`: Remove a job from the queue. If it is running, it will stop running.
This requires the execute node to acknowledge it has successfully vacated the job, so ``Remove`` may
not be instantaneous.
* `Vacate`: Cause a running job to be killed on the remote resource and return to the Idle state. With
`Vacate`, jobs may be given significant time to cleanly shut down.
To play with this, let's bring back our sleep submit description, but increase the sleep time significantly so that we have time to interact with the jobs.
```
long_sleep_job = htcondor.Submit({
"executable": "/bin/sleep",
"arguments": "10m", # sleep for 10 minutes
"output": "sleep-$(ProcId).out",
"error": "sleep-$(ProcId).err",
"log": "sleep.log",
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
print(long_sleep_job)
schedd = htcondor.Schedd()
submit_result = schedd.submit(long_sleep_job, count=5)
```
As an experiment, let's set an arbitrary attribute on the jobs and check that it worked.
When we're really working, we could do things like change the amount of memory a job has requested by editing its `RequestMemory` attribute.
The job attributes that are built-in to HTCondor are described [here](https://htcondor.readthedocs.io/en/latest/classad-attributes/job-classad-attributes.html), but your site may specify additional, custom attributes as well.
```
# sets attribute foo to the string "bar" for all of our jobs
# note the nested quotes around bar! The outer "" make it a Python string; the inner "" make it a ClassAd string.
schedd.edit(f"ClusterId == {submit_result.cluster()}", "foo", "\"bar\"")
# do a query to check the value of attribute foo
schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "JobStatus", "foo"],
)
```
Although the job status appears to be an attribute, we cannot `edit` it directly.
As mentioned above, we must instead `act` on the job.
Let's hold the first two jobs so that they stop running, but leave the others going.
```
# hold the first two jobs
schedd.act(htcondor.JobAction.Hold, f"ClusterId == {submit_result.cluster()} && ProcID <= 1")
# check the status of the jobs
ads = schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "JobStatus"],
)
for ad in ads:
# the ClassAd objects returned by the query act like dictionaries, so we can extract individual values out of them using []
print(f"ProcID = {ad['ProcID']} has JobStatus = {ad['JobStatus']}")
```
The various job statuses are represented by numbers. `1` means `Idle`, `2` means `Running`, and `5` means `Held`. If you see `JobStatus = 5` above for `ProcID = 0` and `ProcID = 1`, then we succeeded!
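If the numeric codes are hard to remember, a tiny lookup table makes the output easier to read (this covers only the most common codes; see the HTCondor manual for the full list):
```
# Common JobStatus codes
JOB_STATUS_NAMES = {1: "Idle", 2: "Running", 3: "Removed", 4: "Completed", 5: "Held"}

for ad in ads:
    status = ad["JobStatus"]
    print(f"ProcID = {ad['ProcID']} is {JOB_STATUS_NAMES.get(status, status)}")
```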
The opposite of `JobAction.Hold` is `JobAction.Release`.
Let's release those jobs and let them go back to `Idle`.
```
schedd.act(htcondor.JobAction.Release, f"ClusterId == {submit_result.cluster()}")
ads = schedd.query(
constraint=f"ClusterId == {submit_result.cluster()}",
projection=["ClusterId", "ProcId", "JobStatus"],
)
for ad in ads:
# the ClassAd objects returned by the query act like dictionaries, so we can extract individual values out of them using []
print(f"ProcID = {ad['ProcID']} has JobStatus = {ad['JobStatus']}")
```
Note that we simply released all the jobs in the cluster.
Releasing a job that is not held doesn't do anything, so we don't have to be extremely careful.
Finally, let's clean up after ourselves:
```
schedd.act(htcondor.JobAction.Remove, f"ClusterId == {submit_result.cluster()}")
```
## Exercises
Now let's practice what we've learned.
- In each exercise, you will be given a piece of code and a test that does not yet pass.
- The exercises are vaguely in order of increasing difficulty.
- Modify the code, or add new code to it, to pass the test. Do whatever it takes!
- You can run the test by running the block it is in.
- Feel free to look at the test for clues as to how to modify the code.
- Many of the exercises can be solved either by using Python to generate inputs, or by using advanced features of the [ClassAd language](https://htcondor.readthedocs.io/en/latest/misc-concepts/classad-mechanism.html#htcondor-s-classad-mechanism). Either way is valid!
- Don't modify the test. That's cheating!
### Exercise 1: Incrementing Sleeps
Submit five jobs which sleep for `5`, `6`, `7`, `8`, and `9` seconds, respectively.
```
# MODIFY OR ADD TO THIS BLOCK...
incrementing_sleep = htcondor.Submit({
"executable": "/bin/sleep",
"arguments": "1",
"output": "ex1-$(ProcId).out",
"error": "ex1-$(ProcId).err",
"log": "ex1.log",
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
schedd = htcondor.Schedd()
submit_result = schedd.submit(incrementing_sleep)
# ... TO MAKE THIS TEST PASS
expected = [str(i) for i in range(5, 10)]
print("Expected ", expected)
ads = schedd.query(f"ClusterId == {submit_result.cluster()}", projection = ["Args"])
arguments = sorted(ad["Args"] for ad in ads)
print("Got ", arguments)
assert arguments == expected, "Arguments were not what we expected!"
print("The test passed. Good job!")
```
### Exercise 2: Echo to Target
Run a job that makes the text `Echo to Target` appear in a file named `ex3.txt`.
```
# MODIFY OR ADD TO THIS BLOCK...
echo = htcondor.Submit({
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
schedd = htcondor.Schedd()
submit_result = schedd.submit(echo)
# ... TO MAKE THIS TEST PASS
does_file_exist = os.path.exists("ex3.txt")
assert does_file_exist, "ex3.txt does not exist!"
expected = "Echo to Target"
print("Expected ", expected)
contents = open("ex3.txt", mode = "r").read().strip()
print("Got ", contents)
assert expected in contents, "Contents were not what we expected!"
print("The test passed. Good job!")
```
### Exercise 3: Holding Odds
Hold all of the odd-numbered jobs in this large cluster.
- Note that the test block **removes all of the jobs you own** when it runs, to prevent these long-running jobs from corrupting other tests!
```
# MODIFY OR ADD TO THIS BLOCK...
long_sleep = htcondor.Submit({
"executable": "/bin/sleep",
"arguments": "10m",
"output": "ex2-$(ProcId).out",
"error": "ex2-$(ProcId).err",
"log": "ex2.log",
"request_cpus": "1",
"request_memory": "128MB",
"request_disk": "128MB",
})
schedd = htcondor.Schedd()
submit_result = schedd.submit(long_sleep, count=100)
# ... TO MAKE THIS TEST PASS
import getpass
try:
ads = schedd.query(f"ClusterId == {submit_result.cluster()}", projection = ["ProcID", "JobStatus"])
proc_to_status = {int(ad["ProcID"]): ad["JobStatus"] for ad in sorted(ads, key = lambda ad: ad["ProcID"])}
for proc, status in proc_to_status.items():
print("Proc {} has status {}".format(proc, status))
assert len(proc_to_status) == 100, "Wrong number of jobs (perhaps you need to resubmit them?)."
assert all(status == 5 for proc, status in proc_to_status.items() if proc % 2 != 0), "Not all odd jobs were held."
assert all(status != 5 for proc, status in proc_to_status.items() if proc % 2 == 0), "An even job was held."
print("The test passed. Good job!")
finally:
schedd.act(htcondor.JobAction.Remove, f'Owner=="{getpass.getuser()}"')
```
## Individual Variable Data Exploration Notebook
```
import numpy as np
import pandas as pd
import missingno as msno
import matplotlib.pyplot as plt
import seaborn as sns
data_train = pd.read_csv('claim_data_v2_train.csv')
data_train.sample(3)
def visualize_cat(attr, df=data_train):
df_i = df[['Fraudulent_Claim', attr]].groupby([attr])['Fraudulent_Claim'].agg(['count','sum', 'mean'])
m = 'Pct Fraud'
    df_i = df_i.rename({'count': 'frequency', 'sum': 'Fraud_Count', 'mean': m}, axis='columns')
print(df_i)
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
df_i['frequency'].plot.bar(ax=ax1, alpha=.8)
df_i[m].plot.line(ax=ax2, color='k')
ax1.set_ylabel('frequency')
ax2.set_ylabel('Pct Fraud')
ax2.set_ylim(bottom=0, top=1, auto=False)
ax2.legend()
data_train["Fraudulent_Claim"].describe()
#Convert Fraud Claim data from Y/N to 1/0
data_train.Fraudulent_Claim.replace(('Y', 'N'), (1, 0), inplace=True)
#Test to see if fraud claim data converted correctly
data_train.head()
data_train.columns
data_train.describe(include='all')
# missing data
msno.matrix(data_train)
corrmat= data_train.corr()
f, ax = plt.subplots(figsize=(12, 9))
sns.heatmap(corrmat, vmax=.8, square=True);
```
# Describe Each Variable - Find Outliers
#### Claim Amount
```
data_train['Claim_Amount'].hist()
```
Log transformation
```
data_train['Claim_Amount'].apply(np.log).hist()
```
#### Income
Income has a major outlier of $10M
```
data_train['Income'].describe()
```
Top 10 Earners
```
data_train['Income'].sort_values(ascending=False).head(10)
data_train['Income'].plot.box()
```
Drop values above the boxplot (greater than Q3+1.5*IQR)
```
cutoff = data_train['Income'].quantile(.75) + 1.5*(data_train['Income'].quantile(.75) - data_train['Income'].quantile(.25))
print("Cutoff value:", cutoff)
data_train['Income'][data_train['Income']<cutoff].plot.box()
```
Graph histogram without 0 income and without outliers
```
data_train['Income'][(data_train['Income']<cutoff) & (data_train['Income']>0)].hist()
```
#### Premium
Validate that Annual Premium is 12 times Monthly Premium
```
data_train[data_train['Monthly_Premium'] * 12 != data_train['Annual_Premium']]
data_train['Monthly_Premium'].describe()
```
Premium is approximately uniformly distributed between 50 and 140
```
data_train['Monthly_Premium'].hist()
data_train['Months_Since_Last_Claim'].describe()
data_train['Months_Since_Last_Claim'].plot.box()
data_train['Months_Since_Last_Claim'].hist(bins=range(0, 72, 6))
data_train['Months_Since_Policy_Inception'].hist()
data_train['Outstanding_Balance'].hist()
def get_categorical_dist(attr):
return data_train.groupby(attr).size().sort_values(ascending=False)
get_categorical_dist('State_Code')
get_categorical_dist('Education')
get_categorical_dist('Employment_Status')
get_categorical_dist('Gender')
get_categorical_dist('Marital_Status')
get_categorical_dist('Location')
get_categorical_dist('Claim_Cause')
get_categorical_dist('Claim_Report_Type')
get_categorical_dist('Vehicle_Class')
get_categorical_dist('Vehicle_Model')
get_categorical_dist('Fraudulent_Claim')
get_categorical_dist('Claim_Date')
#missing data
data_train.isna().sum()
sns.catplot(x="Fraudulent_Claim", y="Claim_Amount", kind="violin",data=data_train);
sns.catplot(x="Fraudulent_Claim", y="Income", kind="violin",data=data_train);
```
# Backtesting: EW vs CW
```
import numpy as np
import pandas as pd
import edhec_risk_kit_204 as erk
%load_ext autoreload
%autoreload 2
ind49_rets = erk.get_ind_returns(weighting="vw", n_inds=49)["1974":]
ind49_mcap = erk.get_ind_market_caps(49, weights=True)["1974":]
```
In this section we'll develop a basic infrastructure to backtest various portfolio construction techniques and compare them. Here we'll start with something quite simple and straightforward - we'll write trivially simple weight-optimizers for Equally Weighted and Cap Weighted portfolios and compare them. Obviously, there are no Covariance estimates required for either one of these weighting schemes. In following sessions we'll enhance these optimizers to use more complex weighting schemes that will rely on covariance estimates.
```
def weight_ew(r):
"""
    Returns the weights of the EW portfolio based on the asset returns "r" as a Series
"""
n = len(r.columns)
return pd.Series(1/n, index=r.columns)
def backtest_ws(r, estimation_window=60, weighting=weight_ew):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window+1)]
# windows is a list of tuples which gives us the (integer) location of the start and stop (non inclusive)
# for each estimation window
weights = [weighting(r.iloc[win[0]:win[1]]) for win in windows]
# List -> DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window-1:].index, columns=r.columns)
# return weights
returns = (weights * r).sum(axis="columns", min_count=1) #mincount is to generate NAs if all inputs are NAs
return returns
ewr = backtest_ws(ind49_rets, weighting=weight_ew)
ewi = (1+ewr).cumprod()
ewi.plot(figsize=(12,6), title="49 Industries - Equally Weighted");
```
Now, let's add capweighting. We'll need to compute capweights, which we've already been provided through the marketcap file. We can refactor the code we've developed in the past to add a convenience function to our toolkit. Note the use of `**kwargs` so that the backtester can accept a variable number of keyword arguments and pass them through to whichever weighting function is in use, letting that function take care of whatever arguments it needs. We'll have to refactor `weight_ew` with this new signature, but that's the only change (for now) for `weight_ew`.
```
def weight_ew(r, **kwargs):
"""
    Returns the weights of the EW portfolio based on the asset returns "r" as a Series
"""
n = len(r.columns)
return pd.Series(1/n, index=r.columns)
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
return cap_weights.loc[r.index[0]]
def backtest_ws(r, estimation_window=60, weighting=weight_ew, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window+1)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert list of weights to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window-1:].index, columns=r.columns)
# return weights
returns = (weights * r).sum(axis="columns", min_count=1) #mincount is to generate NAs if all inputs are NAs
return returns
ewr = backtest_ws(ind49_rets)
cwr = backtest_ws(ind49_rets, weighting=weight_cw, cap_weights=ind49_mcap)
btr = pd.DataFrame({"EW": ewr, "CW": cwr})
(1+btr).cumprod().plot(figsize=(12,5), title="49 Industries - CapWeighted vs Equally Weighted")
erk.summary_stats(btr.dropna())
```
# Improving EW with CapWeight Tethering
Often in practice, we'll want to implement some modification of a pure strategy. For instance, although Equal Weight portfolios are popular, in practice they are usually constrained in some way - for example, to match the sector weights of the cap-weighted benchmark or to make sure that microcap stocks are not overweighted. The motivation for doing so could be to make the portfolio more tradeable (e.g. some microcaps may not have the liquidity) or to improve the tracking error relative to the Cap-Weighted index.
As an illustration of how that can be achieved, we enhance our simple `weight_ew` allocator to (i) drop microcap stocks beyond a particular threshold, and (ii) impose a constraint that ensures that the maximum weight assigned to any stock is no more than some multiple of the weight it would be in a cap-weighted portfolio.
```
def weight_ew(r, cap_weights=None, max_cw_mult=None, microcap_threshold=None, **kwargs):
"""
    Returns the weights of the EW portfolio based on the asset returns "r" as a Series
If supplied a set of capweights and a capweight tether, it is applied and reweighted
"""
n = len(r.columns)
ew = pd.Series(1/n, index=r.columns)
if cap_weights is not None:
cw = cap_weights.loc[r.index[0]] # starting cap weight
## exclude microcaps
if microcap_threshold is not None and microcap_threshold > 0:
microcap = cw < microcap_threshold
ew[microcap] = 0
ew = ew/ew.sum()
#limit weight to a multiple of capweight
if max_cw_mult is not None and max_cw_mult > 0:
ew = np.minimum(ew, cw*max_cw_mult)
ew = ew/ew.sum() #reweight
return ew
def weight_cw(r, cap_weights, **kwargs):
"""
Returns the weights of the CW portfolio based on the time series of capweights
"""
w = cap_weights.loc[r.index[0]]
return w/w.sum()
def backtest_ws(r, estimation_window=60, weighting=weight_ew, **kwargs):
"""
Backtests a given weighting scheme, given some parameters:
r : asset returns to use to build the portfolio
estimation_window: the window to use to estimate parameters
weighting: the weighting scheme to use, must be a function that takes "r", and a variable number of keyword-value arguments
"""
n_periods = r.shape[0]
# return windows
windows = [(start, start+estimation_window) for start in range(n_periods-estimation_window+1)]
weights = [weighting(r.iloc[win[0]:win[1]], **kwargs) for win in windows]
# convert to DataFrame
weights = pd.DataFrame(weights, index=r.iloc[estimation_window-1:].index, columns=r.columns)
returns = (weights * r).sum(axis="columns", min_count=1) #mincount is to generate NAs if all inputs are NAs
return returns
ewr = backtest_ws(ind49_rets)
ewtr = backtest_ws(ind49_rets, cap_weights=ind49_mcap, max_cw_mult=5, microcap_threshold=.005)
cwr = backtest_ws(ind49_rets, weighting=weight_cw, cap_weights=ind49_mcap)
btr = pd.DataFrame({"EW": ewr, "EW-Tethered": ewtr, "CW": cwr})
(1+btr).cumprod().plot(figsize=(12,5))
erk.summary_stats(btr.dropna())
```
Don't forget to add the code we've just developed to the toolkit, we're going to use it in future sessions!
## A Final Note ...
One of the motivations of adding the tethering constraint is to improve tracking error to the cap-weighted portfolio. Let's see if we did manage to achieve that:
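As a reminder, tracking error measures how far the strategy's returns deviate from the benchmark's. A common definition (the exact scaling convention used by `erk.tracking_error` may differ) works on the period-by-period differences between the strategy returns $r^p_t$ and the benchmark returns $r^b_t$:
$$TE = \sqrt{\sum_t \left(r^p_t - r^b_t\right)^2}$$
A lower value means the portfolio hugs the cap-weighted benchmark more closely.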
```
erk.tracking_error(ewr, cwr),erk.tracking_error(ewtr, cwr)
```
# Fun with FFT and sound files
Based on: https://realpython.com/python-scipy-fft/
Define a function for generating pure sine wave tones
```
import numpy as np
import matplotlib.pyplot as plt
SAMPLE_RATE = 44100 # Hertz
DURATION = 5 # Seconds
def generate_sine_wave(freq, sample_rate, duration):
x = np.linspace(0, duration, sample_rate * duration, endpoint=False)
frequencies = x * freq
# 2pi because np.sin takes radians
y = np.sin((2 * np.pi) * frequencies)
return x, y
# Generate a 2 hertz sine wave that lasts for 5 seconds
x, y = generate_sine_wave(2, SAMPLE_RATE, DURATION)
plt.plot(x, y)
plt.show()
```
Produce two tones, e.g. 400 Hz signal and a 4 kHz high-pitch noise
```
_, nice_tone = generate_sine_wave(400, SAMPLE_RATE, DURATION)
_, noise_tone = generate_sine_wave(4000, SAMPLE_RATE, DURATION)
noise_tone = noise_tone * 0.3
mixed_tone = nice_tone + noise_tone
#mixed_tone = noise_tone
```
For the purposes of storing the tones in an audio file, the amplitude needs to be normalized to the range of 16-bit integer
```
normalized_tone = np.int16((mixed_tone / mixed_tone.max()) * 32767)
plt.plot(normalized_tone[:1000])
plt.show()
```
Store the sound for playback
```
from scipy.io import wavfile as wf
# Remember SAMPLE_RATE = 44100 Hz is our playback rate
wf.write("mysinewave.wav", SAMPLE_RATE, normalized_tone)
```
We can also try to record a sound ourselves (NB: this won't work on datahub!)
```
# import required libraries
%pip install sounddevice
import sounddevice as sd
print("Recording...")
# Start recorder with the given values
# of duration and sample frequency
recording = sd.rec(int(DURATION * SAMPLE_RATE), samplerate=SAMPLE_RATE, channels=1)
# Record audio for the given number of seconds
sd.wait()
print("Done")
# This will convert the NumPy array to an audio
# file with the given sampling frequency
wf.write("recording0.wav", 400, recording)
```
### Fourier transforms
Now try to transform the time stream into frequency space using FFT
```
from scipy.fft import fft, fftfreq
# Number of samples in normalized_tone
N = SAMPLE_RATE * DURATION
yf = fft(normalized_tone)
xf = fftfreq(N, 1 / SAMPLE_RATE)
print('Type of the output array: ',type(yf[0]))
print('Size of the input array: ',N)
print('Size of the Fourier transform: ',len(xf))
df = xf[1]-xf[0]
print(f'Width of the frequency bins: {df} Hz')
plt.plot(xf, np.abs(yf))
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.show()
plt.figure()
plt.yscale('log')
plt.plot(xf, np.abs(yf))
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.xlim(350,4050)
plt.show()
```
You will notice that `fft` returns data for both positive and negative frequencies, produces an output array of the same size as the input, and that the output is a set of *complex* numbers. However, the information is redundant: only half of the output values are unique. The magnitudes of the Fourier coefficients at negative frequencies are the same as at the corresponding positive frequencies. This is a property of the Fourier transform of *real*-valued signals. More precisely, $\mathrm{fft}(f)=\mathrm{fft}^*(-f)$.
```
print(xf[1],xf[-1])
print(yf[1],yf[-1])
```
We can use this fact to save computational time and storage by computing only half of the Fourier coefficients:
```
from scipy.fft import rfft, rfftfreq
# Note the extra 'r' at the front
yf = rfft(normalized_tone)
xf = rfftfreq(N, 1 / SAMPLE_RATE)
print('Type of the output array: ',type(yf[0]))
print('Size of the input array: ',N)
print('Size of the Fourier transform: ',len(xf))
df = xf[1]-xf[0]
print(f'Width of the frequency bins: {df} Hz')
plt.plot(xf, np.abs(yf))
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.show()
```
Now let's look at the Fourier transform of the sound of a guitar string:
```
rate, data = wf.read("recording0.wav")
N=len(data)
print(rate, N)
time=np.arange(0, N)/rate
plt.plot(time, data)
plt.xlabel('time (sec)')
plt.ylabel('Sound a.u.)')
plt.show()
yf = rfft(data)
xf = rfftfreq(len(data), 1 / rate)
print('Type of the output array: ',type(yf[0]))
print('Size of the input array: ',len(data))
print('Size of the Fourier transform: ',len(xf))
df = xf[1]-xf[0]
print(f'Width of the frequency bins: {df} Hz')
plt.figure()
plt.loglog(xf, np.abs(yf))
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.show()
plt.figure()
plt.plot(xf, np.abs(yf))
plt.yscale('log')
plt.xlim(100,2000)
plt.xlabel('Frequency (Hz)')
plt.ylabel('FFT magnitude (a.u.)')
plt.show()
```
## PSO - Particle Swarm Optimisation
**About PSO -**
PSO is a biologically inspired meta-heuristic optimisation algorithm. It takes its inspiration from bird flocking and fish schooling, and it works quite well in practice. So let us code it up and optimise a function.
```
#dependencies
import random
import math
import copy # for array copying
import sys
```
### COST Function
So basically the function we are trying to optimise will become our cost function.
What cost functions we will see:
1. Sum of squares
2. Rastrigin's function
### Rastrigin's function
Rastrigin's function in $n$ dimensions is
$$f(\mathbf{x}) = 10n + \sum_{i=1}^{n}\left[x_i^2 - 10\cos(2\pi x_i)\right]$$
*(The 3-D surface rendering of the function is not reproduced here.)*
It is a non-convex function with a lot of local minima (i.e. multi-modal: many local optima), which makes it a fairly difficult test problem, and it is the one we will test on.
```
# lets code the rastrigins function
def error(position):
err = 0.0
for i in range(len(position)):
xi = position[i]
err += (xi * xi) - (10 * math.cos(2 * math.pi * xi))
err = 10*len(position) + err
return err
```
### Particle
A particle basically maintains the following params:
1. particle position
2. particle velocity
3. best position individual
4. best error individual
5. error individual
The action it can take when traversing over its search space looks like -
```
Update velocity -
w1*towards_current_direction(intertia) + w2*towards_self_best + w3*towards_swarm_best
Update position -
Add current_velocity to previous_postion to obtain new_velocity
```
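In equation form, the updates for each dimension $i$ are
$$v_i \leftarrow w\,v_i + c_1 r_1\,(p^{\text{best}}_i - x_i) + c_2 r_2\,(g^{\text{best}}_i - x_i), \qquad x_i \leftarrow x_i + v_i$$
where $w$ is the inertia weight, $c_1$ the cognitive constant, $c_2$ the social constant, $p^{\text{best}}$ the particle's own best position and $g^{\text{best}}$ the swarm's best position. (Canonical PSO draws $r_1, r_2$ from $U(0,1)$; the class below samples them from $U(-1,1)$.)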
Now suppose the particle finds a position that is better than the global best; it then has to update the global value. So we have its fitness evaluation function -
```
evaluate fitness -
plug in current_position into test function to get where exactly you are that will give you the minima/maxima value
check against the global minima/maxima whether yours is better
assign value to global accordingly
```
```
# let us construct the class Particle
class Particle:
def __init__(self,x0):
self.position_i=[] # particle position
self.velocity_i=[] # particle velocity
self.pos_best_i=[] # best position individual
self.err_best_i=-1 # best error individual
self.err_i=-1 # error individual
for i in range(0,num_dimensions):
self.velocity_i.append(random.uniform(-1,1))
self.position_i.append(x0[i])
# evaluate current fitness
def evaluate(self,costFunc):
self.err_i=costFunc(self.position_i)
# check to see if the current position is an individual best
if self.err_i < self.err_best_i or self.err_best_i==-1:
            self.pos_best_i=copy.copy(self.position_i)  # copy the list so the stored best doesn't change as the particle moves
self.err_best_i=self.err_i
# update new particle velocity
def update_velocity(self,pos_best_g):
w=0.5 # constant inertia weight (how much to weigh the previous velocity)
c1=1 # cognative constant
c2=2 # social constant
for i in range(0,num_dimensions):
r1=random.uniform(-1,1)
r2=random.uniform(-1,1)
vel_cognitive=c1*r1*(self.pos_best_i[i]-self.position_i[i])
vel_social=c2*r2*(pos_best_g[i]-self.position_i[i])
self.velocity_i[i]=w*self.velocity_i[i]+vel_cognitive+vel_social
# update the particle position based off new velocity updates
def update_position(self,bounds):
for i in range(0,num_dimensions):
self.position_i[i]=self.position_i[i]+self.velocity_i[i]
# adjust maximum position if necessary
if self.position_i[i]>bounds[i][1]:
self.position_i[i]=bounds[i][1]
            # adjust minimum position if necessary
if self.position_i[i] < bounds[i][0]:
self.position_i[i]=bounds[i][0]
```
### __PSO__ (Particle Swarm Optimisation)
In particle swarm optimisation we
1. Initialise a swarm of particles to explore the search space.
2. For each particle, check whether it has discovered a new best (minimum/maximum).
3. The overall group's movement (the particles' velocities) is guided toward the best position found by the swarm.
```
# Now let us define a class PSO
class PSO():
def __init__(self,costFunc,x0,bounds,num_particles,maxiter):
global num_dimensions
num_dimensions=len(x0)
err_best_g=-1 # best error for group
pos_best_g=[] # best position for group
# establish the swarm
swarm=[]
for i in range(0,num_particles):
swarm.append(Particle(x0))
# begin optimization loop
i=0
while i < maxiter:
#print i,err_best_g
# cycle through particles in swarm and evaluate fitness
for j in range(0,num_particles):
swarm[j].evaluate(costFunc)
# determine if current particle is the best (globally)
if swarm[j].err_i < err_best_g or err_best_g == -1:
pos_best_g=list(swarm[j].position_i)
err_best_g=float(swarm[j].err_i)
# cycle through swarm and update velocities and position
for j in range(0,num_particles):
swarm[j].update_velocity(pos_best_g)
swarm[j].update_position(bounds)
i+=1
# print final results
print ('\nFINAL:')
print (pos_best_g)
print (err_best_g)
%time
initial=[5,5] # initial starting location [x1,x2...]
bounds=[(-10,10),(-10,10)] # input bounds [(x1_min,x1_max),(x2_min,x2_max)...]
PSO(error,initial,bounds,num_particles=15,maxiter=30)
```
Further on, we will try to parallelize the PSO algorithm.
## Module 2.2: Working with CNNs in Keras (A Review)
We turn to implementing a CNN in the Keras functional API. In this module we will pay attention to:
1. Using the Keras functional API for defining models.
2. Implementing dropout regularization.
Those students who are comfortable with all these matters might consider skipping ahead.
Note that we will not spend time tuning hyper-parameters: The purpose is to show how different techniques can be implemented in Keras, not to solve particular data science problems as optimally as possible. Obviously, most techniques include hyper-parameters that need to be tuned for optimal performance.
We start by importing required libraries.
```
import numpy as np
from sklearn.metrics import confusion_matrix,classification_report
from keras.datasets import cifar10
from keras.models import Sequential
from keras import Model
from keras.layers import Dense,Dropout,Flatten,Activation,Input
from keras.optimizers import Adam
from keras.layers.convolutional import Conv2D,MaxPooling2D
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
import matplotlib.pyplot as plt
```
We will use the CIFAR10 dataset. This consists of small (32 x 32 pixel) color images of 10 different types of objects. It is included in the keras.datasets library.
We load the images. These are already split into training and test cases. We need to normalize the pixel values to be between 0 and 1, and turn our integer labels into one-hot vectors - these are 1d-arrays whose length equals the number of classes, with zeros everywhere except at the position of the specified label, which is a 1. They can be read as the probability that the image belongs to each class.
We also make a vector of class/label names for display purposes, as the label arrays contain only integers.
```
# Load images
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
# Make versions of the labels that are one-hot vectors
train_labels_array=np_utils.to_categorical(train_labels, 10)
test_labels_array=np_utils.to_categorical(test_labels, 10)
# Make vector of classnames
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck']
train_labels_array.shape
```
Let's make a function to have a look at the images.
```
def show_images(images,labels,class_names,random=True):
plt.figure(figsize=(10,10))
if random:
indices=np.random.randint(0,images.shape[0],25)
else:
indices=np.array([i for i in range(25)])
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(images[indices[i]], cmap=plt.cm.binary)
# The CIFAR labels happen to be arrays,
# which is why we need the extra index
plt.xlabel(class_names[labels[indices[i]][0]])
plt.show()
```
Now we run it. We will see 25 random images from the dataset that we pass. If you set random=False you will see the first 25 images, the variety of which reassures us that the data is in a random order. (If this was a real world problem, such re-assurances would be insufficient, and we would shuffle the data.)
```
show_images(train_images,train_labels,class_names,False)
```
Now we create a function that will define the network architecture. Note that we introduce dropout layers for regularization purposes. We discussed these in the last module.
For comparison, the code to specify the same network using the sequential approach is provided in a second function.
```
def get_model():
inputs = Input(shape=(32, 32, 3),name="Input")
conv1 = Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))(inputs)
pool1 = MaxPooling2D((2, 2))(conv1)
drop1 = Dropout(0.5)(pool1)
conv2 = Conv2D(64, (3, 3), activation='relu')(drop1)
pool2 = MaxPooling2D((2, 2))(conv2)
drop2 = Dropout(0.5)(pool2)
conv3 = Conv2D(64, (3, 3), activation='relu')(drop2)
flat = Flatten()(conv3)
dense1 = Dense(64, activation='relu')(flat)
outputs = Dense(10, activation='softmax')(dense1)
model = Model(inputs=inputs,outputs=outputs)
return model
# For comparison, this is how we would define the same network with the Sequential API
def get_model_sequential():
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
return model
```
We will get our model.
```
model=get_model()
```
Now we will define an optimizer and compile it. If you are unfamiliar with the different types of optimizers available in keras, I suggest you read the keras documentation [here](https://keras.io/optimizers/) and play around training the model with different alternatives.
```
opt=Adam()
```
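If you want to experiment as suggested above, you could swap in a different optimizer before compiling. A minimal sketch (assuming `SGD` and `RMSprop` are available from the same `keras.optimizers` module as `Adam`; argument names vary slightly between Keras versions):
```
# Hypothetical alternatives to Adam, for experimentation only
from keras.optimizers import SGD, RMSprop
opt_sgd = SGD(momentum=0.9)   # classic SGD with momentum
opt_rmsprop = RMSprop()       # RMSprop with default settings
# e.g. set opt = opt_sgd before calling model.compile(...)
```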
And we compile our model with the optimizer ready for training. We use categorical crossentropy as our loss function as this is a good default choice for working with a multi-class categorical target variable (i.e. the image labels).
```
model.compile(optimizer=opt,
loss='categorical_crossentropy',
metrics=['accuracy'])
```
Now we fit (train) the model. We will set the training to continue for 100 epochs, but use an early stopping callback which means it should terminate much quicker than this.
```
# Before calling fit, we create the Early Stopping callback.
# We set it up to stop if improvement in the validation loss
# does not occur over 10 epochs. When stopping occurs, the
# weights associated with the best validation loss are restored.
earlyStopping = EarlyStopping(monitor="val_loss",
patience=10,
verbose=1,
restore_best_weights=True)
# We need to use the one-hot vector version of the labels
# This shouldn't go through all 100 epochs, because of the
# early stopping, but can take some time.
history = model.fit(train_images,
train_labels_array,
epochs=100,
shuffle=True,
callbacks=[earlyStopping],
validation_split=.2)
```
We will plot the training history to see a graphical representation of the training.
```
def plot_training_history(history):
    # Older Keras versions log 'acc'/'val_acc'; newer versions log 'accuracy'/'val_accuracy'
    acc_key = 'acc' if 'acc' in history.history else 'accuracy'
    plt.plot(history.history[acc_key])
    plt.plot(history.history['val_' + acc_key])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model accuracy and loss')
plt.xlabel('Epoch')
plt.legend(['Accuracy','Validation Accuracy', 'Loss',
'Validation Loss'], loc='upper right')
plt.show()
plot_training_history(history)
```
Finally, for fun let's see how our improved model performs on our test data. But remember that we have not spent any time or effort optimizing this model: for a real problem we would determine good values for the dropout regularization, as well as tune the architecture and optimizer (a sketch of such a sweep is shown after the confusion-matrix cell below).
We make a function that will show the confusion matrix and classification report, and then run it.
```
def test_model(model,x,y):
y_pred = model.predict(x)
y_pred = np.argmax(y_pred,axis=1)
cm = confusion_matrix(y, y_pred)
print("Confusion Matrix:")
print(cm)
print("Classification report:")
print(classification_report(y, y_pred))
test_model(model,test_images,test_labels)
```
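As a rough sketch of the kind of tuning mentioned above (my own illustration; the candidate dropout rates and the 10-epoch budget are arbitrary, and serious tuning would also vary the architecture and optimizer), a simple dropout sweep could reuse a parameterised version of the model builder and compare validation losses:
```
# Hypothetical dropout-rate sweep, reusing the layer classes imported earlier
def get_model_with_dropout(rate):
    inputs = Input(shape=(32, 32, 3))
    x = Conv2D(32, (3, 3), activation='relu')(inputs)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(rate)(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = MaxPooling2D((2, 2))(x)
    x = Dropout(rate)(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = Flatten()(x)
    x = Dense(64, activation='relu')(x)
    outputs = Dense(10, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)

for rate in [0.25, 0.4, 0.5]:
    candidate = get_model_with_dropout(rate)
    candidate.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    hist = candidate.fit(train_images, train_labels_array, epochs=10,
                         validation_split=.2, verbose=0)
    print('dropout rate', rate, 'best validation loss', min(hist.history['val_loss']))
```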
# Dependent density regression
In another [example](dp_mix.ipynb), we showed how to use Dirichlet processes to perform Bayesian nonparametric density estimation. This example expands on the previous one, illustrating dependent density regression.
Just as Dirichlet process mixtures can be thought of as infinite mixture models that select the number of active components as part of inference, dependent density regression can be thought of as infinite [mixtures of experts](https://en.wikipedia.org/wiki/Committee_machine) that select the active experts as part of inference. Their flexibility and modularity make them powerful tools for performing nonparametric Bayesian data analysis.
```
import arviz as az
import numpy as np
import pandas as pd
import pymc3 as pm
import seaborn as sns
from IPython.display import HTML
from matplotlib import animation as ani
from matplotlib import pyplot as plt
from theano import tensor as tt
print(f"Running on PyMC3 v{pm.__version__}")
%config InlineBackend.figure_format = 'retina'
plt.rc("animation", writer="ffmpeg")
blue, *_ = sns.color_palette()
az.style.use("arviz-darkgrid")
SEED = 972915 # from random.org; for reproducibility
np.random.seed(SEED)
```
We will use the LIDAR data set from Larry Wasserman's excellent book, [_All of Nonparametric Statistics_](http://www.stat.cmu.edu/~larry/all-of-nonpar/). We standardize the data set to improve the rate of convergence of our samples.
```
DATA_URI = "http://www.stat.cmu.edu/~larry/all-of-nonpar/=data/lidar.dat"
def standardize(x):
return (x - x.mean()) / x.std()
df = pd.read_csv(DATA_URI, sep=r"\s{1,3}", engine="python").assign(
std_range=lambda df: standardize(df.range), std_logratio=lambda df: standardize(df.logratio)
)
df.head()
```
We plot the LIDAR data below.
```
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(df.std_range, df.std_logratio, color=blue)
ax.set_xticklabels([])
ax.set_xlabel("Standardized range")
ax.set_yticklabels([])
ax.set_ylabel("Standardized log ratio");
```
This data set has two interesting properties that make it useful for illustrating dependent density regression.
1. The relationship between range and log ratio is nonlinear, but has locally linear components.
2. The observation noise is [heteroskedastic](https://en.wikipedia.org/wiki/Heteroscedasticity); that is, the magnitude of the variance varies with the range.
The intuitive idea behind dependent density regression is to reduce the problem to many (related) density estimates, conditioned on fixed values of the predictors. The following animation illustrates this intuition.
```
fig, (scatter_ax, hist_ax) = plt.subplots(ncols=2, figsize=(16, 6))
scatter_ax.scatter(df.std_range, df.std_logratio, color=blue, zorder=2)
scatter_ax.set_xticklabels([])
scatter_ax.set_xlabel("Standardized range")
scatter_ax.set_yticklabels([])
scatter_ax.set_ylabel("Standardized log ratio")
bins = np.linspace(df.std_range.min(), df.std_range.max(), 25)
hist_ax.hist(df.std_logratio, bins=bins, color="k", lw=0, alpha=0.25, label="All data")
hist_ax.set_xticklabels([])
hist_ax.set_xlabel("Standardized log ratio")
hist_ax.set_yticklabels([])
hist_ax.set_ylabel("Frequency")
hist_ax.legend(loc=2)
endpoints = np.linspace(1.05 * df.std_range.min(), 1.05 * df.std_range.max(), 15)
frame_artists = []
for low, high in zip(endpoints[:-1], endpoints[2:]):
interval = scatter_ax.axvspan(low, high, color="k", alpha=0.5, lw=0, zorder=1)
*_, bars = hist_ax.hist(
df[df.std_range.between(low, high)].std_logratio, bins=bins, color="k", lw=0, alpha=0.5
)
frame_artists.append((interval,) + tuple(bars))
animation = ani.ArtistAnimation(fig, frame_artists, interval=500, repeat_delay=3000, blit=True)
plt.close()
# prevent the intermediate figure from showing
HTML(animation.to_html5_video())
```
As we slice the data with a window sliding along the x-axis in the left plot, the empirical distribution of the y-values of the points in the window varies in the right plot. An important aspect of this approach is that the density estimates that correspond to close values of the predictor are similar.
In the previous example, we saw that a Dirichlet process estimates a probability density as a mixture model with infinitely many components. In the case of normal component distributions,
$$y \sim \sum_{i = 1}^{\infty} w_i \cdot N(\mu_i, \tau_i^{-1}),$$
where the mixture weights, $w_1, w_2, \ldots$, are generated by a [stick-breaking process](https://en.wikipedia.org/wiki/Dirichlet_process#The_stick-breaking_process).
Dependent density regression generalizes this representation of the Dirichlet process mixture model by allowing the mixture weights and component means to vary conditioned on the value of the predictor, $x$. That is,
$$y\ |\ x \sim \sum_{i = 1}^{\infty} w_i\ |\ x \cdot N(\mu_i\ |\ x, \tau_i^{-1}).$$
In this example, we will follow Chapter 23 of [_Bayesian Data Analysis_](http://www.stat.columbia.edu/~gelman/book/) and use a probit stick-breaking process to determine the conditional mixture weights, $w_i\ |\ x$. The probit stick-breaking process starts by defining
$$v_i\ |\ x = \Phi(\alpha_i + \beta_i x),$$
where $\Phi$ is the cumulative distribution function of the standard normal distribution. We then obtain $w_i\ |\ x$ by applying the stick breaking process to $v_i\ |\ x$. That is,
$$w_i\ |\ x = v_i\ |\ x \cdot \prod_{j = 1}^{i - 1} (1 - v_j\ |\ x).$$
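As a quick numeric illustration of the stick-breaking identity (my own example, not from the original post): if at some fixed $x$ we had $v_1\ |\ x = v_2\ |\ x = v_3\ |\ x = 0.5$, then $w_1\ |\ x = 0.5$, $w_2\ |\ x = 0.5 \cdot 0.5 = 0.25$, and $w_3\ |\ x = 0.5 \cdot 0.5^2 = 0.125$, so each successive component receives a geometrically smaller share of the remaining probability.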
For the LIDAR data set, we use independent normal priors $\alpha_i \sim N(0, 5^2)$ and $\beta_i \sim N(0, 5^2)$. We now express this model for the conditional mixture weights using `PyMC3`.
```
def norm_cdf(z):
return 0.5 * (1 + tt.erf(z / np.sqrt(2)))
def stick_breaking(v):
return v * tt.concatenate(
[tt.ones_like(v[:, :1]), tt.extra_ops.cumprod(1 - v, axis=1)[:, :-1]], axis=1
)
N = len(df)
K = 20
std_range = df.std_range.values[:, np.newaxis]
std_logratio = df.std_logratio.values
with pm.Model(coords={"N": np.arange(N), "K": np.arange(K) + 1, "one": [1]}) as model:
alpha = pm.Normal("alpha", 0.0, 5.0, dims="K")
beta = pm.Normal("beta", 0.0, 5.0, dims=("one", "K"))
x = pm.Data("x", std_range)
v = norm_cdf(alpha + pm.math.dot(x, beta))
w = pm.Deterministic("w", stick_breaking(v), dims=["N", "K"])
```
We have defined `x` as a `pm.Data` container in order to use `PyMC3`'s posterior prediction capabilities later.
While the dependent density regression model theoretically has infinitely many components, we must truncate the model to finitely many components (in this case, twenty) in order to express it using `PyMC3`. After sampling from the model, we will verify that truncation did not unduly influence our results.
Since the LIDAR data seems to have several linear components, we use the linear models
$$
\begin{align*}
\mu_i\ |\ x
& \sim \gamma_i + \delta_i x \\
\gamma_i
& \sim N(0, 10^2) \\
\delta_i
& \sim N(0, 10^2)
\end{align*}
$$
for the conditional component means.
```
with model:
gamma = pm.Normal("gamma", 0.0, 10.0, dims="K")
delta = pm.Normal("delta", 0.0, 10.0, dims=("one", "K"))
mu = pm.Deterministic("mu", gamma + pm.math.dot(x, delta))
```
Finally, we place the prior $\tau_i \sim \textrm{Gamma}(1, 1)$ on the component precisions.
```
with model:
tau = pm.Gamma("tau", 1.0, 1.0, dims="K")
y = pm.Data("y", std_logratio)
obs = pm.NormalMixture("obs", w, mu, tau=tau, observed=y)
pm.model_to_graphviz(model)
```
We now sample from the dependent density regression model.
```
SAMPLES = 20000
BURN = 10000
with model:
step = pm.Metropolis()
trace = pm.sample(SAMPLES, tune=BURN, step=step, random_seed=SEED, return_inferencedata=True)
```
To verify that truncation did not unduly influence our results, we plot the largest posterior expected mixture weight for each component. (In this model, each point has a mixture weight for each component, so we plot the maximum mixture weight for each component across all data points in order to judge if the component exerts any influence on the posterior.)
```
fig, ax = plt.subplots(figsize=(8, 6))
max_mixture_weights = trace.posterior["w"].mean(("chain", "draw")).max("N")
ax.bar(max_mixture_weights.coords.to_index(), max_mixture_weights)
ax.set_xlim(1 - 0.5, K + 0.5)
ax.set_xticks(np.arange(0, K, 2) + 1)
ax.set_xlabel("Mixture component")
ax.set_ylabel("Largest posterior expected\nmixture weight");
```
Since only three mixture components have appreciable posterior expected weight for any data point, we can be fairly certain that truncation did not unduly influence our results. (If most components had appreciable posterior expected weight, truncation may have influenced the results, and we would have increased the number of components and sampled again.)
Visually, it is reasonable that the LIDAR data has three linear components, so these posterior expected weights seem to have identified the structure of the data well. We now sample from the posterior predictive distribution to get a better understanding of the model's performance.
```
PP_SAMPLES = 5000
lidar_pp_x = np.linspace(std_range.min() - 0.05, std_range.max() + 0.05, 100)
with model:
pm.set_data({"x": lidar_pp_x[:, np.newaxis]})
pp_trace = pm.sample_posterior_predictive(trace, PP_SAMPLES, random_seed=SEED)
```
Below we plot the posterior expected value and the 95% posterior credible interval.
```
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(df.std_range, df.std_logratio, color=blue, zorder=10, label=None)
low, high = np.percentile(pp_trace["obs"], [2.5, 97.5], axis=0)
ax.fill_between(
lidar_pp_x, low, high, color="k", alpha=0.35, zorder=5, label="95% posterior credible interval"
)
ax.plot(lidar_pp_x, pp_trace["obs"].mean(axis=0), c="k", zorder=6, label="Posterior expected value")
ax.set_xticklabels([])
ax.set_xlabel("Standardized range")
ax.set_yticklabels([])
ax.set_ylabel("Standardized log ratio")
ax.legend(loc=1)
ax.set_title("LIDAR Data");
```
The model has fit the linear components of the data well, and also accommodated its heteroskedasticity. This flexibility, along with the ability to modularly specify the conditional mixture weights and conditional component densities, makes dependent density regression an extremely useful nonparametric Bayesian model.
To learn more about dependent density regression and related models, consult [_Bayesian Data Analysis_](http://www.stat.columbia.edu/~gelman/book/), [_Bayesian Nonparametric Data Analysis_](http://www.springer.com/us/book/9783319189673), or [_Bayesian Nonparametrics_](https://www.google.com/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=bayesian+nonparametrics+book).
This example first appeared [here](http://austinrochford.com/posts/2017-01-18-ddp-pymc3.html).
Author: [Austin Rochford](https://github.com/AustinRochford/)
```
%load_ext watermark
%watermark -n -u -v -iv -w
```
# Millikan Oil Drop
**Measurement of the electron charge**
```
import os
import numpy as np
import pandas as pd

rho=886 # kg/m^3
dV = .5 #volts
dd = .000005 # meters
dP = 5 # pascals
g=9.8 # m/s^2
eta= 1.8330*10**(-5) # N*s/m^2
b=8.20*10**(-3) # Pa*m
p=101325 #Pa
V=500 #V
e=1.6*10**(-19)
d_array=10**(-3)*np.array([7.55,7.59,7.60,7.60,7.60,7.61]) # unit: m
d_mean=d_array.mean()
d=d_mean # alias used in the error-propagation formulas below
d_std=d_array.std()
print("d_mean: ",d_mean)
print("d_std: ",d_std)
def reject_outliers(data, m=2):
'''
    remove anomalous data points that lie more than m (default 2) standard deviations from the mean of the array
'''
return data[abs(data - np.mean(data)) < m * np.std(data)]
```
**Load data from files**
```
data_path = "/Users/Angel/Documents/MilikanData/"
statistics=[]
for file_name in os.listdir(data_path):
name=file_name[:3]
obj_drop=pd.read_csv(data_path+file_name).dropna()
    # separate rising and falling velocities; remove anomalous velocities recorded when the field direction switches
v_y=obj_drop["v_{y}"].values
y = obj_drop["y"] #y values
n_points=len(v_y)
v_r=reject_outliers(v_y[v_y>0])
v_f=reject_outliers(v_y[v_y<0])
# calculate mean and deviation
(v_r_mean,v_r_std)=(v_r.mean(),v_r.std())
(v_f_mean,v_f_std)=(np.abs(v_f.mean()),v_f.std())
# calculate other properties
a=np.sqrt((b/2/p)**2+9*eta*v_f_mean/2/rho/g)-b/(2*p) #droplet radius
m=4*np.pi/3*a**3*rho # droplet mass
q=m*g*d_mean*(v_f_mean+v_r_mean)/V/v_f_mean #droplet charge
# error propagation
dely = np.roll(y, -2)-y
delt = .4
error_y = 2e-6
error_t = .1
error_v = np.sqrt((2*error_y/dely)**2+(2*error_t/delt)**2)
error_v.pop(n_points-1)
error_v.pop(n_points-2)
error_v = np.append([0.5],error_v)
error_v = np.append(error_v, [0.5])
error_v = np.abs(v_y)*error_v
meanerror_v = error_v[~np.isinf(error_v)].mean()
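    # Gaussian error propagation: partial derivatives of the charge q with respect to
    # v_f, v_r, V, d and the pressure term (split into dqdP1 * dqdP2), combined in quadrature below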
dqdvf = 2*np.pi*(((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))**(-.5))*((np.sqrt(9*eta*v_f_mean/(2*rho*g)+(b/(2*p))**2)-b/(2*p))**2)*9*eta/(2*rho*g)*rho*g*d*(v_f_mean+v_r_mean)/(V*v_f_mean) + 4*np.pi/3*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**3)*(V*v_f_mean*rho*g*d*v_r_mean-rho*g*d*(v_f_mean+v_r_mean)*V)/((V*v_f_mean)**2)
dqdvr = 4*np.pi/3*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**3)*(rho*g*d/V)
dqdV = -4*np.pi/3*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**3)*(v_f_mean*rho*g*d*(v_f_mean+v_r_mean)/((V*v_f_mean)**2))
dqdd = 4*np.pi/3*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**3)*rho*g*(v_f_mean+v_r_mean)/(V*v_f_mean)
dqdP1 = 2*np.pi*((np.sqrt((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))-b/(2*p))**2)*rho*g*d*(v_f_mean+v_r_mean)/(V*v_f_mean)
dqdP2 = -(((b/(2*p))**2+9*eta*v_f_mean/(2*rho*g))**(-.5))*(b**2)/(2*p**3)+b/(4*p**2)
error_func = np.sqrt(((dqdvf)*(meanerror_v))**2+((dqdvr)*(meanerror_v))**2+((dqdV)*(dV))**2+((dqdd)*(dd))**2+((dqdP1*dqdP2)*(dP))**2)
statistics.append(np.array((name,n_points,v_r_mean,v_r_std,v_f_mean,v_f_std, meanerror_v, a,m,q, error_func)))
```
Calculation of the attached charge
```
labels = ["name","n_points","v_r_mean","v_r_std","v_f_mean","v_f_std","meanerror_v","a","m","q","q_error"]
overall = pd.DataFrame(statistics,columns=labels,dtype="float64")
overall
import matplotlib.pylab as plt
plt.figure().dpi=100
plt.xlabel("Charge attached")
plt.ylabel("Number of droplets")
plt.title("Histogram of charge carried by droplets")
(overall.q/e).hist(bins=21)
def clustering(arr,x):
arr = list(arr/x)
num = int(max(arr))
clusters= []
for i in range(num+1):
clusters.append(list(filter(lambda x:i<x<i+1,arr)))
return clusters
from scipy.optimize import minimize
def obj_error(x):
test = list(map(np.mean,clustering(overall.q,x)))
estimate_delta_q = np.array(test[:-1])-np.array(test[1:])
estimate_e = estimate_delta_q[~np.isnan(estimate_delta_q)]
estimate_e = estimate_e*e
return abs(estimate_e.mean())
obj_error(e)
#valuee = minimize(obj_error,.8e-19)
#print(valuee.x)
```
# 3.6 Refinements with federated learning
## Data loading and preprocessing
```
# read more: https://www.tensorflow.org/federated/tutorials/federated_learning_for_text_generation
import nest_asyncio # pip install nest_asyncio
import tensorflow_federated as tff # pip install tensorflow_federated
import collections
import functools
import os
import time
import numpy as np
import tensorflow as tf
#nest_asyncio.apply()
tf.compat.v1.enable_v2_behavior()
np.random.seed(0)
# Test the TFF is working:
tff.federated_computation(lambda: 'Hello, World!')()
import numpy as np
# A fixed vocabulary of ASCII chars that occur in the works of Shakespeare and Dickens:
vocab = list('dhlptx@DHLPTX $(,048cgkoswCGKOSW[_#\'/37;?bfjnrvzBFJNRVZ"&*.26:\naeimquyAEIMQUY]!%)-159\r')
# Creating a mapping from unique characters to indices
char2idx = {u:i for i, u in enumerate(vocab)}
idx2char = np.array(vocab)
```
## Data
```
train_data, test_data = tff.simulation.datasets.shakespeare.load_data()
# Here the play is "The Tragedy of King Lear" and the character is "King".
raw_example_dataset = train_data.create_tf_dataset_for_client(
'THE_TRAGEDY_OF_KING_LEAR_KING')
# To allow for future extensions, each entry x
# is an OrderedDict with a single key 'snippets' which contains the text.
for x in raw_example_dataset.take(2):
print(x['snippets'])
# Input pre-processing parameters
SEQ_LENGTH = 100
BATCH_SIZE = 8
BUFFER_SIZE = 10000 # For dataset shuffling
```
## Text generation
```
import tensorflow as tf
# Construct a lookup table to map string chars to indexes,
# using the vocab loaded above:
table = tf.lookup.StaticHashTable(
tf.lookup.KeyValueTensorInitializer(
keys=vocab, values=tf.constant(list(range(len(vocab))),
dtype=tf.int64)),
default_value=0)
def to_ids(x):
s = tf.reshape(x['snippets'], shape=[1])
chars = tf.strings.bytes_split(s).values
ids = table.lookup(chars)
return ids
def split_input_target(chunk):
input_text = tf.map_fn(lambda x: x[:-1], chunk)
target_text = tf.map_fn(lambda x: x[1:], chunk)
return (input_text, target_text)
def preprocess(dataset):
return (
# Map ASCII chars to int64 indexes using the vocab
dataset.map(to_ids)
# Split into individual chars
.unbatch()
# Form example sequences of SEQ_LENGTH +1
.batch(SEQ_LENGTH + 1, drop_remainder=True)
# Shuffle and form minibatches
.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True)
# And finally split into (input, target) tuples,
# each of length SEQ_LENGTH.
.map(split_input_target))
example_dataset = preprocess(raw_example_dataset)
print(example_dataset.element_spec)
import os
def load_model(batch_size):
urls = {
1: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch1.kerasmodel',
8: 'https://storage.googleapis.com/tff-models-public/dickens_rnn.batch8.kerasmodel'}
assert batch_size in urls, 'batch_size must be in ' + str(urls.keys())
url = urls[batch_size]
local_file = tf.keras.utils.get_file(os.path.basename(url), origin=url)
return tf.keras.models.load_model(local_file, compile=False)
def generate_text(model, start_string):
# Evaluation step (generating text using the learned model)
# Number of characters to generate
num_generate = 1000
# Converting our start string to numbers (vectorizing)
input_eval = [char2idx[s] for s in start_string]
input_eval = tf.expand_dims(input_eval, 0)
# Empty string to store our results
text_generated = []
    # Lower temperatures result in more predictable text.
    # Higher temperatures result in more surprising text.
# Experiment to find the best setting.
temperature = 1.0
# Here batch size == 1
model.reset_states()
for i in range(num_generate):
predictions = model(input_eval)
# remove the batch dimension
predictions = tf.squeeze(predictions, 0)
# using a categorical distribution to predict the character returned by the model
predictions = predictions / temperature
predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy()
# We pass the predicted character as the next input to the model
# along with the previous hidden state
input_eval = tf.expand_dims([predicted_id], 0)
text_generated.append(idx2char[predicted_id])
return (start_string + ''.join(text_generated))
# Text generation requires a batch_size=1 model.
keras_model_batch1 = load_model(batch_size=1)
print(generate_text(keras_model_batch1, 'What of TensorFlow Federated, you ask? '))
BATCH_SIZE = 8 # The training and eval batch size for the rest of this tutorial.
keras_model = load_model(batch_size=BATCH_SIZE)
keras_model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
```
## Federated learning
```
import collections
# Clone the keras_model inside `create_tff_model()`, which TFF will
# call to produce a new copy of the model inside the graph that it will
# serialize. Note: we want to construct all the necessary objects we'll need
# _inside_ this method.
def create_tff_model():
# TFF uses a `dummy_batch` so it knows the types and shapes
# that your model expects.
x = np.random.randint(1, len(vocab), size=[BATCH_SIZE, SEQ_LENGTH])
dummy_batch = collections.OrderedDict(x=x, y=x)
keras_model_clone = tf.keras.models.clone_model(keras_model)
return tff.learning.from_keras_model(
keras_model_clone,
dummy_batch=dummy_batch,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
fed_avg = tff.learning.build_federated_averaging_process(
model_fn=create_tff_model,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(lr=0.5))
nest_asyncio.apply()
NUM_ROUNDS = 5
state = fed_avg.initialize()
for _ in range(NUM_ROUNDS):
state, metrics = fed_avg.next(state, [example_dataset.take(5)])
print(f'loss={metrics.loss}')
```
# 0. Introduction
This document is a short introduction to XGBoost, based mainly on https://blog.csdn.net/qq_24519677/article/details/81869196
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ShuffleSplit  # sklearn.cross_validation has been removed from modern scikit-learn
from sklearn.preprocessing import LabelEncoder
import sklearn
import warnings
warnings.filterwarnings('ignore')
```
# 1. Data and Feature Preprocessing
```
train = pd.read_csv('data/train.csv')
test = pd.read_csv('data/test.csv')
train.info() # print information about the training data
test.info()
```
We handle missing values as follows: continuous features are filled with the column mean, and non-continuous (categorical) features are filled with the column mode. Missing values could also be imputed by training a machine-learning model to predict them, but that approach is not covered here:
```
def handle_na(train, test): # (the Cabin feature is dropped later on)
    fare_mean = train['Fare'].mean() # the test set's Fare feature has missing values; fill with the training-set mean
    test.loc[pd.isnull(test.Fare), 'Fare'] = fare_mean
    embarked_mode = train['Embarked'].mode() # fill with the mode
    train.loc[pd.isnull(train.Embarked), 'Embarked'] = embarked_mode[0]
    train.loc[pd.isnull(train.Age), 'Age'] = train['Age'].mean() # fill Age with the mean
    test.loc[pd.isnull(test.Age), 'Age'] = train['Age'].mean()
    return train, test
new_train, new_test = handle_na(train, test) # fill in the missing values
```
Since Embarked, Sex and Pclass are discrete (categorical) features, we apply one-hot/get_dummies encoding to them
```
# one-hot/get_dummies encode the Embarked, Sex and Pclass features
new_train = pd.get_dummies(new_train, columns=['Embarked', 'Sex', 'Pclass'])
new_test = pd.get_dummies(new_test, columns=['Embarked', 'Sex', 'Pclass'])
```
Then we drop the PassengerId, Name, Ticket, Cabin and Survived columns; these features are not used for prediction here
```
target = new_train['Survived'].values
# drop the PassengerId, Name, Ticket, Cabin and Survived columns
df_train = new_train.drop(['PassengerId','Name','Ticket','Cabin','Survived'], axis=1).values
df_test = new_test.drop(['PassengerId','Name','Ticket','Cabin'], axis=1).values
```
# 2. The XGBoost Model
## 2.1 Using the native XGBoost API
```
X_train,X_test,y_train,y_test = train_test_split(df_train,target,test_size = 0.3,random_state = 1) # split the data into training and test sets
data_train = xgb.DMatrix(X_train, y_train) # the native XGBoost API requires converting the data into DMatrix objects
data_test = xgb.DMatrix(X_test, y_test)
param = {'max_depth': 5, 'eta': 1, 'objective': 'binary:logistic'}
watchlist = [(data_test, 'test'), (data_train, 'train')]
n_round = 3 # train for 3 boosting rounds
booster = xgb.train(param, data_train, num_boost_round=n_round, evals=watchlist)
# compute the prediction accuracy
y_predicted = booster.predict(data_test)
y = data_test.get_label()
accuracy = sum(y == (y_predicted > 0.5))
accuracy_rate = float(accuracy) / len(y_predicted)
print('Total number of samples: {0}'.format(len(y_predicted)))
print('Number predicted correctly: {0}'.format(accuracy))
print('Accuracy: {0:.3f}'.format(accuracy_rate))
```
## 2.2 The XGBoost scikit-learn interface
```
X_train,X_test,y_train,y_test = train_test_split(df_train,target,test_size = 0.3,random_state = 1)
model = xgb.XGBClassifier(max_depth=3, n_estimators=200, learning_rate=0.01)
model.fit(X_train, y_train)
test_score = model.score(X_test, y_test)
print('test_score: {0}'.format(test_score))
```
Let's use the XGBoost model to make a single prediction.
```
try_pred = X_test[[0,1],:]
try_pred
try_pred_y = y_test[0:2]
try_pred_y
pred = model.predict(try_pred)
pred
```
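If we also want class probabilities rather than hard labels, the scikit-learn interface exposes `predict_proba`; a small illustration reusing the `model` and `try_pred` objects defined above:
```
# Probability of each class (not survived / survived) for the same two test rows
proba = model.predict_proba(try_pred)
proba
```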
# 3. Comparing XGBoost with other models
```
# apply each model and compare their scores
model_lr = LogisticRegression()
model_rf = RandomForestClassifier(n_estimators=200)
model_xgb = xgb.XGBClassifier(max_depth=5, n_estimators=200, learning_rate=0.01)
models = [model_lr, model_rf, model_xgb]
model_name = ['LogisticRegression', 'Random Forest', 'XGBoost']
cv = ShuffleSplit(n_splits=3, test_size=0.3, random_state=1)
for i in range(3):
print(model_name[i] + ":")
model = models[i]
    for train, test in cv.split(df_train):
model.fit(df_train[train], target[train])
train_score = model.score(df_train[train], target[train])
test_score = model.score(df_train[test], target[test])
        print('train score: {0:.5f} \t test score: {1:.5f}'.format(train_score, test_score))
```
###### Background
- As you know, non-zero target values are very rare in this competition; it is much like target imbalance in a classification problem.
- To handle imbalance in classification problems, we commonly use "stratified sampling".
- For this competition, we can simply apply stratified sampling to get a better-distributed split for the continuous target (a minimal sketch of the idea follows this list).
- To compare the effect of this strategy, I forked the good kernel (https://www.kaggle.com/prashantkikani/rstudio-lgb-single-model-lb1-6607) and used the same parameters and the same random seeds.
- I only changed the sampling strategy. OK, let's see whether it works.
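As a minimal, self-contained sketch of the idea (toy data, not the competition data; the bin edges are arbitrary): bin the log1p of a mostly-zero continuous target into pseudo-classes and hand those to `StratifiedKFold`, so every fold keeps roughly the same share of zero and non-zero targets.
```
# Toy illustration of stratified folds for a mostly-zero continuous target
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold

rng = np.random.RandomState(0)
# ~90% zeros, ~10% log-normally distributed revenues
y_toy = np.where(rng.rand(1000) < 0.9, 0.0, rng.lognormal(mean=15, sigma=1, size=1000))
# pseudo-label: coarse bins of log1p(target), separating zero and non-zero revenue
y_bins = pd.cut(np.log1p(y_toy), bins=[-0.1, 1.0, 14.0, 30.0], labels=False)
folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)
for fold, (trn_idx, val_idx) in enumerate(folds.split(np.zeros((len(y_toy), 1)), y_bins)):
    print(fold, 'non-zero share in validation fold:', (y_toy[val_idx] > 0).mean().round(3))
```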
```
import os
import json
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import time
from datetime import datetime
import gc
import psutil
from sklearn.preprocessing import LabelEncoder
PATH="../input/"
NUM_ROUNDS = 20000
VERBOSE_EVAL = 500
STOP_ROUNDS = 100
N_SPLITS = 10
#the columns that will be parsed to extract the fields from the jsons
cols_to_parse = ['device', 'geoNetwork', 'totals', 'trafficSource']
def read_parse_dataframe(file_name):
#full path for the data file
path = PATH + file_name
#read the data file, convert the columns in the list of columns to parse using json loader,
#convert the `fullVisitorId` field as a string
data_df = pd.read_csv(path,
converters={column: json.loads for column in cols_to_parse},
dtype={'fullVisitorId': 'str'})
#parse the json-type columns
for col in cols_to_parse:
#each column became a dataset, with the columns the fields of the Json type object
json_col_df = json_normalize(data_df[col])
json_col_df.columns = [f"{col}_{sub_col}" for sub_col in json_col_df.columns]
#we drop the object column processed and we add the columns created from the json fields
data_df = data_df.drop(col, axis=1).merge(json_col_df, right_index=True, left_index=True)
return data_df
def process_date_time(data_df):
print("process date time ...")
data_df['date'] = data_df['date'].astype(str)
data_df["date"] = data_df["date"].apply(lambda x : x[:4] + "-" + x[4:6] + "-" + x[6:])
data_df["date"] = pd.to_datetime(data_df["date"])
data_df["year"] = data_df['date'].dt.year
data_df["month"] = data_df['date'].dt.month
data_df["day"] = data_df['date'].dt.day
data_df["weekday"] = data_df['date'].dt.weekday
data_df['weekofyear'] = data_df['date'].dt.weekofyear
data_df['month_unique_user_count'] = data_df.groupby('month')['fullVisitorId'].transform('nunique')
data_df['day_unique_user_count'] = data_df.groupby('day')['fullVisitorId'].transform('nunique')
data_df['weekday_unique_user_count'] = data_df.groupby('weekday')['fullVisitorId'].transform('nunique')
return data_df
def process_format(data_df):
print("process format ...")
for col in ['visitNumber', 'totals_hits', 'totals_pageviews']:
data_df[col] = data_df[col].astype(float)
data_df['trafficSource_adwordsClickInfo.isVideoAd'].fillna(True, inplace=True)
data_df['trafficSource_isTrueDirect'].fillna(False, inplace=True)
return data_df
def process_device(data_df):
print("process device ...")
data_df['browser_category'] = data_df['device_browser'] + '_' + data_df['device_deviceCategory']
data_df['browser_os'] = data_df['device_browser'] + '_' + data_df['device_operatingSystem']
return data_df
def process_totals(data_df):
print("process totals ...")
data_df['visitNumber'] = np.log1p(data_df['visitNumber'])
data_df['totals_hits'] = np.log1p(data_df['totals_hits'])
data_df['totals_pageviews'] = np.log1p(data_df['totals_pageviews'].fillna(0))
data_df['mean_hits_per_day'] = data_df.groupby(['day'])['totals_hits'].transform('mean')
data_df['sum_hits_per_day'] = data_df.groupby(['day'])['totals_hits'].transform('sum')
data_df['max_hits_per_day'] = data_df.groupby(['day'])['totals_hits'].transform('max')
data_df['min_hits_per_day'] = data_df.groupby(['day'])['totals_hits'].transform('min')
data_df['var_hits_per_day'] = data_df.groupby(['day'])['totals_hits'].transform('var')
data_df['mean_pageviews_per_day'] = data_df.groupby(['day'])['totals_pageviews'].transform('mean')
data_df['sum_pageviews_per_day'] = data_df.groupby(['day'])['totals_pageviews'].transform('sum')
data_df['max_pageviews_per_day'] = data_df.groupby(['day'])['totals_pageviews'].transform('max')
data_df['min_pageviews_per_day'] = data_df.groupby(['day'])['totals_pageviews'].transform('min')
return data_df
def process_geo_network(data_df):
print("process geo network ...")
data_df['sum_pageviews_per_network_domain'] = data_df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform('sum')
data_df['count_pageviews_per_network_domain'] = data_df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform('count')
data_df['mean_pageviews_per_network_domain'] = data_df.groupby('geoNetwork_networkDomain')['totals_pageviews'].transform('mean')
data_df['sum_hits_per_network_domain'] = data_df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('sum')
data_df['count_hits_per_network_domain'] = data_df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('count')
data_df['mean_hits_per_network_domain'] = data_df.groupby('geoNetwork_networkDomain')['totals_hits'].transform('mean')
return data_df
def process_traffic_source(data_df):
print("process traffic source ...")
data_df['source_country'] = data_df['trafficSource_source'] + '_' + data_df['geoNetwork_country']
data_df['campaign_medium'] = data_df['trafficSource_campaign'] + '_' + data_df['trafficSource_medium']
data_df['medium_hits_mean'] = data_df.groupby(['trafficSource_medium'])['totals_hits'].transform('mean')
data_df['medium_hits_max'] = data_df.groupby(['trafficSource_medium'])['totals_hits'].transform('max')
data_df['medium_hits_min'] = data_df.groupby(['trafficSource_medium'])['totals_hits'].transform('min')
data_df['medium_hits_sum'] = data_df.groupby(['trafficSource_medium'])['totals_hits'].transform('sum')
return data_df
#Feature processing
## Load data
print('reading train')
train_df = read_parse_dataframe('train.csv')
trn_len = train_df.shape[0]
train_df = process_date_time(train_df)
print('reading test')
test_df = read_parse_dataframe('test.csv')
test_df = process_date_time(test_df)
## Drop columns
cols_to_drop = [col for col in train_df.columns if train_df[col].nunique(dropna=False) == 1]
train_df.drop(cols_to_drop, axis=1, inplace=True)
test_df.drop([col for col in cols_to_drop if col in test_df.columns], axis=1, inplace=True)
###only one not null value
train_df.drop(['trafficSource_campaignCode'], axis=1, inplace=True)
###converting columns format
train_df['totals_transactionRevenue'] = train_df['totals_transactionRevenue'].astype(float)
train_df['totals_transactionRevenue'] = train_df['totals_transactionRevenue'].fillna(0)
# train_df['totals_transactionRevenue'] = np.log1p(train_df['totals_transactionRevenue'])
## Features engineering
train_df = process_format(train_df)
train_df = process_device(train_df)
train_df = process_totals(train_df)
train_df = process_geo_network(train_df)
train_df = process_traffic_source(train_df)
test_df = process_format(test_df)
test_df = process_device(test_df)
test_df = process_totals(test_df)
test_df = process_geo_network(test_df)
test_df = process_traffic_source(test_df)
## Categorical columns
print("process categorical columns ...")
num_cols = ['month_unique_user_count', 'day_unique_user_count', 'weekday_unique_user_count',
'visitNumber', 'totals_hits', 'totals_pageviews',
'mean_hits_per_day', 'sum_hits_per_day', 'min_hits_per_day', 'max_hits_per_day', 'var_hits_per_day',
'mean_pageviews_per_day', 'sum_pageviews_per_day', 'min_pageviews_per_day', 'max_pageviews_per_day',
'sum_pageviews_per_network_domain', 'count_pageviews_per_network_domain', 'mean_pageviews_per_network_domain',
'sum_hits_per_network_domain', 'count_hits_per_network_domain', 'mean_hits_per_network_domain',
'medium_hits_mean','medium_hits_min','medium_hits_max','medium_hits_sum']
not_used_cols = ["visitNumber", "date", "fullVisitorId", "sessionId",
"visitId", "visitStartTime", 'totals_transactionRevenue', 'trafficSource_referralPath']
cat_cols = [col for col in train_df.columns if col not in num_cols and col not in not_used_cols]
merged_df = pd.concat([train_df, test_df])
print('Cat columns : ', len(cat_cols))
ohe_cols = []
for i in cat_cols:
if len(set(merged_df[i].values)) < 100:
ohe_cols.append(i)
print('ohe_cols : ', ohe_cols)
print(len(ohe_cols))
merged_df = pd.get_dummies(merged_df, columns = ohe_cols)
train_df = merged_df[:trn_len]
test_df = merged_df[trn_len:]
del merged_df
gc.collect()
for col in cat_cols:
if col in ohe_cols:
continue
#print(col)
lbl = LabelEncoder()
lbl.fit(list(train_df[col].values.astype('str')) + list(test_df[col].values.astype('str')))
train_df[col] = lbl.transform(list(train_df[col].values.astype('str')))
test_df[col] = lbl.transform(list(test_df[col].values.astype('str')))
print('FINAL train shape : ', train_df.shape, ' test shape : ', test_df.shape)
#print(train_df.columns)
train_df = train_df.sort_values('date')
X = train_df.drop(not_used_cols, axis=1)
y = train_df['totals_transactionRevenue']
X_test = test_df.drop([col for col in not_used_cols if col in test_df.columns], axis=1)
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn import model_selection, preprocessing, metrics
import matplotlib.pyplot as plt
import seaborn as sns
lgb_params1 = {"objective" : "regression", "metric" : "rmse",
"max_depth": 8, "min_child_samples": 20,
"reg_alpha": 1, "reg_lambda": 1,
"num_leaves" : 257, "learning_rate" : 0.01,
"subsample" : 0.8, "colsample_bytree" : 0.8,
"verbosity": -1}
```
# Stratified sampling
- Before stratified sampling, we need to pseudo-label the continuous target.
- In this case, I categorize the continuous target into classes of fixed width on the log1p scale (the commented-out function below uses 12 classes of width 2; the `pd.cut` call actually used below uses bins of width 3).
```
# def categorize_target(x):
# if x < 2:
# return 0
# elif x < 4:
# return 1
# elif x < 6:
# return 2
# elif x < 8:
# return 3
# elif x < 10:
# return 4
# elif x < 12:
# return 5
# elif x < 14:
# return 6
# elif x < 16:
# return 7
# elif x < 18:
# return 8
# elif x < 20:
# return 9
# elif x < 22:
# return 10
# else:
# return 11
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
```
## Target, prediction process
- 1st: apply log1p to the target
- 2nd: apply expm1 to the predictions
- 3rd: sum the predictions per visitor
- 4th: apply log1p to the sum
```
# y_categorized = y.apply(categorize_target)
y_log = np.log1p(y)
y_categorized= pd.cut(y_log, bins=range(0,25,3), include_lowest=True,right=False, labels=range(0,24,3)) # Thanks to Vitaly Portnoy
FOLDs = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)
oof_lgb = np.zeros(len(train_df))
predictions_lgb = np.zeros(len(test_df))
features_lgb = list(X.columns)
feature_importance_df_lgb = pd.DataFrame()
for fold_, (trn_idx, val_idx) in enumerate(FOLDs.split(X, y_categorized)):
trn_data = lgb.Dataset(X.iloc[trn_idx], label=y_log.iloc[trn_idx])
val_data = lgb.Dataset(X.iloc[val_idx], label=y_log.iloc[val_idx])
print("LGB " + str(fold_) + "-" * 50)
num_round = 20000
clf = lgb.train(lgb_params1, trn_data, num_round, valid_sets = [trn_data, val_data], verbose_eval=1000, early_stopping_rounds = 100)
oof_lgb[val_idx] = clf.predict(X.iloc[val_idx], num_iteration=clf.best_iteration)
fold_importance_df_lgb = pd.DataFrame()
fold_importance_df_lgb["feature"] = features_lgb
fold_importance_df_lgb["importance"] = clf.feature_importance()
fold_importance_df_lgb["fold"] = fold_ + 1
feature_importance_df_lgb = pd.concat([feature_importance_df_lgb, fold_importance_df_lgb], axis=0)
predictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / FOLDs.n_splits
#lgb.plot_importance(clf, max_num_features=30)
cols = feature_importance_df_lgb[["feature", "importance"]].groupby("feature").mean().sort_values(by="importance", ascending=False)[:50].index
best_features_lgb = feature_importance_df_lgb.loc[feature_importance_df_lgb.feature.isin(cols)]
plt.figure(figsize=(14,10))
sns.barplot(x="importance", y="feature", data=best_features_lgb.sort_values(by="importance", ascending=False))
plt.title('LightGBM Features (avg over folds)')
plt.tight_layout()
plt.savefig('lgbm_importances.png')
x = []
for i in oof_lgb:
if i < 0:
x.append(0.0)
else:
x.append(i)
cv_lgb = mean_squared_error(x, y_log)**0.5
cv_lgb = str(cv_lgb)
cv_lgb = cv_lgb[:10]
pd.DataFrame({'preds': x}).to_csv('lgb_oof_' + cv_lgb + '.csv', index = False)
print("CV_LGB : ", cv_lgb)
sub_df = test_df[['fullVisitorId']].copy()
predictions_lgb[predictions_lgb<0] = 0
sub_df["PredictedLogRevenue"] = np.expm1(predictions_lgb)
sub_df = sub_df.groupby("fullVisitorId")["PredictedLogRevenue"].sum().reset_index()
sub_df.columns = ["fullVisitorId", "PredictedLogRevenue"]
sub_df["PredictedLogRevenue"] = np.log1p(sub_df["PredictedLogRevenue"])
sub_df.to_csv("submission.csv", index=False)
```
- My result is LB: 1.4627
# Conclusion
- The improvement seems small, but even a small gain can decide who wins a medal.
- This strategy could be improved further, for example by using more categories.
- How about using it?
**Note that the name of the callback `AccumulateStepper` has been changed to `AccumulateScheduler`**
https://forums.fast.ai/t/accumulating-gradients/33219/90?u=hwasiti
https://github.com/fastai/fastai/blob/fbbc6f91e8e8e91ba0e3cc98ac148f6b26b9e041/fastai/train.py#L99-L134
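For context, here is a minimal plain-PyTorch sketch of what gradient accumulation does (my own illustration of the general idea, not the fastai `AccumulateScheduler` implementation): run `n_step` small batches, accumulate their gradients, and only then take an optimizer step, so the effective batch size becomes `n_step * bs`.
```
# Hypothetical sketch of gradient accumulation with plain PyTorch
import torch

def train_with_accumulation(model, loss_fn, opt, dataloader, n_step=32):
    model.train()
    opt.zero_grad()
    for i, (xb, yb) in enumerate(dataloader):
        loss = loss_fn(model(xb), yb)
        (loss / n_step).backward()    # scale so the accumulated gradient matches a large-batch mean
        if (i + 1) % n_step == 0:     # step only once every n_step mini-batches
            opt.step()
            opt.zero_grad()
```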
```
import fastai
from fastai.vision import *
gpu_device = 0
defaults.device = torch.device(f'cuda:{gpu_device}')
torch.cuda.set_device(gpu_device)
def seed_everything(seed):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
seed_everything(42)
path = untar_data(URLs.PETS)
path_anno = path/'annotations'
path_img = path/'images'
fnames = get_image_files(path_img)
pat = re.compile(r'/([^/]+)_\d+.jpg$')
# Simplified RunningBatchNorm
# 07_batchnorm.ipynb (fastai course v3 part2 2019)
class RunningBatchNorm2d(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
# I have added self.nf so that it can be represented when
# printing the model in the extra_repr method below
self.nf = nf
self.mom, self.eps = mom, eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
self.register_buffer('sums', torch.zeros(1,nf,1,1))
self.register_buffer('sqrs', torch.zeros(1,nf,1,1))
self.register_buffer('count', tensor(0.))
self.register_buffer('factor', tensor(0.))
self.register_buffer('offset', tensor(0.))
self.batch = 0
def update_stats(self, x):
bs,nc,*_ = x.shape
self.sums.detach_()
self.sqrs.detach_()
dims = (0,2,3)
s = x .sum(dims, keepdim=True)
ss = (x*x).sum(dims, keepdim=True)
c = s.new_tensor(x.numel()/nc)
mom1 = s.new_tensor(1 - (1-self.mom)/math.sqrt(bs-1))
self.sums .lerp_(s , mom1)
self.sqrs .lerp_(ss, mom1)
self.count.lerp_(c , mom1)
self.batch += bs
means = self.sums/self.count
varns = (self.sqrs/self.count).sub_(means*means)
if bool(self.batch < 20): varns.clamp_min_(0.01)
self.factor = self.mults / (varns+self.eps).sqrt()
self.offset = self.adds - means*self.factor
def forward(self, x):
if self.training: self.update_stats(x)
return x*self.factor + self.offset
def extra_repr(self):
return '{nf}, mom={mom}, eps={eps}'.format(**self.__dict__)
class RunningBatchNorm1d(nn.Module):
def __init__(self, nf, mom=0.1, eps=1e-5):
super().__init__()
# I have added self.nf so that it can be represented when
# printing the model in the extra_repr method below
self.nf = nf
self.mom, self.eps = mom, eps
self.mults = nn.Parameter(torch.ones (nf,1,1))
self.adds = nn.Parameter(torch.zeros(nf,1,1))
self.register_buffer('sums', torch.zeros(1,nf,1,1))
self.register_buffer('sqrs', torch.zeros(1,nf,1,1))
self.register_buffer('count', tensor(0.))
self.register_buffer('factor', tensor(0.))
self.register_buffer('offset', tensor(0.))
self.batch = 0
def update_stats(self, x):
bs,nc,*_ = x.shape
self.sums.detach_()
self.sqrs.detach_()
dims = (0,2)
s = x .sum(dims, keepdim=True)
ss = (x*x).sum(dims, keepdim=True)
c = s.new_tensor(x.numel()/nc)
mom1 = s.new_tensor(1 - (1-self.mom)/math.sqrt(bs-1))
self.sums .lerp_(s , mom1)
self.sqrs .lerp_(ss, mom1)
self.count.lerp_(c , mom1)
self.batch += bs
means = self.sums/self.count
varns = (self.sqrs/self.count).sub_(means*means)
if bool(self.batch < 20): varns.clamp_min_(0.01)
self.factor = self.mults / (varns+self.eps).sqrt()
self.offset = self.adds - means*self.factor
def forward(self, x):
if self.training: self.update_stats(x)
return x*self.factor + self.offset
def extra_repr(self):
return '{nf}, mom={mom}, eps={eps}'.format(**self.__dict__)
```
### No Grad Acc (BS 64), No running BN
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=64
).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
learn.fit(1)
data.batch_size
```
### No Grad Acc (BS 2), No running BN
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
learn.fit(1)
```
### Naive Grad Acc (BS 2) x 32 steps, No running BN
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=accuracy,
callback_fns=[partial(AccumulateScheduler, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.fit(1)
```
### No Grad Acc (BS 2), Running BN
```
def bn2rbn(bn):
if isinstance(bn, nn.BatchNorm1d): rbn = RunningBatchNorm1d(bn.num_features, eps=bn.eps, mom=bn.momentum)
elif isinstance(bn, nn.BatchNorm2d): rbn = RunningBatchNorm2d(bn.num_features, eps=bn.eps, mom=bn.momentum)
rbn.weight = bn.weight
rbn.bias = bn.bias
return (rbn).to(bn.weight.device)
def convert_bn(list_mods, func=bn2rbn):
for i in range(len(list_mods)):
if isinstance(list_mods[i], bn_types):
list_mods[i] = func(list_mods[i])
elif list_mods[i].__class__.__name__ in ("Sequential", "BasicBlock"):
list_mods[i] = nn.Sequential(*convert_bn(list(list_mods[i].children()), func))
return list_mods
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
# learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.model
learn.summary()
learn.model = nn.Sequential(*convert_bn(list(learn.model.children()), bn2rbn))
learn.model
learn.summary()
%debug
learn.fit(1)
```
### GroupNorm
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy,
callback_fns=[partial(AccumulateScheduler, n_step=32)])
# learn.loss_func = CrossEntropyFlat(reduction='sum')
groups = 64
def bn2group(bn):
groupnorm = nn.GroupNorm(groups, bn.num_features, affine=True)
groupnorm.weight = bn.weight
groupnorm.bias = bn.bias
groupnorm.eps = bn.eps
return (groupnorm).to(bn.weight.device)
def convert_bn(list_mods, func=bn2group):
for i in range(len(list_mods)):
if isinstance(list_mods[i], bn_types):
list_mods[i] = func(list_mods[i])
elif list_mods[i].__class__.__name__ in ("Sequential", "BasicBlock"):
list_mods[i] = nn.Sequential(*convert_bn(list(list_mods[i].children()), func))
return list_mods
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.vgg16_bn, metrics=accuracy,
                   callback_fns=[partial(AccumulateScheduler, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
learn.model = nn.Sequential(*convert_bn(list(learn.model.children()), bn2group))
learn.freeze()
learn.fit(1)
```
### Resnet + GroupNorm
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy,
                   callback_fns=[partial(AccumulateScheduler, n_step=32)])
learn.loss_func = CrossEntropyFlat(reduction='sum')
def change_all_BN(module):
for i in range(5):
atr = 'bn'+str(i)
if hasattr(module, atr):
setattr(module, atr, bn2group(getattr(module,atr)))
def wrap_BN(model):
for i in range(len(model)):
for j in range(len(model[i])):
if isinstance(model[i][j], bn_types):
model[i][j] = bn2group(model[i][j])
elif model[i][j].__class__.__name__ == "Sequential":
for k in range(len(model[i][j])):
if isinstance(model[i][j][k], bn_types):
model[i][j][k] = bn2group(model[i][j][k])
elif model[i][j][k].__class__.__name__ == "BasicBlock":
change_all_BN(model[i][j][k])
if hasattr(model[i][j][k],'downsample'):
if model[i][j][k].downsample is not None:
for l in range(len(model[i][j][k].downsample)):
if isinstance(model[i][j][k].downsample[l], bn_types):
model[i][j][k].downsample[l] = bn2group(model[i][j][k].downsample[l])
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
```
### Resnet + GroupNorm (No Acc)
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=2
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy)
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
```
### Resnet + GroupNorm (No Acc) bs = 1
```
seed_everything(2)
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=1
).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet18, metrics=accuracy)
wrap_BN(learn.model)
learn.freeze()
learn.fit(1)
```
# Automated Gradual Pruning Schedule
Michael Zhu and Suyog Gupta, ["To prune, or not to prune: exploring the efficacy of pruning for model compression"](https://arxiv.org/pdf/1710.01878), 2017 NIPS Workshop on Machine Learning of Phones and other Consumer Devices<br>
<br>
After completing sensitivity analysis, decide on your pruning schedule.
## Table of Contents
1. [Implementation of the gradual sparsity function](#Implementation-of-the-gradual-sparsity-function)
2. [Visualize pruning schedule](#Visualize-pruning-schedule)
3. [References](#References)
```
import numpy
import matplotlib.pyplot as plt
from functools import partial
import torch
from torch.autograd import Variable
from ipywidgets import widgets, interact
```
## Implementation of the gradual sparsity function
The function ```sparsity_target``` implements the gradual sparsity schedule from [[1]](#zhu-gupta):<br><br>
<b><i>"We introduce a new automated gradual pruning algorithm in which the sparsity is increased from an initial sparsity value $s_i$ (usually 0) to a final sparsity value $s_f$ over a span of $n$ pruning steps, starting at training step $t_0$ and with pruning frequency $\Delta t$."</i></b><br>
<br>
<div id="eq:zhu_gupta_schedule"></div>
<center>
$\large
\begin{align}
s_t = s_f + (s_i - s_f) \left(1- \frac{t-t_0}{n\Delta t}\right)^3
\end{align}
\ \ for
\large \ \ t \in \{t_0, t_0+\Delta t, ..., t_0+n\Delta t\}
$
</center>
<br>
Pruning happens once at the beginning of each epoch, until the duration of the pruning (the number of epochs to prune) is exceeded. After pruning ends, the training continues without pruning, but the pruned weights are kept at zero.
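For example (a quick check of the formula with made-up numbers): with $s_i = 0$, $s_f = 80\%$ and a pruning span of $n\Delta t = 28$ epochs, at the halfway point $t - t_0 = 14$ we get $s_t = 80 + (0 - 80)\left(1 - \tfrac{14}{28}\right)^3 = 80 - 80 \cdot 0.125 = 70\%$, i.e. most of the sparsity is introduced early, while the remaining weights can still adapt.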
```
def sparsity_target(starting_epoch, ending_epoch, initial_sparsity, final_sparsity, current_epoch):
if final_sparsity < initial_sparsity:
return current_epoch
if current_epoch < starting_epoch:
return current_epoch
span = ending_epoch - starting_epoch
target_sparsity = ( final_sparsity +
(initial_sparsity - final_sparsity) *
(1.0 - ((current_epoch-starting_epoch)/span))**3)
return target_sparsity
```
## Visualize pruning schedule
When using the Automated Gradual Pruning (AGP) schedule, you may want to visualize how the pruning schedule will look as a function of the epoch number. This is called the *sparsity function*. The widget below will help you do this.<br>
There are four knobs you can use to change the schedule:
- ```duration```: this is the number of epochs over which to use the AGP schedule ($n\Delta t$).
- ```initial_sparsity```: $s_i$
- ```final_sparsity```: $s_f$
- ```frequency```: this is the pruning frequency ($\Delta t$).
```
def draw_pruning(duration, initial_sparsity, final_sparsity, frequency):
epochs = []
sparsity_levels = []
# The derivative of the sparsity (i.e. sparsity rate of change)
d_sparsity = []
if frequency=='':
frequency = 1
else:
frequency = int(frequency)
for epoch in range(0,40):
epochs.append(epoch)
current_epoch=Variable(torch.FloatTensor([epoch]), requires_grad=True)
if epoch<duration and epoch%frequency == 0:
sparsity = sparsity_target(
starting_epoch=0,
ending_epoch=duration,
initial_sparsity=initial_sparsity,
final_sparsity=final_sparsity,
current_epoch=current_epoch
)
sparsity_levels.append(sparsity)
sparsity.backward()
d_sparsity.append(current_epoch.grad.item())
current_epoch.grad.data.zero_()
else:
sparsity_levels.append(sparsity)
d_sparsity.append(0)
plt.plot(epochs, sparsity_levels, epochs, d_sparsity)
plt.ylabel('sparsity (%)')
plt.xlabel('epoch')
plt.title('Pruning Rate')
plt.ylim(0, 100)
plt.draw()
duration_widget = widgets.IntSlider(min=0, max=100, step=1, value=28)
si_widget = widgets.IntSlider(min=0, max=100, step=1, value=0)
interact(draw_pruning,
duration=duration_widget,
initial_sparsity=si_widget,
final_sparsity=(0,100,1),
frequency='2');
```
<div id="toc"></div>
## References
1. <div id="zhu-gupta"></div> **Michael Zhu and Suyog Gupta**.
[*To prune, or not to prune: exploring the efficacy of pruning for model compression*](https://arxiv.org/pdf/1710.01878),
NIPS Workshop on Machine Learning of Phones and other Consumer Devices,
2017.
# Coursework 2: Neural Networks
This coursework covers the topics covered in class regarding neural networks for image classification.
This coursework includes both coding questions as well as written ones. Please upload the notebook, which contains your code, results and answers as a pdf file onto Cate.
Dependencies: If you work on a college computer in the Computing Lab, where Ubuntu 18.04 is installed by default, you can use the following virtual environment for your work, where relevant Python packages are already installed.
`source /vol/bitbucket/wbai/virt/computer_vision_ubuntu18.04/bin/activate`
Alternatively, you can use pip, pip3 or anaconda etc to install Python packages.
**Note 1:** please read the both the text and code comment in this notebook to get an idea what you are supposed to implement.
**Note 2:** If you are using the virtual environment in the Computing Lab, please run the following command in the command line before opening jupyter-notebook and importing tensorflow. This will tell tensorflow where the Nvidia CUDA libariries are.
`export LD_LIBRARY_PATH=/vol/cuda/9.0.176/lib64/:"${LD_LIBRARY_PATH}"`
```
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout
```
## Question 1 (20 points)
Throughout this coursework you will be working with the Fashion-MNIST dataset. If you are interested, you may find relevant information regarding the dataset in this paper.
[1] Fashion-MNIST: A novel image dataset for benchmarking machine learning algorithms. Han Xiao, Kashif Rasul, Roland Vollgraf. [arXiv:1708.07747](https://arxiv.org/abs/1708.07747)
Be sure that you have the following files in your working directory: data.tar.gz and reader.py. Loading the data can be done as follows:
`from reader import get_images
(x_train, y_train), (x_test, y_test) = get_images()`
The dataset is already split into a set of 60,000 training images and a set of 10,000 test images. The images are of size 28x28 pixels and are stored as 784-D vectors, so if you would like to visualise an image, you need to reshape the array.
There are in total 10 label classes, which are:
* 0: T-shirt/top
* 1: Trousers
* 2: Pullover
* 3: Dress
* 4: Coat
* 5: Sandal
* 6: Shirt
* 7: Sneaker
* 8: Bag
* 9: Ankle boot
### 1.1 Load data (6 points)
Load the dataset and print the dimensions of the training set and the test set.
```
from reader import get_images
(x_train, y_train), (x_test, y_test) = get_images()
print('dimensions of the training set:',x_train.shape,y_train.shape)
print('dimensions of the test set:',x_test.shape,y_test.shape)
```
### 1.2 Visualize data (6 points)
Visualise 3 training images (T-shirt, trousers and pullover) and 3 test images (dress, coat and sandal).
```
num=0
class_name=['T-shirt','Trousers','Pullover','Dress','Coat','Sandal','Shirt','Sneaker','Bag','Ankle B']
image=[np.reshape(x_train[1],(28,28))]*6
for index in range(np.random.randint(9000),10000):
if num<3 and y_train[index]==num:
image[num]=np.reshape(x_train[index],(28,28))
num+=1
if num>=3 and y_test[index]==num:
image[num]=np.reshape(x_test[index],(28,28))
num+=1
if num==6: break
plt.figure
for i in range(6):
plt.subplot(2,3,i+1)
plt.imshow(image[i],cmap='gray')
plt.title(class_name[i])
```
### 1.3 Data balance (4 points)
Print out the number of training samples for each class.
```
dict = {}
for class_ in y_train:
dict[class_] = dict.get(class_, 0) + 1
dictlist=sorted(dict.items(), key = lambda x: x[0], reverse=False)
for i in range(10):
print('Sample Number of No.',dictlist[i][0],' ',class_name[i],'=',dictlist[i][1],sep='')
```
### 1.4 Discussion (4 points)
Is the dataset balanced? What would happen if the dataset is not balanced in the context of image classification?
The dataset is balanced: we can see from the output above that each class has exactly 6,000 training samples. If the dataset were not balanced, a traditional classification algorithm, which optimises overall classification accuracy, would focus too much on the majority classes. As a result, degraded performance on the minority classes would be unavoidable.
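As an illustration of one common mitigation, going beyond what the question asks (a minimal sketch; the inverse-frequency weighting shown here is my own assumption, not part of the coursework): if the classes were imbalanced we could pass per-class weights to `model.fit` so that errors on rare classes cost more.
```
# Hypothetical example: inverse-frequency class weights for an imbalanced label vector
classes, counts = np.unique(y_train, return_counts=True)
weights = counts.sum() / (len(classes) * counts)
class_weight = {int(c): float(w) for c, w in zip(classes, weights)}
# later: model.fit(x_train, y_train_one_hot, class_weight=class_weight, ...)
```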
## Question 2 (40 points)
Build a neural network and train it with the Fashion-MNIST dataset. Here, we use the keras library, which is a high-level neural network library built upon tensorflow.
```
# Convert the label class into a one-hot representation
num_classes = 10
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# normalization from 0-255 to 0-1
x_train=x_train.astype('float32')/255
x_test=x_test.astype('float32')/255
```
### 2.1 Build a multi-layer perceptron, also known as multi-layer fully connected network. You need to define the layers, the loss function, the optimiser and evaluation metric. (30 points)
```
model = keras.models.Sequential()
# as input layer in a sequential model:
model.add(Dense(512,activation='relu',input_shape=(784,)))
model.add(Dropout(0.25))
#as hidden layer in the model
model.add(Dense(144,activation='relu'))
model.add(Dropout(0.20))
#as output layer in model
model.add(Dense(num_classes,activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=["accuracy"])
print(model.summary())
```
### 2.2 Define the optimisation parameters including the batch size and the number of epochs and then run the optimiser. (10 points)
We have tested that for an appropriate network architecture, on a personal laptop with only a CPU, it takes a few seconds per epoch to train the network. For 100 epochs, that is about a coffee break's worth of time. If you run it on a powerful GPU, it will be even faster.
```
batch_size = 32
epochs = 20
model.fit(x_train, y_train,epochs=epochs,batch_size=batch_size)
```
## Question 3 (20 points)
Evaluate the performance of your network with the test data.
Visualize the performance using appropriate metrics and graphs (eg. confusion matrix).
Comment on your per class performance and how it could be better.
```
# This function is provided for you to display the confusion matrix.
# For more information about the confusion matrix, you can read at
# https://en.wikipedia.org/wiki/Confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
cm: confusion matrix, default to be np.int32 data type
classes: a list of the class labels or class names
normalize: normalize the matrix so that each row amounts to one
cmap: color map
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
```
### 3.1 Evaluate the classification accuracy on the test set (10 points)
```
score = model.evaluate(x_test, y_test)
print('Test Loss','%.4f' %score[0])
print('Test Accuracy',score[1])
```
### 3.2 Calculate and plot the confusion matrix (10 points)
```
from sklearn.metrics import confusion_matrix
y_pred = model.predict(x_test)
y_pred = np.argmax(y_pred, axis=1)
y_test = np.argmax(y_test, axis=1)
# confusion matrix
cm=confusion_matrix(y_test, y_pred)
plot_confusion_matrix(cm,class_name)
```
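To support the per-class discussion, we can also print a per-class precision/recall summary. This is a small optional sketch using scikit-learn, reusing `y_test` and `y_pred` from above:
```
# Optional: per-class precision, recall and F1 (reuses y_test and y_pred from above).
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred, target_names=class_name))
```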
## Question 4 (20 points)
Take two photos: one of your clothes or shoes that belongs to one of the 10 classes, and one of something that does not belong to any class.
Use either Python or other software (Photoshop, Gimp, or any other image editor) to convert the photos into grayscale, crop the region of interest and resize it to 28x28 pixels.
### 4.1 Load and visualise your own images (6 points)
```
import matplotlib.image
def rgb2gray(rgb):
r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
image_name=["Queen's_Tower","T-shirt"]
image_reshape=[]
for i in range(len(image_name)):
img_colour = matplotlib.image.imread(image_name[i]+'.png')
img_grey = rgb2gray(img_colour)
plt.subplot(1,2,i+1)
plt.imshow(img_grey,cmap='gray')
plt.title(image_name[i])
image_reshape.append(np.reshape(img_grey,(1,784)))
```
### 4.2 Test your network on the two images and show the classification results (10 points)
```
for i in range(len(image_reshape)):
pred=model.predict(image_reshape[i])
# print(pred)
class_index=pred.argmax(axis=1)[0]
print('Prediction of',image_name[i]+':',class_name[class_index])
```
### 4.3 Discuss the classification results and provide one method to improve real life performance of the network (4 points)
The network identified the T-shirt successfully but classified the Queen's Tower as a bag, which is unsurprisingly wrong. From this test we can say that:
* The network is good enough to classify the 10 clothing- and footwear-related classes it was trained on.
* It cannot recognise images from other, unseen classes, given the structure of a multi-layer fully connected network and the limited training data.
There are a few things we could do to bring it closer to a real-life application (see the augmentation sketch after this list):
1. A large number of training samples and a wider variety of label classes are needed to cover real-life inputs. Affine transformations can be used to augment the training data.
2. Combining this with other neural network architectures (for example convolutional layers) to build a more expressive model is a good way to handle more data with fewer parameters.
3. Adding a regularisation term is another way to improve classification accuracy.
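As a rough sketch of point 1, small affine transformations can be generated with Keras' `ImageDataGenerator`; the parameter values below are illustrative, and the augmented batch is reshaped back to 784-D vectors for the fully connected network:
```
# Illustrative sketch: augment the training images with small affine transformations.
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rotation_range=10, width_shift_range=0.1,
                             height_shift_range=0.1, zoom_range=0.1)
x_img = x_train.reshape(-1, 28, 28, 1)        # the generator works on image tensors
x_batch, y_batch = next(datagen.flow(x_img, y_train, batch_size=64))
x_batch = x_batch.reshape(-1, 784)            # back to the network's 784-D input shape
```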
## 5. Survey
How long did the coursework take you to solve?
A whole afternoon of a lovely Thursday.
## 1. Where are the old left-handed people?
<p><img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_479/img/Obama_signs_health_care-20100323.jpg" alt="Barack Obama signs the Patient Protection and Affordable Care Act at the White House, March 23, 2010"></p>
<p>Barack Obama is left-handed. So are Bill Gates and Oprah Winfrey; so were Babe Ruth and Marie Curie. A <a href="https://www.nejm.org/doi/full/10.1056/NEJM199104043241418">1991 study</a> reported that left-handed people die on average nine years earlier than right-handed people. Nine years! Could this really be true? </p>
<p>In this notebook, we will explore this phenomenon using age distribution data to see if we can reproduce a difference in average age at death purely from the changing rates of left-handedness over time, refuting the claim of early death for left-handers. This notebook uses <code>pandas</code> and Bayesian statistics to analyze the probability of being a certain age at death given that you are reported as left-handed or right-handed.</p>
<p>A National Geographic survey in 1986 resulted in over a million responses that included age, sex, and hand preference for throwing and writing. Researchers Avery Gilbert and Charles Wysocki analyzed this data and noticed that rates of left-handedness were around 13% for people younger than 40 but decreased with age to about 5% by the age of 80. They concluded based on analysis of a subgroup of people who throw left-handed but write right-handed that this age-dependence was primarily due to changing social acceptability of left-handedness. This means that the rates aren't a factor of <em>age</em> specifically but rather of the <em>year you were born</em>, and if the same study was done today, we should expect a shifted version of the same distribution as a function of age. Ultimately, we'll see what effect this changing rate has on the apparent mean age of death of left-handed people, but let's start by plotting the rates of left-handedness as a function of age.</p>
<p>This notebook uses two datasets: <a href="https://www.cdc.gov/nchs/data/statab/vs00199_table310.pdf">death distribution data</a> for the United States from the year 1999 (source website <a href="https://www.cdc.gov/nchs/nvss/mortality_tables.htm">here</a>) and rates of left-handedness digitized from a figure in this <a href="https://www.ncbi.nlm.nih.gov/pubmed/1528408">1992 paper by Gilbert and Wysocki</a>. </p>
```
# import libraries
# ... YOUR CODE FOR TASK 1 ...
import pandas as pd
import matplotlib.pyplot as plt
# load the data
data_url_1 = "https://gist.githubusercontent.com/mbonsma/8da0990b71ba9a09f7de395574e54df1/raw/aec88b30af87fad8d45da7e774223f91dad09e88/lh_data.csv"
lefthanded_data = pd.read_csv(data_url_1)
# plot male and female left-handedness rates vs. age
%matplotlib inline
fig, ax = plt.subplots() # create figure and axis objects
ax.plot("Age", "Female", data=lefthanded_data, marker = 'o') # plot "Female" vs. "Age"
ax.plot("Age", "Male", data=lefthanded_data, marker = 'x') # plot "Male" vs. "Age"
ax.legend() # add a legend
ax.set_xlabel("Sex")
ax.set_ylabel("Age")
```
## 2. Rates of left-handedness over time
<p>Let's convert this data into a plot of the rates of left-handedness as a function of the year of birth, and average over male and female to get a single rate for both sexes. </p>
<p>Since the study was done in 1986, the data after this conversion will be the percentage of people alive in 1986 who are left-handed as a function of the year they were born. </p>
```
# create a new column for birth year of each age
# ... YOUR CODE FOR TASK 2 ...
lefthanded_data["Birth_year"] = 1986 - lefthanded_data["Age"]
# create a new column for the average of male and female
# ... YOUR CODE FOR TASK 2 ...
lefthanded_data["Mean_lh"] = lefthanded_data[["Female","Male"]].mean(axis=1)
# create a plot of the 'Mean_lh' column vs. 'Birth_year'
fig, ax = plt.subplots()
ax.plot("Birth_year", "Mean_lh", data=lefthanded_data) # plot 'Mean_lh' vs. 'Birth_year'
ax.set_xlabel("Mean_lh") # set the x label for the plot
ax.set_ylabel("Birth_year") # set the y label for the plot
```
## 3. Applying Bayes' rule
<p><strong>Bayes' rule</strong> or <strong>Bayes' theorem</strong> is a statement about conditional probability which allows us to update our beliefs after seeing evidence. The probability of outcome or event A, given that outcome or event B has happened (or is true) is not the same as the probability of outcome B given that outcome A has happened. We need to take into account the <strong>prior</strong> probability that A has happened (the probability that A has happened is written P(A)). Bayes' rule can be written as follows:</p>
<p>$$P(A | B) = \frac{P(B|A) P(A)}{P(B)}$$</p>
<p>The quantity we ultimately want to calculate is the probability of dying at a particular age A, <em>given that</em> your family thinks you are left-handed. Let's write this in shorthand as P(A | LH). We also want the same quantity for right-handers: P(A | RH). As we go, we will figure out or approximate the other three quantities to find out what difference in age of death we might expect purely from the changing rates of left-handedness plotted above.</p>
<p>Here's Bayes' rule in the context of our discussion:</p>
<p>$$P(A | LH) = \frac{P(LH|A) P(A)}{P(LH)}$$</p>
<p>P(LH | A) is the probability that you are left-handed <em>given that</em> you died at age A. P(A) is the overall probability of dying at age A, and P(LH) is the overall probability of being left-handed. We will now calculate each of these three quantities, beginning with P(LH | A).</p>
<p>To calculate P(LH | A) for ages that might fall outside the original data, we will need to extrapolate the data to earlier and later years. Since the rates flatten out in the early 1900s and late 1900s, we'll use a few points at each end and take the mean to extrapolate the rates on each end. The number of points used for this is arbitrary, but we'll pick 10 since the data looks flat-ish until about 1910. </p>
```
# import library
# ... YOUR CODE FOR TASK 3 ...
import numpy as np
# create a function for P(LH | A)
def P_lh_given_A(ages_of_death, study_year = 1990):
""" P(Left-handed | ages of death), calculated based on the reported rates of left-handedness.
Inputs: numpy array of ages of death, study_year
Returns: probability of left-handedness given that subjects died in `study_year` at ages `ages_of_death` """
# Use the mean of the 10 last and 10 first points for left-handedness rates before and after the start
early_1900s_rate = lefthanded_data["Mean_lh"][-10:].mean()
late_1900s_rate = lefthanded_data["Mean_lh"][:10].mean()
middle_rates = lefthanded_data.loc[lefthanded_data['Birth_year'].isin(study_year - ages_of_death)]['Mean_lh']
youngest_age = study_year - 1986 + 10 # the youngest age is 10
oldest_age = study_year - 1986 + 86 # the oldest age is 86
P_return = np.zeros(ages_of_death.shape) # create an empty array to store the results
# extract rate of left-handedness for people of ages 'ages_of_death'
P_return[ages_of_death > oldest_age] = early_1900s_rate/100
P_return[ages_of_death < youngest_age] = late_1900s_rate/100
P_return[np.logical_and((ages_of_death <= oldest_age), (ages_of_death >= youngest_age))] = middle_rates/100
return P_return
```
## 4. When do people normally die?
<p>To estimate the probability of living to an age A, we can use data that gives the number of people who died in a given year and how old they were to create a distribution of ages of death. If we normalize the numbers to the total number of people who died, we can think of this data as a probability distribution that gives the probability of dying at age A. The data we'll use for this is from the entire US for the year 1999 - the closest I could find for the time range we're interested in. </p>
<p>In this block, we'll load in the death distribution data and plot it. The first column is the age, and the other columns are the number of people who died at that age. </p>
```
# Death distribution data for the United States in 1999
data_url_2 = "https://gist.githubusercontent.com/mbonsma/2f4076aab6820ca1807f4e29f75f18ec/raw/62f3ec07514c7e31f5979beeca86f19991540796/cdc_vs00199_table310.tsv"
# load death distribution data
# ... YOUR CODE FOR TASK 4 ...
death_distribution_data = pd.read_csv(data_url_2, sep = "\t", skiprows=[1])
# drop NaN values from the `Both Sexes` column
# ... YOUR CODE FOR TASK 4 ...
death_distribution_data = death_distribution_data.dropna(subset = ["Both Sexes"])
# plot number of people who died as a function of age
fig, ax = plt.subplots()
ax.plot("Age", "Both Sexes", data = death_distribution_data, marker='o') # plot 'Both Sexes' vs. 'Age'
ax.set_xlabel("Both Sexes")
ax.set_ylabel("Age")
```
## 5. The overall probability of left-handedness
<p>In the previous code block we loaded data to give us P(A), and now we need P(LH). P(LH) is the probability that a person who died in our particular study year is left-handed, assuming we know nothing else about them. This is the average left-handedness in the population of deceased people, and we can calculate it by summing up all of the left-handedness probabilities for each age, weighted with the number of deceased people at each age, then divided by the total number of deceased people to get a probability. In equation form, this is what we're calculating, where N(A) is the number of people who died at age A (given by the dataframe <code>death_distribution_data</code>):</p>
<p><img src="https://i.imgur.com/gBIWykY.png" alt="equation" width="220"></p>
<!--- $$P(LH) = \frac{\sum_{\text{A}} P(LH | A) N(A)}{\sum_{\text{A}} N(A)}$$ -->
```
def P_lh(death_distribution_data, study_year = 1990): # sum over P_lh for each age group
""" Overall probability of being left-handed if you died in the study year
Input: dataframe of death distribution data, study year
Output: P(LH), a single floating point number """
p_list = death_distribution_data["Both Sexes"]*P_lh_given_A(death_distribution_data["Age"], study_year) # multiply number of dead people by P_lh_given_A
p = np.sum(p_list) # calculate the sum of p_list
return p/np.sum(death_distribution_data["Both Sexes"]) # normalize to total number of people (sum of death_distribution_data['Both Sexes'])
print(P_lh(death_distribution_data, 1990))
```
## 6. Putting it all together: dying while left-handed (i)
<p>Now we have the means of calculating all three quantities we need: P(A), P(LH), and P(LH | A). We can combine all three using Bayes' rule to get P(A | LH), the probability of being age A at death (in the study year) given that you're left-handed. To make this answer meaningful, though, we also want to compare it to P(A | RH), the probability of being age A at death given that you're right-handed. </p>
<p>We're calculating the following quantity twice, once for left-handers and once for right-handers.</p>
<p>$$P(A | LH) = \frac{P(LH|A) P(A)}{P(LH)}$$</p>
<p>First, for left-handers.</p>
<!--Notice that I was careful not to call these "probability of dying at age A", since that's not actually what we're calculating: we use the exact same death distribution data for each. -->
```
def P_A_given_lh(ages_of_death, death_distribution_data, study_year = 1990):
""" The overall probability of being a particular `age_of_death` given that you're left-handed """
P_A = death_distribution_data["Both Sexes"][ages_of_death]/np.sum(death_distribution_data["Both Sexes"])
P_left = P_lh(death_distribution_data, study_year) # use P_lh function to get probability of left-handedness overall
P_lh_A = P_lh_given_A(ages_of_death, study_year) # use P_lh_given_A to get probability of left-handedness for a certain age
return P_lh_A*P_A/P_left
```
## 7. Putting it all together: dying while left-handed (ii)
<p>And now for right-handers.</p>
```
def P_A_given_rh(ages_of_death, death_distribution_data, study_year = 1990):
""" The overall probability of being a particular `age_of_death` given that you're right-handed """
P_A = death_distribution_data["Both Sexes"][ages_of_death]/np.sum(death_distribution_data["Both Sexes"])
P_right = 1 - P_lh(death_distribution_data, study_year)# either you're left-handed or right-handed, so P_right = 1 - P_left
P_rh_A = 1 - P_lh_given_A(ages_of_death, study_year) # P_rh_A = 1 - P_lh_A
return P_rh_A*P_A/P_right
```
## 8. Plotting the distributions of conditional probabilities
<p>Now that we have functions to calculate the probability of being age A at death given that you're left-handed or right-handed, let's plot these probabilities for a range of ages of death from 6 to 120. </p>
<p>Notice that the left-handed distribution has a bump below age 70: of the pool of deceased people, left-handed people are more likely to be younger. </p>
```
ages = np.arange(6, 120) # make a list of ages of death to plot
# calculate the probability of being left- or right-handed for each
left_handed_probability = P_A_given_lh(ages, death_distribution_data)
right_handed_probability = P_A_given_rh(ages, death_distribution_data)
# create a plot of the two probabilities vs. age
fig, ax = plt.subplots() # create figure and axis objects
ax.plot(ages, left_handed_probability, label = "Left-handed")
ax.plot(ages, right_handed_probability, label = "Right-handed")
ax.legend() # add a legend
ax.set_xlabel("Age at death")
ax.set_ylabel(r"Probability of being age A at death")
```
## 9. Moment of truth: age of left and right-handers at death
<p>Finally, let's compare our results with the original study that found that left-handed people were nine years younger at death on average. We can do this by calculating the mean of these probability distributions in the same way we calculated P(LH) earlier, weighting the probability distribution by age and summing over the result.</p>
<p>$$\text{Average age of left-handed people at death} = \sum_A A P(A | LH)$$</p>
<p>$$\text{Average age of right-handed people at death} = \sum_A A P(A | RH)$$</p>
```
# calculate average ages for left-handed and right-handed groups
# use np.array so that two arrays can be multiplied
average_lh_age = np.nansum(ages*np.array(left_handed_probability))
average_rh_age = np.nansum(ages*np.array(right_handed_probability))
# print the average ages for each group
# ... YOUR CODE FOR TASK 9 ...
print("Average age of lefthanded" + str(average_lh_age))
print("Average age of righthanded" + str(average_rh_age))
# print the difference between the average ages
print("The difference in average ages is " + str(round(average_lh_age - average_rh_age, 1)) + " years.")
```
## 10. Final comments
<p>We got a pretty big age gap between left-handed and right-handed people purely as a result of the changing rates of left-handedness in the population, which is good news for left-handers: you probably won't die young because of your sinisterness. The reported rates of left-handedness have increased from just 3% in the early 1900s to about 11% today, which means that older people are much more likely to be reported as right-handed than left-handed, and so looking at a sample of recently deceased people will have more old right-handers.</p>
<p>Our number is still less than the 9-year gap measured in the study. It's possible that some of the approximations we made are the cause: </p>
<ol>
<li>We used death distribution data from almost ten years after the study (1999 instead of 1991), and we used death data from the entire United States instead of California alone (which was the original study). </li>
<li>We extrapolated the left-handedness survey results to older and younger age groups, but it's possible our extrapolation wasn't close enough to the true rates for those ages. </li>
</ol>
<p>One thing we could do next is figure out how much variability we would expect to encounter in the age difference purely because of random sampling: if you take a smaller sample of recently deceased people and assign handedness with the probabilities of the survey, what does that distribution look like? How often would we encounter an age gap of nine years using the same data and assumptions? We won't do that here, but it's possible with this data and the tools of random sampling. </p>
<!-- I did do this if we want to add more tasks - it would probably take three more blocks.-->
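<p>As a rough, optional sketch of that idea (not one of the tasks above), we could draw a finite sample of deaths from the distributions we already computed and see how much the age gap fluctuates. The snippet assumes <code>ages</code>, <code>left_handed_probability</code> and <code>right_handed_probability</code> from task 8 are still in scope.</p>
```
# Optional sketch: variability of the age gap in a finite sample.
rng = np.random.RandomState(0)
p_lh_dist = np.nan_to_num(np.array(left_handed_probability))
p_lh_dist /= p_lh_dist.sum()
p_rh_dist = np.nan_to_num(np.array(right_handed_probability))
p_rh_dist /= p_rh_dist.sum()
p_left = P_lh(death_distribution_data, 1990)
n_people, gaps = 2000, []
for _ in range(200):
    n_lh = rng.binomial(n_people, p_left)
    lh_ages = rng.choice(ages, size=n_lh, p=p_lh_dist)
    rh_ages = rng.choice(ages, size=n_people - n_lh, p=p_rh_dist)
    gaps.append(rh_ages.mean() - lh_ages.mean())
print("Sampled age gap: mean", round(np.mean(gaps), 1), "years, std", round(np.std(gaps), 1), "years")
```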
<p>To finish off, let's calculate the age gap we'd expect if we did the study in 2018 instead of in 1990. The gap turns out to be much smaller since rates of left-handedness haven't increased for people born after about 1960. Both the National Geographic study and the 1990 study happened at a unique time - the rates of left-handedness had been changing across the lifetimes of most people alive, and the difference in handedness between old and young was at its most striking. </p>
```
# Calculate the probability of being left- or right-handed for all ages
left_handed_probability_2018 = P_A_given_lh(ages, death_distribution_data, 2018)
right_handed_probability_2018 = P_A_given_rh(ages, death_distribution_data, 2018)
# calculate average ages for left-handed and right-handed groups
average_lh_age_2018 = np.nansum(ages*np.array(left_handed_probability_2018))
average_rh_age_2018 = np.nansum(ages*np.array(right_handed_probability_2018))
# print the average ages for each group
print("Average age of lefthanded" + str(average_lh_age_2018))
print("Average age of righthanded" + str(average_rh_age_2018))
print("The difference in average ages is " +
str(round(average_lh_age_2018 - average_rh_age_2018, 1)) + " years.")
```
# Address Segmentation
Conversion of address points into segmented address ranges along a road network.
**Note:** The following guide assumes the data have already been preprocessed, including scrubbing and filtering.
```
import contextily as ctx
import geopandas as gpd
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import shapely
from bisect import bisect
from collections import OrderedDict
from IPython.display import display_html
from matplotlib_scalebar.scalebar import ScaleBar
from operator import itemgetter
from shapely.geometry import LineString, Point
# Define index of example roadseg segment.
ex_idx = 264
ex_place = "City of Yellowknife"
# Define join fields.
join_roadseg = "roadname"
join_addresses = "street"
# Define helper functions.
def groupby_to_list(df, group_field, list_field):
"""
Helper function: faster alternative to pandas groupby.apply/agg(list).
Groups records by one or more fields and compiles an output field into a list for each group.
"""
if isinstance(group_field, list):
for field in group_field:
if df[field].dtype.name != "geometry":
df[field] = df[field].astype("U")
transpose = df.sort_values(group_field)[[*group_field, list_field]].values.T
keys, vals = np.column_stack(transpose[:-1]), transpose[-1]
        keys_unique, keys_indexes = np.unique(keys.astype("U") if keys.dtype == object else keys,
                                               axis=0, return_index=True)
else:
keys, vals = df.sort_values(group_field)[[group_field, list_field]].values.T
keys_unique, keys_indexes = np.unique(keys, return_index=True)
vals_arrays = np.split(vals, keys_indexes[1:])
return pd.Series([list(vals_array) for vals_array in vals_arrays], index=keys_unique).copy(deep=True)
```
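As a quick illustration of this helper (not part of the pipeline itself), grouping a toy set of address numbers by street returns one list of numbers per street:
```
# Toy illustration of groupby_to_list (not used by the pipeline).
toy = pd.DataFrame({"street": ["A", "B", "A", "B"], "number": [2, 7, 4, 5]})
groupby_to_list(toy, "street", "number")
# Returns a Series indexed by street name, each value being the list of numbers on that street.
```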
## Step 1. Load dataframes and configure attributes
Loads dataframes into geopandas and separates address numbers and suffixes, if required.
```
# Load dataframes.
addresses = gpd.read_file("C:/scratch/City_Of_Yellowknife.gpkg", layer="addresses")
roadseg = gpd.read_file("C:/scratch/City_Of_Yellowknife.gpkg", layer="roads")
# Configure attributes - number and suffix.
addresses["suffix"] = addresses["number"].map(lambda val: re.sub(pattern="\\d+", repl="", string=val, flags=re.I))
addresses["number"] = addresses["number"].map(lambda val: re.sub(pattern="[^\\d]", repl="", string=val, flags=re.I)).map(int)
addresses.head()
roadseg.head()
```
## Preview data
**Note:** this code block is for visual purposes only.
```
# Fetch basemaps.
# Note: basemaps are retrieved in EPSG:3857 and, therefore, dataframes should also use this crs.
basemaps = list()
basemaps.append(ctx.bounds2img(*roadseg.total_bounds, ll=False, source=ctx.providers.Esri.WorldImagery))
basemaps.append(ctx.bounds2img(*roadseg.loc[roadseg.index==ex_idx].total_bounds, ll=False,
source=ctx.providers.Esri.WorldImagery))
# Configure local positional distortion (for scalebar dx parameter).
ymin, ymax = itemgetter(1, 3)(roadseg[roadseg.index==ex_idx].to_crs("EPSG:4617").total_bounds)
lat = ymin + ((ymax - ymin) / 2)
dx = math.cos(math.radians(lat))
# Create data for viewing.
starting_pt = gpd.GeoDataFrame(geometry=[Point(roadseg.loc[roadseg.index==ex_idx]["geometry"].iloc[0].coords[0])],
crs=addresses.crs)
# Configure plots.
fig, ax = plt.subplots(1, 2, figsize=(12, 7), tight_layout=True)
for plt_idx, title in enumerate(["All Data", f"roadseg={ex_idx}"]):
ax[plt_idx].imshow(basemaps[plt_idx][0], extent=basemaps[plt_idx][1])
if plt_idx == 0:
addresses.plot(ax=ax[plt_idx], color="red", label="addresses", markersize=2)
roadseg.plot(ax=ax[plt_idx], color="cyan", label="roadseg", linewidth=1)
else:
addresses.plot(ax=ax[plt_idx], color="red", label="addresses", linewidth=2)
starting_pt.plot(ax=ax[plt_idx], color="gold", label=f"roadseg={ex_idx}, 1st point", linewidth=2)
roadseg.loc[roadseg.index==ex_idx].plot(ax=ax[plt_idx], color="yellow", label=f"roadseg={ex_idx}", linewidth=2)
roadseg.loc[roadseg.index!=ex_idx].plot(ax=ax[plt_idx], color="cyan", label="roadseg", linewidth=1)
ax[plt_idx].add_artist(ScaleBar(dx=dx, units="m", location="lower left", pad=0.5, color="black"))
ax[plt_idx].axes.xaxis.set_visible(False)
ax[plt_idx].axes.yaxis.set_visible(False)
ax[plt_idx].set_title(title, fontsize=12)
ax[plt_idx].set_xlim(itemgetter(0, 1)(basemaps[plt_idx][1]))
ax[plt_idx].set_ylim(itemgetter(2, 3)(basemaps[plt_idx][1]))
plt.suptitle(ex_place, fontsize=12)
plt.legend(loc="center left", bbox_to_anchor=(1.0, 0.5), fontsize=12)
plt.show()
```
## Step 2. Configure address to roadseg linkages
Links addresses to the nearest, matching road segment.
```
# Link addresses and roadseg on join fields.
addresses["addresses_index"] = addresses.index
roadseg["roadseg_index"] = roadseg.index
merge = addresses.merge(roadseg[[join_roadseg, "roadseg_index"]], how="left", left_on=join_addresses, right_on=join_roadseg)
addresses["roadseg_index"] = groupby_to_list(merge, "addresses_index", "roadseg_index")
addresses.drop(columns=["addresses_index"], inplace=True)
roadseg.drop(columns=["roadseg_index"], inplace=True)
# Discard non-linked addresses.
addresses.drop(addresses[addresses["roadseg_index"].map(itemgetter(0)).isna()].index, axis=0, inplace=True)
# Convert linkages to integer tuples, if possible.
def as_int(val):
try:
return int(val)
except ValueError:
return val
addresses["roadseg_index"] = addresses["roadseg_index"].map(lambda vals: tuple(set(map(as_int, vals))))
addresses.head()
# Reduce linkages to one roadseg index per address.
# Configure roadseg geometry lookup dictionary.
roadseg_geom_lookup = roadseg["geometry"].to_dict()
def get_nearest_linkage(pt, roadseg_indexes):
"""Returns the roadseg index associated with the nearest roadseg geometry to the given address point."""
# Get roadseg geometries.
roadseg_geometries = itemgetter(*roadseg_indexes)(roadseg_geom_lookup)
# Get roadseg distances from address point.
roadseg_distances = tuple(map(lambda road: pt.distance(road), roadseg_geometries))
# Get the roadseg index associated with the smallest distance.
roadseg_index = roadseg_indexes[roadseg_distances.index(min(roadseg_distances))]
return roadseg_index
# Flag plural linkages.
flag_plural = addresses["roadseg_index"].map(len) > 1
# Reduce plural linkages to the road segment with the lowest (nearest) geometric distance.
addresses.loc[flag_plural, "roadseg_index"] = addresses[flag_plural][["geometry", "roadseg_index"]].apply(
lambda row: get_nearest_linkage(*row), axis=1)
# Unpack first tuple element for singular linkages.
addresses.loc[~flag_plural, "roadseg_index"] = addresses[~flag_plural]["roadseg_index"].map(itemgetter(0))
# Compile linked roadseg geometry for each address.
addresses["roadseg_geometry"] = addresses.merge(
roadseg["geometry"], how="left", left_on="roadseg_index", right_index=True)["geometry_y"]
addresses.head()
```
## Step 3. Configure address parity
Computes address-roadseg parity (left / right side).
```
def get_parity(pt, vector):
"""
Determines the parity (left or right side) of an address point relative to a roadseg vector.
Parity is derived from the determinant of the vectors formed by the road segment and the address-to-roadseg
vectors. A positive determinant indicates 'left' parity and negative determinant indicates 'right' parity.
"""
det = (vector[1][0] - vector[0][0]) * (pt.y - vector[0][1]) - \
(vector[1][1] - vector[0][1]) * (pt.x - vector[0][0])
sign = np.sign(det)
return "l" if sign == 1 else "r"
def get_road_vector(pt, segment):
"""
Returns the following:
a) the distance of the address intersection along the road segment.
b) the vector comprised of the road segment coordinates immediately before and after the address
intersection point.
"""
# For all road segment points and the intersection point, calculate the distance along the road segment.
# Note: always use the length as the distance for the last point to avoid distance=0 for looped roads.
node_distance = (*map(lambda coord: segment.project(Point(coord)), segment.coords[:-1]), segment.length)
intersection_distance = segment.project(pt)
# Compute the index of the intersection point within the road segment points, based on distances.
intersection_index = bisect(node_distance, intersection_distance)
# Conditionally compile the road segment points, as a vector, immediately bounding the intersection point.
# Intersection matches a pre-existing road segment point.
if intersection_distance in node_distance:
# Intersection matches the first road segment point.
if intersection_index == 1:
vector = itemgetter(intersection_index - 1, intersection_index)(segment.coords)
# Intersection matches the last road segment point.
elif intersection_index == len(node_distance):
vector = itemgetter(intersection_index - 2, intersection_index - 1)(segment.coords)
# Intersection matches an interior road segment point.
else:
vector = itemgetter(intersection_index - 2, intersection_index)(segment.coords)
# Intersection matches no pre-existing road segment point.
else:
vector = itemgetter(intersection_index - 1, intersection_index)(segment.coords)
return intersection_distance, vector
# Get point of intersection between each address and the linked road segment.
addresses["intersection"] = addresses[["geometry", "roadseg_geometry"]].apply(
lambda row: itemgetter(-1)(shapely.ops.nearest_points(*row)), axis=1)
# Get the following:
# a) the distance of the intersection point along the linked road segment.
# b) the road segment vector which bounds the intersection point.
# i.e. vector formed by the coordinates immediately before and after the intersection point.
results = addresses[["intersection", "roadseg_geometry"]].apply(lambda row: get_road_vector(*row), axis=1)
addresses["distance"] = results.map(itemgetter(0))
addresses["roadseg_vector"] = results.map(itemgetter(1))
# Get address parity.
addresses["parity"] = addresses[["geometry", "roadseg_vector"]].apply(
lambda row: get_parity(*row), axis=1)
addresses[["geometry", "roadseg_geometry", "intersection", "distance", "roadseg_vector", "parity"]].head()
```
## View relationship between parity variables
View the relationship between address points, bounding roadseg vectors, address-roadseg intersection points, and the computed parity.
**Note:** this code block is for visual purposes only.
```
# Create geometries for viewing.
bounding_vectors = gpd.GeoDataFrame(geometry=addresses["roadseg_vector"].map(LineString), crs=addresses.crs)
intersection = gpd.GeoDataFrame(addresses["parity"], geometry=addresses[["geometry", "intersection"]].apply(
lambda row: LineString([pt.coords[0][:2] for pt in row]), axis=1), crs=addresses.crs)
# Configure plots.
fig, ax = plt.subplots(1, 2, figsize=(14.5, 7), tight_layout=True)
for plt_idx, title in enumerate(["Parity Input", "Parity Output"]):
ax[plt_idx].imshow(basemaps[1][0], extent=basemaps[1][1])
addresses.plot(ax=ax[plt_idx], color="red", label="addresses", linewidth=2)
starting_pt.plot(ax=ax[plt_idx], color="gold", label=f"roadseg={ex_idx}, 1st point", linewidth=2)
roadseg.loc[roadseg.index==ex_idx].plot(ax=ax[plt_idx], color="yellow", label=f"roadseg={ex_idx}", linewidth=2)
roadseg.loc[roadseg.index!=ex_idx].plot(ax=ax[plt_idx], color="cyan", label="roadseg", linewidth=1)
if plt_idx == 0:
intersection.plot(ax=ax[plt_idx], color="orange", label="address-roadseg intersection", linewidth=2)
bounding_vectors.plot(ax=ax[plt_idx], color="magenta", label="bounding roadseg vectors", linewidth=2)
else:
intersection.loc[intersection["parity"]=="l"].plot(
ax=ax[plt_idx], color="blue", label="address-roadseg intersection (left)", linewidth=2)
intersection.loc[intersection["parity"]=="r"].plot(
ax=ax[plt_idx], color="lime", label="address-roadseg intersection (right)", linewidth=2)
ax[plt_idx].add_artist(ScaleBar(dx=dx, units="m", location="lower left", pad=0.5, color="black"))
ax[plt_idx].axes.xaxis.set_visible(False)
ax[plt_idx].axes.yaxis.set_visible(False)
ax[plt_idx].set_title(title, fontsize=12)
ax[plt_idx].set_xlim(itemgetter(0, 1)(basemaps[1][1]))
ax[plt_idx].set_ylim(itemgetter(2, 3)(basemaps[1][1]))
plt.suptitle(ex_place, fontsize=12)
legend_icons = list()
legend_labels = list()
for axis in ax:
legend_items = list(zip(*[items for items in zip(*axis.get_legend_handles_labels()) if items[1] not in legend_labels]))
legend_icons.extend(legend_items[0])
legend_labels.extend(legend_items[1])
plt.legend(legend_icons, legend_labels, loc="center left", bbox_to_anchor=(1.0, 0.5), fontsize=12)
plt.show()
```
## Step 4. Configure address ranges (addrange) and attributes
Groups addresses into ranges then computes the addrange attributes.
```
def get_digdirfg(sequence):
"""Returns the digdirfg attribute for the given sequence of address numbers."""
sequence = list(sequence)
# Return digitizing direction for single addresses.
if len(sequence) == 1:
return "Not Applicable"
# Derive digitizing direction from sequence sorting direction.
if sequence == sorted(sequence):
return "Same Direction"
else:
return "Opposite Direction"
def get_hnumstr(sequence):
"""Returns the hnumstr attribute for the given sequence of address numbers."""
sequence = list(sequence)
# Validate structure for single addresses.
if len(sequence) == 1:
return "Even" if (sequence[0] % 2 == 0) else "Odd"
# Configure sequence sort status.
if sequence == sorted(sequence) or sequence == sorted(sequence, reverse=True):
# Configure sequence parities.
parities = tuple(map(lambda number: number % 2 == 0, sequence))
# Validate structure for sorted address ranges.
if all(parities):
return "Even"
elif not any(parities):
return "Odd"
else:
return "Mixed"
# Return structure for unsorted address ranges.
else:
return "Irregular"
def get_number_sequence(addresses):
"""Returns the filtered number sequence for the given addresses."""
# Separate address components.
numbers, suffixes, distances = tuple(zip(*addresses))
# Reduce addresses at a duplicated intersection distance to only the first instance.
if len(distances) == len(set(distances)):
sequence = numbers
else:
sequence = pd.DataFrame({"number": numbers, "suffix": suffixes, "distance": distances}).drop_duplicates(
subset="distance", keep="first")["number"].to_list()
# Remove duplicated addresses.
sequence = list(OrderedDict.fromkeys(sequence))
return sequence
def sort_addresses(numbers, suffixes, distances):
"""
Sorts the addresses successively by:
1) distance - the distance of the intersection point along the road segment.
2) number
3) suffix
Taking into account the directionality of the addresses relative to the road segment.
"""
# Create individual address tuples from separated address components.
addresses = tuple(zip(numbers, suffixes, distances))
# Apply initial sorting, by distance, to identify address directionality.
addresses_sorted = sorted(addresses, key=itemgetter(2))
directionality = -1 if addresses_sorted[0][0] > addresses_sorted[-1][0] else 1
# Sort addresses - same direction.
if directionality == 1:
return tuple(sorted(addresses, key=itemgetter(2, 1, 0)))
# Sort addresses - opposite direction.
else:
return tuple(sorted(sorted(sorted(
addresses, key=itemgetter(1), reverse=True),
key=itemgetter(0), reverse=True),
key=itemgetter(2)))
```
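A toy call (purely illustrative) shows the behaviour of `sort_addresses` when address numbers increase with distance along the segment:
```
# Toy illustration of sort_addresses (not used by the pipeline): numbers that
# increase with distance keep their distance ordering.
sort_addresses(numbers=[2, 4, 8], suffixes=["", "", "A"], distances=[0.1, 0.5, 0.9])
# -> ((2, '', 0.1), (4, '', 0.5), (8, 'A', 0.9))
```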
### Step 4.1. Group and sort addresses
Groups addresses by roadseg index and parity and sorts each grouping prior to configuring addrange attributes.
```
# Split address dataframe by parity.
addresses_l = addresses[addresses["parity"] == "l"].copy(deep=True)
addresses_r = addresses[addresses["parity"] == "r"].copy(deep=True)
# Create dataframes from grouped addresses.
cols = ("number", "suffix", "distance")
addresses_l = pd.DataFrame({col: groupby_to_list(addresses_l, "roadseg_index", col) for col in cols})
addresses_r = pd.DataFrame({col: groupby_to_list(addresses_r, "roadseg_index", col) for col in cols})
# Sort addresses.
addresses_l = addresses_l.apply(lambda row: sort_addresses(*row), axis=1)
addresses_r = addresses_r.apply(lambda row: sort_addresses(*row), axis=1)
```
### View example address grouping
**Note:** this code block is for visual purposes only.
```
# View data.
vals_l = list(zip(*addresses_l.loc[addresses_l.index==ex_idx].iloc[0]))
vals_r = list(zip(*addresses_r.loc[addresses_r.index==ex_idx].iloc[0]))
cols = ("number", "suffix", "distance")
left = pd.DataFrame({("Left Parity", cols[idx]): vals for idx, vals in enumerate(vals_l)})
right = pd.DataFrame({("Right Parity", cols[idx]): vals for idx, vals in enumerate(vals_r)})
display_html(left.style.set_table_attributes("style='display:inline'")._repr_html_()+
"<pre style='display:inline'> </pre>"+
right.style.set_table_attributes("style='display:inline'")._repr_html_(), raw=True)
```
### Step 4.2. Configure addrange attributes
```
# Configure addrange attributes.
addrange = pd.DataFrame(index=map(int, {*addresses_l.index, *addresses_r.index}))
# Configure addrange attributes - hnumf, hnuml.
addrange.loc[addresses_l.index, "l_hnumf"] = addresses_l.map(lambda addresses: addresses[0][0])
addrange.loc[addresses_l.index, "l_hnuml"] = addresses_l.map(lambda addresses: addresses[-1][0])
addrange.loc[addresses_r.index, "r_hnumf"] = addresses_r.map(lambda addresses: addresses[0][0])
addrange.loc[addresses_r.index, "r_hnuml"] = addresses_r.map(lambda addresses: addresses[-1][0])
# Configuring addrange attributes - hnumsuff, hnumsufl.
addrange.loc[addresses_l.index, "l_hnumsuff"] = addresses_l.map(lambda addresses: addresses[0][1])
addrange.loc[addresses_l.index, "l_hnumsufl"] = addresses_l.map(lambda addresses: addresses[-1][1])
addrange.loc[addresses_r.index, "r_hnumsuff"] = addresses_r.map(lambda addresses: addresses[0][1])
addrange.loc[addresses_r.index, "r_hnumsufl"] = addresses_r.map(lambda addresses: addresses[-1][1])
# Configuring addrange attributes - hnumtypf, hnumtypl.
addrange.loc[addresses_l.index, "l_hnumtypf"] = addresses_l.map(lambda addresses: "Actual Located")
addrange.loc[addresses_l.index, "l_hnumtypl"] = addresses_l.map(lambda addresses: "Actual Located")
addrange.loc[addresses_r.index, "r_hnumtypf"] = addresses_r.map(lambda addresses: "Actual Located")
addrange.loc[addresses_r.index, "r_hnumtypl"] = addresses_r.map(lambda addresses: "Actual Located")
# Get address number sequence.
address_sequence_l = addresses_l.map(get_number_sequence)
address_sequence_r = addresses_r.map(get_number_sequence)
# Configure addrange attributes - hnumstr.
addrange.loc[addresses_l.index, "l_hnumstr"] = address_sequence_l.map(get_hnumstr)
addrange.loc[addresses_r.index, "r_hnumstr"] = address_sequence_r.map(get_hnumstr)
# Configure addrange attributes - digdirfg.
addrange.loc[addresses_l.index, "l_digdirfg"] = address_sequence_l.map(get_digdirfg)
addrange.loc[addresses_r.index, "r_digdirfg"] = address_sequence_r.map(get_digdirfg)
```
## Step 5. Merge addrange attributes with roadseg
```
# Merge addrange attributes with roadseg.
roadseg = roadseg.merge(addrange, how="left", left_index=True, right_index=True)
```
## View Results
**Note:** this code block is for visual purposes only.
```
# Create data for viewing.
addresses_filtered = addresses.loc[addresses["roadseg_index"]==ex_idx]
labels = addresses_filtered[["number", "suffix", "geometry", "parity"]].apply(
lambda row: (f"{row[0]}{row[1]}", row[2].x, row[2].y, row[3]), axis=1)
# Configure plots.
fig, ax = plt.subplots(1, 1, figsize=(6, 7), tight_layout=False)
ax.imshow(basemaps[1][0], extent=basemaps[1][1])
addresses_filtered.loc[addresses_filtered["parity"]=="l"].plot(ax=ax, color="blue", label="addresses (left)", linewidth=2)
addresses_filtered.loc[addresses_filtered["parity"]=="r"].plot(ax=ax, color="lime", label="addresses(right)", linewidth=2)
starting_pt.plot(ax=ax, color="gold", label=f"roadseg={ex_idx}, 1st point", linewidth=2)
roadseg.loc[roadseg.index==ex_idx].plot(ax=ax, color="yellow", label=f"roadseg={ex_idx}", linewidth=2)
roadseg.loc[roadseg.index!=ex_idx].plot(ax=ax, color="cyan", label="roadseg", linewidth=1)
ax.add_artist(ScaleBar(dx=dx, units="m", location="lower left", pad=0.5, color="black"))
ax.axes.xaxis.set_visible(False)
ax.axes.yaxis.set_visible(False)
ax.set_title("Parity Output", fontsize=12)
ax.set_xlim(itemgetter(0, 1)(basemaps[1][1]))
ax.set_ylim(itemgetter(2, 3)(basemaps[1][1]))
for label_params in labels:
label, x, y, parity = label_params
if parity == "l":
kwargs = {"xytext": (x-10, y+10), "ha": "right"}
else:
kwargs = {"xytext": (x+10, y+10), "ha": "left"}
plt.annotate(label, xy=(x, y), textcoords="data", va="bottom", fontsize=10, color="red", fontweight="bold",
bbox=dict(pad=0.3, fc="black"), **kwargs)
plt.legend(loc="center left", bbox_to_anchor=(1.0, 0.5), fontsize=12)
plt.savefig("temp.png", bbox_inches='tight', pad_inches=0)
plt.close()
display_html(f"""
<div class=\"container\" style=\"width:100%;\">
<img src=\"temp.png\" style=\"float:left;max-width:59%;\">
{pd.DataFrame(roadseg.loc[roadseg.index==ex_idx].iloc[0]).style.set_table_styles([
{'selector': '', 'props': [('float', 'right'), ('width', '40%')]},
{'selector': 'td', 'props': [('overflow', 'hidden'), ('text-overflow', 'ellipsis'), ('white-space', 'nowrap')]}
])._repr_html_()}
</div>
""", raw=True)
```
<img src="https://raw.githubusercontent.com/dask/dask/main/docs/source/images/dask_horizontal_no_pad.svg"
width="30%"
alt="Dask logo\" />
# Parallel and Distributed Machine Learning
The material in this notebook is based on open-source content from [Dask's tutorial repository](https://github.com/dask/dask-tutorial) and the [machine learning notebook](https://github.com/coiled/data-science-at-scale/blob/master/3-machine-learning.ipynb) from Coiled's Data Science at Scale course.
So far we have seen how Dask makes data analysis scalable with parallelization via Dask DataFrames. Let's now see how [Dask-ML](https://ml.dask.org/) allows us to do machine learning in a parallel and distributed manner. Note, machine learning is really just a special case of data analysis (one that automates analytical model building), so the 💪 Dask gains 💪 we've seen will apply here as well!
(If you'd like a refresher on the difference between parallel and distributed computing, [here's a good discussion on StackExchange](https://cs.stackexchange.com/questions/1580/distributed-vs-parallel-computing).)
## Types of scaling problems in machine learning
There are two main types of scaling challenges you can run into in your machine learning workflow: scaling the **size of your data** and scaling the **size of your model**. That is:
1. **CPU-bound problems**: Data fits in RAM, but training takes too long. Many hyperparameter combinations, a large ensemble of many models, etc.
2. **Memory-bound problems**: Data is larger than RAM, and sampling isn't an option.
Here's a handy diagram for visualizing these problems:
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/dimensions_of_scale.svg"
width="60%"
alt="scaling problems\" />
In the bottom-left quadrant, your datasets are not too large (they fit comfortably in RAM) and your model is not too large either. When these conditions are met, you are much better off using something like scikit-learn, XGBoost, and similar libraries. You don't need to leverage multiple machines in a distributed manner with a library like Dask-ML. However, if you are in any of the other quadrants, distributed machine learning is the way to go.
Summarizing:
* For in-memory problems, just use scikit-learn (or your favorite ML library).
* For large models, use `dask_ml.joblib` and your favorite scikit-learn estimator.
* For large datasets, use `dask_ml` estimators.
## Scikit-learn in five minutes
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/scikit_learn_logo_small.svg"
width="30%"
alt="sklearn logo\" />
In this section, we'll quickly run through a typical scikit-learn workflow:
* Load some data (in this case, we'll generate it)
* Import the scikit-learn module for our chosen ML algorithm
* Create an estimator for that algorithm and fit it with our data
* Inspect the learned attributes
* Check the accuracy of our model
Scikit-learn has a nice, consistent API:
* You instantiate an `Estimator` (e.g. `LinearRegression`, `RandomForestClassifier`, etc.). All of the model's *hyperparameters* (user-specified parameters, not the ones learned by the estimator) are passed to the estimator when it's created.
* You call `estimator.fit(X, y)` to train the estimator.
* Use `estimator` to inspect attributes, make predictions, etc.
Here `X` is an array of *feature variables* (what you're using to predict) and `y` is an array of *target variables* (what we're trying to predict).
### Generate some random data
```
from sklearn.datasets import make_classification
# Generate data
X, y = make_classification(n_samples=10000, n_features=4, random_state=0)
```
**Refreshing some ML concepts**
- `X` is the samples matrix (or design matrix). The size of `X` is typically (`n_samples`, `n_features`), which means that samples are represented as rows and features are represented as columns.
- A "feature" (also called an "attribute") is a measurable property of the phenomenon we're trying to analyze. A feature for a dataset of employees might be their hire date, for example.
- `y` are the target values, which are real numbers for regression tasks, or integers for classification (or any other discrete set of values). For unsupervised learning tasks, `y` does not need to be specified. `y` is usually a 1d array where the `i`th entry corresponds to the target of the `i`th sample (row) of `X`.
```
# Let's take a look at X
X[:8]
# Let's take a look at y
y[:8]
```
### Fitting an SVC
For this example, we will fit a [Support Vector Classifier](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html).
```
from sklearn.svm import SVC
estimator = SVC(random_state=0)
estimator.fit(X, y)
```
We can inspect the learned features by taking a look at the `support_vectors_`:
```
estimator.support_vectors_[:4]
```
And we check the accuracy:
```
estimator.score(X, y)
```
There are [3 different approaches](https://scikit-learn.org/0.15/modules/model_evaluation.html) to evaluate the quality of predictions of a model. One of them is the **estimator score method**. Estimators have a score method providing a default evaluation criterion for the problem they are designed to solve, which is discussed in each estimator's documentation.
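For comparison, the same accuracy can also be computed with the metric functions in `sklearn.metrics`:
```
# Same accuracy via sklearn's metric functions, for comparison with the score method above.
from sklearn.metrics import accuracy_score

accuracy_score(y, estimator.predict(X))
```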
### Hyperparameter Optimization
There are a few ways to learn the best *hyper*parameters while training. One is `GridSearchCV`.
As the name implies, this does a brute-force search over a grid of hyperparameter combinations. scikit-learn provides tools to automatically find the best parameter combinations via cross-validation (which is the "CV" in `GridSearchCV`).
```
from sklearn.model_selection import GridSearchCV
%%time
estimator = SVC(gamma='auto', random_state=0, probability=True)
param_grid = {
'C': [0.001, 10.0],
'kernel': ['rbf', 'poly'],
}
# Brute-force search over a grid of hyperparameter combinations
grid_search = GridSearchCV(estimator, param_grid, verbose=2, cv=2)
grid_search.fit(X, y)
grid_search.best_params_, grid_search.best_score_
```
## Compute Bound: Single-machine parallelism with Joblib
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/joblib_logo.svg"
alt="Joblib logo"
width="50%"/>
In this section we'll see how [Joblib](https://joblib.readthedocs.io/en/latest/) ("*a set of tools to provide lightweight pipelining in Python*") gives us parallelism on our laptop. Here's what our grid search graph would look like if we set up six training "jobs" in parallel:
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/unmerged_grid_search_graph.svg"
alt="grid search graph"
width="100%"/>
With Joblib, we can say that scikit-learn has *single-machine* parallelism.
Any scikit-learn estimator that can operate in parallel exposes an `n_jobs` keyword, which tells you how many tasks to run in parallel. Specifying `n_jobs=-1` means running the maximum possible number of tasks in parallel.
```
%%time
grid_search = GridSearchCV(estimator, param_grid, verbose=2, cv=2, n_jobs=-1)
grid_search.fit(X, y)
```
Notice that the computation above is faster than before. If you are running this on Binder, you might not see a speed-up: Binder instances tend to have only a single core, so there is little parallelism to exploit.
## Compute Bound: Multi-machine parallelism with Dask
In this section we'll see how Dask (plus Joblib and scikit-learn) gives us multi-machine parallelism. Here's what our grid search graph would look like if we allowed Dask to schedule our training "jobs" over multiple machines in our cluster:
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/merged_grid_search_graph.svg"
alt="merged grid search graph"
width="100%"/>
We can say that Dask can talk to scikit-learn (via Joblib) so that our *cluster* is used to train a model.
If we run this on a laptop, it will take quite some time, but the CPU usage will be satisfyingly near 100% for the duration. To run faster, we would need a distributed cluster. For details on how to create a LocalCluster you can check the Dask documentation on [Single Machine: dask.distributed](https://docs.dask.org/en/latest/setup/single-distributed.html).
Let's instantiate a Client with `n_workers=4`, which will give us a `LocalCluster`.
```
import dask.distributed
client = dask.distributed.Client(n_workers=4)
client
```
**Note:** Click on Cluster Info, to see more details about the cluster. You can see the configuration of the cluster and some other specs.
We can expand our problem by specifying more hyperparameters before training, and see how using Dask as a backend can help us.
```
param_grid = {
'C': [0.001, 0.1, 1.0, 2.5, 5, 10.0],
'kernel': ['rbf', 'poly', 'linear'],
'shrinking': [True, False],
}
grid_search = GridSearchCV(estimator, param_grid, verbose=2, cv=2, n_jobs=-1)
```
### Dask parallel backend
We can fit our estimator with multi-machine parallelism by quickly *switching to a Dask parallel backend* when using joblib.
```
import joblib
%%time
with joblib.parallel_backend("dask", scatter=[X, y]):
grid_search.fit(X, y)
```
**What did just happen?**
Dask-ML developers worked with the scikit-learn and Joblib developers to implement a Dask parallel backend. So internally, scikit-learn now talks to Joblib, and Joblib talks to Dask, and Dask is what handles scheduling all of those tasks on multiple machines.
The best parameters and best score:
```
grid_search.best_params_, grid_search.best_score_
```
## Memory Bound: Single/Multi machine parallelism with Dask-ML
We have seen how to work with larger models, but sometimes you'll want to train on a larger than memory dataset. `dask-ml` has implemented estimators that work well on Dask `Arrays` and `DataFrames` that may be larger than your machine's RAM.
```
import dask.array as da
import dask.delayed
from sklearn.datasets import make_blobs
import numpy as np
```
We'll make a small (random) dataset locally using scikit-learn.
```
n_centers = 12
n_features = 20
X_small, y_small = make_blobs(n_samples=1000, centers=n_centers, n_features=n_features, random_state=0)
centers = np.zeros((n_centers, n_features))
for i in range(n_centers):
centers[i] = X_small[y_small == i].mean(0)
centers[:4]
```
**Note**: The small dataset will be the template for our large random dataset.
We'll use `dask.delayed` to adapt `sklearn.datasets.make_blobs`, so that the actual dataset is being generated on our workers.
If you are not on Binder and your machine has 16GB of RAM, you can set `n_samples_per_block=200_000`; the computation then takes around 10 minutes. If you are on Binder, resources are limited, so the problem below is big enough.
```
n_samples_per_block = 60_000  # on Binder, replace this with 15_000
n_blocks = 500
delayeds = [dask.delayed(make_blobs)(n_samples=n_samples_per_block,
centers=centers,
n_features=n_features,
random_state=i)[0]
for i in range(n_blocks)]
arrays = [da.from_delayed(obj, shape=(n_samples_per_block, n_features), dtype=X.dtype)
for obj in delayeds]
X = da.concatenate(arrays)
X
```
### KMeans from Dask-ml
The algorithms implemented in Dask-ML are scalable. They handle larger-than-memory datasets just fine.
They follow the scikit-learn API, so if you're familiar with scikit-learn, you'll feel at home with Dask-ML.
```
from dask_ml.cluster import KMeans
clf = KMeans(init_max_iter=3, oversampling_factor=10)
%time clf.fit(X)
clf.labels_
clf.labels_[:10].compute()
client.close()
```
## Multi-machine parallelism in the cloud with Coiled
<br>
<img src="https://raw.githubusercontent.com/coiled/data-science-at-scale/master/images/Coiled-Logo_Horizontal_RGB_Black.png"
alt="Coiled logo"
width=25%/>
<br>
In this section we'll see how Coiled allows us to solve machine learning problems with multi-machine parallelism in the cloud.
Coiled, [among other things](https://coiled.io/product/), provides hosted and scalable Dask clusters. The biggest barriers to entry for doing machine learning at scale are "Do you have access to a cluster?" and "Do you know how to manage it?" Coiled solves both of those problems.
We'll spin up a Coiled cluster (with 10 workers in this case), then instantiate a Dask Client to use with that cluster.
If you are running on your local machine rather than on Binder, and you want to give Coiled a try, you can sign up [here](https://cloud.coiled.io/login?redirect_uri=/) and you will get some free credits. If you installed the environment by following the steps in the repository's [README](https://github.com/coiled/dask-mini-tutorial/blob/main/README.md), you will have `coiled` installed. You will just need to log in, following the steps on the [setup page](https://docs.coiled.io/user_guide/getting_started.html), and you will be ready to go.
To learn more about how to set up an environment, you can visit the Coiled documentation on [Creating software environments](https://docs.coiled.io/user_guide/software_environment_creation.html). For now, you can use the environment we set up for this tutorial.
```
import coiled
from dask.distributed import Client
# Spin up a Coiled cluster, instantiate a Client
cluster = coiled.Cluster(n_workers=10, software="ncclementi/dask-mini-tutorial",)
client = Client(cluster)
client
```
### Memory bound: Dask-ML
We can use Dask-ML estimators on the cloud to work with larger datasets.
```
n_centers = 12
n_features = 20
X_small, y_small = make_blobs(n_samples=1000, centers=n_centers, n_features=n_features, random_state=0)
centers = np.zeros((n_centers, n_features))
for i in range(n_centers):
centers[i] = X_small[y_small == i].mean(0)
n_samples_per_block = 200_000
n_blocks = 500
delayeds = [dask.delayed(make_blobs)(n_samples=n_samples_per_block,
centers=centers,
n_features=n_features,
random_state=i)[0]
for i in range(n_blocks)]
arrays = [da.from_delayed(obj, shape=(n_samples_per_block, n_features), dtype=X_small.dtype)
for obj in delayeds]
X = da.concatenate(arrays)
X = X.persist()
from dask_ml.cluster import KMeans
clf = KMeans(init_max_iter=3, oversampling_factor=10)
%time clf.fit(X)
```
Computing the labels:
```
clf.labels_[:10].compute()
client.close()
```
## Extra resources:
- [Dask-ML documentation](https://ml.dask.org/)
- [Getting started with Coiled](https://docs.coiled.io/user_guide/getting_started.html)
```
'''
A Convolutional Network implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
import tensorflow as tf
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Parameters
learning_rate = 0.001
training_iters = 200000
batch_size = 128
display_step = 10
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)
# Create some wrappers for simplicity
def conv2d(x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(x, weights, biases, dropout):
# Reshape input picture
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer
conv1 = conv2d(x, weights['wc1'], biases['bc1'])
# Max Pooling (down-sampling)
conv1 = maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])
# Max Pooling (down-sampling)
conv2 = maxpool2d(conv2, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, dropout)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
return out
# Store layers weight & bias
weights = {
# 5x5 conv, 1 input, 32 outputs
'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),
# fully connected, 7*7*64 inputs, 1024 outputs
'wd1': tf.Variable(tf.random_normal([7*7*64, 1024])),
# 1024 inputs, 10 outputs (class prediction)
'out': tf.Variable(tf.random_normal([1024, n_classes]))
}
biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bd1': tf.Variable(tf.random_normal([1024])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = conv_net(x, weights, biases, keep_prob)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# Keep training until reach max iterations
while step * batch_size < training_iters:
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y,
keep_prob: dropout})
if step % display_step == 0:
# Calculate batch loss and accuracy
loss, acc = sess.run([cost, accuracy], feed_dict={x: batch_x,
y: batch_y,
keep_prob: 1.})
print "Iter " + str(step*batch_size) + ", Minibatch Loss= " + \
"{:.6f}".format(loss) + ", Training Accuracy= " + \
"{:.5f}".format(acc)
step += 1
print "Optimization Finished!"
# Calculate accuracy for 256 mnist test images
print "Testing Accuracy:", \
sess.run(accuracy, feed_dict={x: mnist.test.images[:256],
y: mnist.test.labels[:256],
keep_prob: 1.})
```
# Lesson 1
In the screencast for this lesson I go through a few scenarios for time series. This notebook contains the code for that with a few little extras! :)
# Setup
```
# !pip install -U tf-nightly-2.0-preview
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
```
# Trend and Seasonality
```
def trend(time, slope=0):
return slope * time
```
Let's create a time series that just trends upward:
```
time = np.arange(4 * 365 + 1)
baseline = 10
series = trend(time, 0.1)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
Now let's generate a time series with a seasonal pattern:
```
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
baseline = 10
amplitude = 40
series = seasonality(time, period=365, amplitude=amplitude)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
Now let's create a time series with both trend and seasonality:
```
slope = 0.05
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
# Noise
In practice few real-life time series have such a smooth signal. They usually have some noise, and the signal-to-noise ratio can sometimes be very low. Let's generate some white noise:
```
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
plt.figure(figsize=(10, 6))
plot_series(time, noise)
plt.show()
```
Now let's add this white noise to the time series:
```
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
```
All right, this looks realistic enough for now. Let's try to forecast it. We will split it into two periods: the training period and the validation period (in many cases, you would also want to have a test period). The split will be at time step 1000.
```
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
def autocorrelation(time, amplitude, seed=None):
rnd = np.random.RandomState(seed)
φ1 = 0.5
φ2 = -0.1
ar = rnd.randn(len(time) + 50)
ar[:50] = 100
for step in range(50, len(time) + 50):
ar[step] += φ1 * ar[step - 50]
ar[step] += φ2 * ar[step - 33]
return ar[50:] * amplitude
series = autocorrelation(time, 10, seed=42)
plot_series(time[:200], series[:200])
plt.show()
def autocorrelation(time, amplitude, seed=None):
rnd = np.random.RandomState(seed)
φ = 0.8
ar = rnd.randn(len(time) + 1)
for step in range(1, len(time) + 1):
ar[step] += φ * ar[step - 1]
return ar[1:] * amplitude
series = autocorrelation(time, 10, seed=42)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + trend(time, 2)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
plot_series(time[:200], series[:200])
plt.show()
series = autocorrelation(time, 10, seed=42) + seasonality(time, period=50, amplitude=150) + trend(time, 2)
series2 = autocorrelation(time, 5, seed=42) + seasonality(time, period=50, amplitude=2) + trend(time, -1) + 550
series[200:] = series2[200:]
#series += noise(time, 30)
plot_series(time[:300], series[:300])
plt.show()
def impulses(time, num_impulses, amplitude=1, seed=None):
rnd = np.random.RandomState(seed)
    impulse_indices = rnd.randint(len(time), size=num_impulses)
series = np.zeros(len(time))
for index in impulse_indices:
series[index] += rnd.rand() * amplitude
return series
series = impulses(time, 10, seed=42)
plot_series(time, series)
plt.show()
def autocorrelation(source, φs):
ar = source.copy()
max_lag = len(φs)
for step, value in enumerate(source):
for lag, φ in φs.items():
if step - lag > 0:
ar[step] += φ * ar[step - lag]
return ar
signal = impulses(time, 10, seed=42)
series = autocorrelation(signal, {1: 0.99})
plot_series(time, series)
plt.plot(time, signal, "k-")
plt.show()
signal = impulses(time, 10, seed=42)
series = autocorrelation(signal, {1: 0.70, 50: 0.2})
plot_series(time, series)
plt.plot(time, signal, "k-")
plt.show()
series_diff1 = series[1:] - series[:-1]
plot_series(time[1:], series_diff1)
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
from statsmodels.tsa.arima_model import ARIMA
model = ARIMA(series, order=(5, 1, 0))
model_fit = model.fit(disp=0)
print(model_fit.summary())
import pandas as pd
df = pd.read_csv("sunspots.csv", parse_dates=["Date"], index_col="Date")
series = df["Monthly Mean Total Sunspot Number"].asfreq("1M")
series.head()
series.plot(figsize=(12, 5))
series["1995-01-01":].plot()
series.diff(1).plot()
plt.axis([0, 100, -50, 50])
from pandas.plotting import autocorrelation_plot
autocorrelation_plot(series)
autocorrelation_plot(series.diff(1)[1:])
autocorrelation_plot(series.diff(1)[1:].diff(11 * 12)[11*12+1:])
plt.axis([0, 500, -0.1, 0.1])
autocorrelation_plot(series.diff(1)[1:])
plt.axis([0, 50, -0.1, 0.1])
116.7 - 104.3
[series.autocorr(lag) for lag in range(1, 50)]
from pandas.plotting import autocorrelation_plot
series_diff = series
for lag in range(50):
series_diff = series_diff[1:] - series_diff[:-1]
autocorrelation_plot(series_diff)
import pandas as pd
series_diff1 = pd.Series(series[1:] - series[:-1])
autocorrs = [series_diff1.autocorr(lag) for lag in range(1, 60)]
plt.plot(autocorrs)
plt.show()
```
# `rlplay`-ing around with Policy Gradients
```
import torch
import numpy
import matplotlib.pyplot as plt
%matplotlib inline
import gym
# hotfix for gym's unresponsive viz (spawns gl threads!)
import rlplay.utils.integration.gym
```
See example.ipynb for the overview of `rlplay`
<br>
## Sophisticated CartPole with PG
### The environment
The environment factory
```
from rlplay.zoo.env import NarrowPath
class FP32Observation(gym.ObservationWrapper):
def observation(self, observation):
obs = observation.astype(numpy.float32)
obs[0] = 0. # mask the position info
return obs
# def step(self, action):
# obs, reward, done, info = super().step(action)
# reward -= abs(obs[1]) / 10 # punish for non-zero speed
# return obs, reward, done, info
class OneHotObservation(gym.ObservationWrapper):
def observation(self, observation):
return numpy.eye(1, self.env.observation_space.n,
k=observation, dtype=numpy.float32)[0]
def base_factory(seed=None):
# return gym.make("LunarLander-v2")
return FP32Observation(gym.make("CartPole-v0").unwrapped)
# return OneHotObservation(NarrowPath())
```
<br>
### the Actor
A procedure and a layer that convert the input integer data into its
little-endian binary representation as float $\{0, 1\}^m$ vectors.
```
def onehotbits(input, n_bits=63, dtype=torch.float):
"""Encode integers to fixed-width binary floating point vectors"""
assert not input.dtype.is_floating_point
assert 0 < n_bits < 64 # torch.int64 is signed, so 64-1 bits max
# n_bits = {torch.int64: 63, torch.int32: 31, torch.int16: 15, torch.int8 : 7}
# get mask of set bits
pow2 = torch.tensor([1 << j for j in range(n_bits)]).to(input.device)
x = input.unsqueeze(-1).bitwise_and(pow2).to(bool)
# upcast bool to float to get one-hot
return x.to(dtype)
class OneHotBits(torch.nn.Module):
def __init__(self, n_bits=63, dtype=torch.float):
assert 1 <= n_bits < 64
super().__init__()
self.n_bits, self.dtype = n_bits, dtype
def forward(self, input):
return onehotbits(input, n_bits=self.n_bits, dtype=self.dtype)
```
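A quick sanity check of the encoding (not in the original notebook): $5 = 101_2$, so its little-endian 4-bit encoding should be $[1, 0, 1, 0]$.
```
onehotbits(torch.tensor([5]), n_bits=4)
# tensor([[1., 0., 1., 0.]])
```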
A special module dictionary, which applies itself to the input dict of tensors
```
from typing import Optional, Mapping
from torch.nn import Module, ModuleDict as BaseModuleDict
class ModuleDict(BaseModuleDict):
"""The ModuleDict, that applies itself to the input dicts."""
def __init__(
self,
modules: Optional[Mapping[str, Module]] = None,
dim: Optional[int]=-1
) -> None:
super().__init__(modules)
self.dim = dim
def forward(self, input):
# enforce concatenation in the order of the declaration in __init__
return torch.cat([
m(input[k]) for k, m in self.items()
], dim=self.dim)
```
An $\ell_2$ normalization layer.
```
from torch.nn.functional import normalize
class Normalize(torch.nn.Module):
def __init__(self, dim=-1):
super().__init__()
self.dim = dim
def forward(self, input):
return normalize(input, dim=self.dim)
```
A more sophisticated policy learner
```
from rlplay.engine import BaseActorModule
from rlplay.utils.common import multinomial
from torch.nn import Sequential, Linear, ReLU, LogSoftmax
class CartPoleActor(BaseActorModule):
def __init__(self, lstm='none'):
assert lstm in ('none', 'loop', 'cudnn')
super().__init__()
self.use_lstm = self.use_cudnn = False
# blend the policy with a uniform distribution, determined by
# the exploration epsilon. We update it in the actor clones via a buffer
# self.register_buffer('epsilon', torch.tensor(epsilon))
# XXX isn't the stochastic policy random enough by itself?
self.baseline = Sequential(
Linear(4, 20),
ReLU(),
Linear(20, 1),
)
self.policy = Sequential(
Linear(4, 20),
ReLU(),
Linear(20, 2),
LogSoftmax(dim=-1),
)
def forward(self, obs, act, rew, fin, *, hx=None, stepno=None, virtual=False):
# value must not have any trailing dims, i.e. T x B
logits = self.policy(obs)
value = self.baseline(obs).squeeze(-1)
if not self.training:
actions = logits.argmax(dim=-1)
else:
actions = multinomial(logits.detach().exp())
return actions, (), dict(value=value, logits=logits)
```
<br>
### PPO/GAE A2C and V-trace A2C algos
Service functions for the algorithms
```
from plyr import apply, suply, xgetitem
def timeshift(state, *, shift=1):
"""Get current and shifted slices of nested objects."""
# use `xgetitem` to let None through
# XXX `curr[t]` = (x_t, a_{t-1}, r_t, d_t), t=0..T-H
curr = suply(xgetitem, state, index=slice(None, -shift))
# XXX `next[t]` = (x_{t+H}, a_{t+H-1}, r_{t+H}, d_{t+H}), t=0..T-H
next = suply(xgetitem, state, index=slice(shift, None))
return curr, next
```
The Advantage Actor-Critic algo
```
import torch.nn.functional as F
from rlplay.algo.returns import pyt_gae, pyt_returns, pyt_multistep
# @torch.enable_grad()
def a2c(
fragment, module, *, gamma=0.99, gae=1., ppo=0.,
C_entropy=1e-2, C_value=0.5, c_rho=1.0, multistep=0,
):
r"""The Advantage Actor-Critic algorithm (importance-weighted off-policy).
Close to REINFORCE, but uses separate baseline value estimate to compute
advantages in the policy gradient:
$$
\nabla_\theta J(s_t)
= \mathbb{E}_{a \sim \beta(a\mid s_t)}
\frac{\pi(a\mid s_t)}{\beta(a\mid s_t)}
\bigl( r_{t+1} + \gamma G_{t+1} - v(s_t) \bigr)
\nabla_\theta \log \pi(a\mid s_t)
\,, $$
where the critic estimates the state's value under the current policy
$$
v(s_t)
\approx \mathbb{E}_{\pi_{\geq t}}
G_t(a_t, s_{t+1}, a_{t+1}, ... \mid s_t)
\,. $$
"""
state, state_next = timeshift(fragment.state)
# REACT: (state[t], h_t) \to (\hat{a}_t, h_{t+1}, \hat{A}_t)
_, _, info = module(
state.obs, state.act, state.rew, state.fin,
hx=fragment.hx, stepno=state.stepno)
# info['value'] = V(`.state[t]`)
# <<-->> v(x_t)
# \approx \mathbb{E}( G_t \mid x_t)
# \approx \mathbb{E}( r_{t+1} + \gamma r_{t+2} + ... \mid x_t)
# <<-->> npv(`.state[t+1:]`)
# info['logits'] = \log \pi(... | .state[t] )
# <<-->> \log \pi( \cdot \mid x_t)
# `.actor[t]` is actor's extra info in reaction to `.state[t]`, t=0..T
bootstrap = fragment.actor['value'][-1]
# `bootstrap` <<-->> `.value[-1]` = V(`.state[-1]`)
# XXX post-mul by `1 - \gamma` fails to train, but seems appropriate
# for the continuation/survival interpretation of the discount factor.
# <<-- but who says this is a good interpretation?
# ret.mul_(1 - gamma)
# \pi is the target policy, \mu is the behaviour policy
log_pi, log_mu = info['logits'], fragment.actor['logits']
# Future rewards after `.state[t]` are recorded in `.state[t+1:]`
# G_t <<-->> ret[t] = rew[t] + gamma * (1 - fin[t]) * (ret[t+1] or bootstrap)
if multistep > 0:
ret = pyt_multistep(state_next.rew, state_next.fin,
info['value'].detach(),
gamma=gamma, n_lookahead=multistep,
bootstrap=bootstrap.unsqueeze(0))
else:
ret = pyt_returns(state_next.rew, state_next.fin,
gamma=gamma, bootstrap=bootstrap)
# the critic's mse score (min)
# \frac1{2 T} \sum_t (G_t - v(s_t))^2
value = info['value']
critic_mse = F.mse_loss(value, ret, reduction='mean') / 2
# v(x_t) \approx \mathbb{E}( G_t \mid x_t )
# \approx G_t (one-point estimate)
# <<-->> ret[t]
# compute the advantages $G_t - v(s_t)$
# or GAE [Schulman et al. (2016)](http://arxiv.org/abs/1506.02438)
# XXX sec 6.1 in the GAE paper uses V from the `current` value
# network, not the one used during the rollout.
# value = fragment.actor['value'][:-1]
if gae < 1.:
# the positional arguments are $r_{t+1}$, $d_{t+1}$, and $v(s_t)$,
# respectively, for $t=0..T-1$. The bootstrap is $v(S_T)$ from
# the rollout.
adv = pyt_gae(state_next.rew, state_next.fin, value.detach(),
gamma=gamma, C=gae, bootstrap=bootstrap)
else:
adv = ret.sub(value.detach())
# adv.sub_(adv.mean())
# adv.div_(adv.std(dim=0))
# Assume `.act` is unstructured: `act[t]` = a_{t+1} -->> T x B x 1
act = state_next.act.unsqueeze(-1) # actions taken during the rollout
# the importance weights
log_pi_a = log_pi.gather(-1, act).squeeze(-1)
log_mu_a = log_mu.gather(-1, act).squeeze(-1)
# the policy surrogate score (max)
if ppo > 0:
# the PPO loss is the properly clipped rho times the advantage
ratio = log_pi_a.sub(log_mu_a).exp()
a2c_score = torch.min(
ratio * adv,
ratio.clamp(1. - ppo, 1. + ppo) * adv
).mean()
else:
# \exp{- ( \log \mu - \log \pi )}, evaluated at $a_t \mid z_t$
rho = log_mu_a.sub_(log_pi_a.detach()).neg_()\
.exp_().clamp_(max=c_rho)
# \frac1T \sum_t \rho_t (G_t - v_t) \log \pi(a_t \mid z_t)
a2c_score = log_pi_a.mul(adv.mul_(rho)).mean()
# the policy's neg-entropy score (min)
# - H(\pi(•\mid s)) = - (-1) \sum_a \pi(a\mid s) \log \pi(a\mid s)
f_min = torch.finfo(log_pi.dtype).min
negentropy = log_pi.exp().mul(log_pi.clamp(min=f_min)).sum(dim=-1).mean()
# breakpoint()
# maximize the entropy and the reinforce score, minimize the critic loss
objective = C_entropy * negentropy + C_value * critic_mse - a2c_score
return objective.mean(), dict(
entropy=-float(negentropy),
policy_score=float(a2c_score),
value_loss=float(critic_mse),
)
```
A few notes:
* a2c is on-policy and no importance weight could change this!
* L72-80: [stable_baselines3](./common/on_policy_algorithm.py#L183-192)
and [rlpyt](./algos/pg/base.py#L49-58) use rollout data, when computing the GAE
* L61-62: [stable_baselines3](./stable_baselines3/a2c/a2c.py#L147-156) uses `vf_coef=0.5`,
and **unhalved** `F.mse-loss`, while [rlpyt](./rlpyt/rlpyt/algos/pg/a2c.py#L93-94)
uses `value_loss_coeff=0.5`, and **halved** $\ell_2$ loss!
The off-policy actor-critic algorithm for the learner, called V-trace,
from [Espeholt et al. (2018)](http://proceedings.mlr.press/v80/espeholt18a.html).
```
from rlplay.algo.returns import pyt_vtrace
# @torch.enable_grad()
def vtrace(fragment, module, *, gamma=0.99, C_entropy=1e-2, C_value=0.5):
# REACT: (state[t], h_t) \to (\hat{a}_t, h_{t+1}, \hat{A}_t)
_, _, info = module(
fragment.state.obs, fragment.state.act,
fragment.state.rew, fragment.state.fin,
hx=fragment.hx, stepno=fragment.state.stepno)
# Assume `.act` is unstructured: `act[t]` = a_{t+1} -->> T x B x 1
state, state_next = timeshift(fragment.state)
act = state_next.act.unsqueeze(-1) # actions taken during the rollout
# \pi is the target policy, \mu is the behaviour policy (T+1 x B x ...)
log_pi, log_mu = info['logits'], fragment.actor['logits']
# the importance weights
log_pi_a = log_pi.gather(-1, act).squeeze(-1)
log_mu_a = log_mu.gather(-1, act).squeeze(-1)
log_rho = log_mu_a.sub_(log_pi_a.detach()).neg_()
# `.actor[t]` is actor's extra info in reaction to `.state[t]`, t=0..T
val = fragment.actor['value'] # info['value'].detach()
    # XXX Although Espeholt et al. (2018, sec.~4.2) use the value estimate of
# the rollout policy for the V-trace target in eq. (1), it makes more sense
# to use the estimates of the current policy, as has been done in monobeast.
# https://hackernoon.com/intuitive-rl-intro-to-advantage-actor-critic-a2c-4ff545978752
val, bootstrap = val[:-1], val[-1]
target = pyt_vtrace(state_next.rew, state_next.fin, val,
gamma=gamma, bootstrap=bootstrap,
omega=log_rho, r_bar=1., c_bar=1.)
# the critic's mse score against v-trace targets (min)
critic_mse = F.mse_loss(info['value'][:-1], target, reduction='mean') / 2
# \delta_t = r_{t+1} + \gamma \nu(s_{t+1}) 1_{\neg d_{t+1}} - v(s_t)
adv = torch.empty_like(state_next.rew).copy_(bootstrap)
adv[:-1].copy_(target[1:]) # copy the v-trace targets \nu(s_{t+1})
adv.masked_fill_(state_next.fin, 0.).mul_(gamma)
adv.add_(state_next.rew).sub_(val)
    # XXX note `val` here, not `target`! see sec.~4.2 in (Espeholt et al.; 2018)
# the policy surrogate score (max)
# \rho_t = \min\{ \bar{\rho}, \frac{\pi_t(a_t)}{\mu_t(a_t)} \}
rho = log_rho.exp_().clamp_(max=1.)
vtrace_score = log_pi_a.mul(adv.mul_(rho)).mean()
# the policy's neg-entropy score (min)
f_min = torch.finfo(log_pi.dtype).min
negentropy = log_pi.exp().mul(log_pi.clamp(min=f_min)).sum(dim=-1).mean()
# maximize the entropy and the reinforce score, minimize the critic loss
objective = C_entropy * negentropy + C_value * critic_mse - vtrace_score
return objective.mean(), dict(
entropy=-float(negentropy),
policy_score=float(vtrace_score),
value_loss=float(critic_mse),
)
```
<br>
### Run!
Initialize the learner and the environment factories
```
from functools import partial
factory_eval = partial(base_factory)
factory = partial(base_factory)
learner = CartPoleActor(lstm='none')
learner.train()
device_ = torch.device('cpu') # torch.device('cuda:0')
learner.to(device=device_)
optim = torch.optim.Adam(learner.parameters(), lr=1e-3)
```
Initialize the sampler
```
T, B = 25, 4
sticky = learner.use_cudnn
```
```
from rlplay.engine.rollout import multi
batchit = multi.rollout(
factory,
learner,
n_steps=T,
n_actors=6,
n_per_actor=B,
n_buffers=15,
n_per_batch=2,
sticky=sticky,
pinned=False,
clone=True,
close=False,
device=device_,
start_method='fork', # fork in notebook for macos, spawn in linux
)
```
A generator of evaluation rewards
```
from rlplay.engine.rollout.evaluate import evaluate
test_it = evaluate(factory_eval, learner, n_envs=4, n_steps=500,
clone=False, device=device_, start_method='fork')
```
Implement your favourite training method
```
n_epochs = 100
use_vtrace = True
# gamma, gae, ppo = 0.99, 0.92, 0.2
gamma, gae, ppo, multistep = 0.99, 1., 0.2, 0
import tqdm
from torch.nn.utils import clip_grad_norm_
torch.set_num_threads(1)
losses, rewards = [], []
for epoch in tqdm.tqdm(range(n_epochs)):
for j, batch in zip(range(100), batchit):
if use_vtrace:
loss, info = vtrace(batch, learner, gamma=gamma)
else:
loss, info = a2c(batch, learner, gamma=gamma, gae=gae, ppo=ppo, multistep=multistep)
optim.zero_grad()
loss.backward()
grad = clip_grad_norm_(learner.parameters(), max_norm=1.0)
optim.step()
losses.append(dict(
loss=float(loss), grad=float(grad), **info
))
# fetch the evaluation results lagged by one inner loop!
rewards.append(next(test_it))
# close the generators
batchit.close()
test_it.close()
```
<br>
```
def collate(records):
    """Collate identically keyed dicts into a dict of lists."""
    out = {}
    for record in records:
        for k, v in record.items():
            out.setdefault(k, []).append(v)
    return out
data = {k: numpy.array(v) for k, v in collate(losses).items()}
if 'value_loss' in data:
plt.semilogy(data['value_loss'])
if 'entropy' in data:
plt.plot(data['entropy'])
if 'policy_score' in data:
plt.plot(data['policy_score'])
plt.semilogy(data['grad'])
rewards = numpy.stack(rewards, axis=0)
rewards
m, s = numpy.median(rewards, axis=-1), rewards.std(axis=-1)
fi, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.plot(numpy.mean(rewards, axis=-1))
ax.plot(numpy.median(rewards, axis=-1))
ax.plot(numpy.min(rewards, axis=-1))
ax.plot(numpy.std(rewards, axis=-1))
# ax.plot(m+s * 1.96)
# ax.plot(m-s * 1.96)
plt.show()
```
<br>
The ultimate evaluation run
```
from rlplay.engine import core
with factory_eval() as env:
learner.eval()
eval_rewards, info = core.evaluate([
env
], learner, render=True, n_steps=1e4, device=device_)
print(sum(eval_rewards))
```
<br>
Let's analyze the performance
```
from rlplay.algo.returns import npy_returns, npy_deltas
td_target = eval_rewards + gamma * info['value'][1:]
td_error = td_target - info['value'][:-1]
# td_error = npy_deltas(
# eval_rewards, numpy.zeros_like(eval_rewards, dtype=bool), info['value'][:-1],
# gamma=gamma, bootstrap=info['value'][-1])
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.semilogy(abs(td_error) / abs(td_target))
ax.set_title('relative td(1)-error');
from rlplay.algo.returns import npy_returns, npy_deltas
# plt.plot(
# npy_returns(eval_rewards, numpy.zeros_like(eval_rewards, dtype=bool),
# gamma=gamma, bootstrap=info['value'][-1]))
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.plot(info['value'])
ax.axhline(1 / (1 - gamma), c='k', alpha=0.5, lw=1);
import math
from scipy.special import softmax, expit, entr
*head, n_actions = info['logits'].shape
proba = softmax(info['logits'], axis=-1)
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.plot(entr(proba).sum(-1)[:, 0])
ax.axhline(math.log(n_actions), c='k', alpha=0.5, lw=1);
fig, ax = plt.subplots(1, 1, figsize=(4, 2), dpi=300)
ax.hist(info['logits'][..., 1] - info['logits'][..., 0], bins=51); # log-ratio
```
<br>
```
assert False
```
<br>
### Other agents
An agent that uses other inputs besides `obs`.
```
# extra layers used below that were not imported earlier in this notebook
from torch.nn import Embedding, LSTM
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence

class CartPoleActor(BaseActorModule):
def __init__(self, epsilon=0.1, lstm='none'):
assert lstm in ('none', 'loop', 'cudnn')
super().__init__()
self.use_lstm = (lstm != 'none')
self.use_cudnn = (lstm == 'cudnn')
# for updating the exploration epsilon in the actor clones
self.register_buffer('epsilon', torch.tensor(epsilon))
# the features
n_output_dim = dict(obs=64, act=8, stepno=0)
self.features = torch.nn.Sequential(
ModuleDict(dict(
obs=Linear(4, n_output_dim['obs']),
act=Embedding(2, n_output_dim['act']),
stepno=Sequential(
OneHotBits(32),
Linear(32, n_output_dim['stepno']),
),
)),
ReLU(),
)
# the core
n_features = sum(n_output_dim.values())
if self.use_lstm:
self.core = LSTM(n_features, 64, 1)
else:
self.core = Sequential(
Linear(n_features, 64, bias=True),
ReLU(),
)
# the rest of the actor's model
self.baseline = Linear(64, 1)
self.policy = Sequential(
Linear(64, 2),
LogSoftmax(dim=-1),
)
def forward(self, obs, act, rew, fin, *, hx=None, stepno=None, virtual=False):
# Everything is [T x B x ...]
input = self.features(locals())
# `input` is T x B x F, `hx` is either `None`, or a proper recurrent state
n_steps, n_envs, *_ = fin.shape
if not self.use_lstm:
# update `hx` into an empty container
out, hx = self.core(input), ()
elif not self.use_cudnn:
outputs = []
for x, m in zip(input.unsqueeze(1), ~fin.unsqueeze(-1)):
# `m` indicates if NO reset took place, otherwise
# multiply by zero to stop the grads
if hx is not None:
hx = suply(m.mul, hx)
# one LSTM step [1 x B x ...]
output, hx = self.core(x, hx)
outputs.append(output)
# compile the output
out = torch.cat(outputs, dim=0)
else:
# sequence padding (MUST have sampling with `sticky=True`)
if n_steps > 1:
lengths = 1 + (~fin[1:]).sum(0).cpu()
input = pack_padded_sequence(input, lengths, enforce_sorted=False)
out, hx = self.core(input, hx)
if n_steps > 1:
out, lens = pad_packed_sequence(
out, batch_first=False, total_length=n_steps)
# apply relu after the core and get the policy
logits = self.policy(out)
# value must not have any trailing dims, i.e. T x B
value = self.baseline(out).squeeze(-1)
if not self.training:
actions = logits.argmax(dim=-1)
else:
# blend the policy with a uniform distribution
prob = logits.detach().exp().mul_(1 - self.epsilon)
prob.add_(self.epsilon / logits.shape[-1])
actions = multinomial(prob)
return actions, hx, dict(value=value, logits=logits)
```
A non-recurrent actor with features shared between the policy and the baseline.
```
class CartPoleActor(BaseActorModule):
def __init__(self, epsilon=0.1, lstm='none'):
assert lstm in ('none', 'loop', 'cudnn')
super().__init__()
self.use_lstm = self.use_cudnn = False
# for updating the exploration epsilon in the actor clones
self.register_buffer('epsilon', torch.tensor(epsilon))
# the features
self.features = Sequential(
Linear(4, 20),
ReLU(),
)
self.baseline = Linear(20, 1)
self.policy = Sequential(
Linear(20, 2),
LogSoftmax(dim=-1),
)
def forward(self, obs, act, rew, fin, *, hx=None, stepno=None, virtual=False):
x = self.features(obs)
# value must not have any trailing dims, i.e. T x B
logits = self.policy(x)
value = self.baseline(x).squeeze(-1)
if not self.training:
actions = logits.argmax(dim=-1)
else:
# blend the policy with a uniform distribution
prob = logits.detach().exp().mul_(1 - self.epsilon)
prob.add_(self.epsilon / logits.shape[-1])
actions = multinomial(prob)
return actions, (), dict(value=value, logits=logits)
```
<br>
```
# stepno = batch.state.stepno
stepno = torch.arange(256)
with torch.no_grad():
out = learner.features[0]['stepno'](stepno)
out = F.linear(F.relu(out), learner.core[1].weight[:, -8:],
bias=learner.core[1].bias)
# out = F.linear(F.relu(out), learner.core.weight_ih_l0[:, -8:],
# bias=learner.core.bias_ih_l0)
# out = F.relu(out)
fig, axes = plt.subplots(3, 3, figsize=(8, 8), dpi=200,
sharex=True, sharey=True)
for j, ax in zip(range(out.shape[1]), axes.flat):
ax.plot(out[:, j], lw=1)
fig.tight_layout(pad=0, h_pad=0, w_pad=0)
with torch.no_grad():
plt.imshow(abs(learner.core[1].weight[:, -8:]).T)
lin = learner.features.stepno[1]
with torch.no_grad():
plt.imshow(abs(lin.weight))
```
<a href="https://colab.research.google.com/github/AI4Finance-Foundation/FinRL/blob/master/FinRL_Raytune_for_Hyperparameter_Optimization_RLlib%20Models.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
#Installing FinRL
%%capture
!pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git
%%capture
!pip install "ray[tune]" optuna
%%capture
!pip install int_date==0.1.8
```
#Importing libraries
```
#Importing the libraries
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# matplotlib.use('Agg')
import datetime
import optuna
%matplotlib inline
from finrl import config
from finrl.finrl_meta.preprocessor.yahoodownloader import YahooDownloader
from finrl.finrl_meta.preprocessor.preprocessors import FeatureEngineer, data_split
from finrl.finrl_meta.env_stock_trading.env_stocktrading_np import StockTradingEnv as StockTradingEnv_numpy
from finrl.agents.rllib.models import DRLAgent as DRLAgent_rllib
from stable_baselines3.common.vec_env import DummyVecEnv
from finrl.finrl_meta.data_processor import DataProcessor
from finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline
import ray
from pprint import pprint
from ray.rllib.agents.ppo import PPOTrainer
from ray.rllib.agents.ddpg import DDPGTrainer
from ray.rllib.agents.a3c import A2CTrainer
from ray.rllib.agents.a3c import a2c
from ray.rllib.agents.ddpg import ddpg, td3
from ray.rllib.agents.ppo import ppo
from ray.rllib.agents.sac import sac
import sys
sys.path.append("../FinRL-Library")
import os
import itertools
from ray import tune
from ray.tune.suggest import ConcurrencyLimiter
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.optuna import OptunaSearch
from ray.tune.registry import register_env
import time
import psutil
psutil_memory_in_bytes = psutil.virtual_memory().total
ray._private.utils.get_system_memory = lambda: psutil_memory_in_bytes
from typing import Dict, Optional, Any
import os
if not os.path.exists("./" + config.DATA_SAVE_DIR):
os.makedirs("./" + config.DATA_SAVE_DIR)
if not os.path.exists("./" + config.TRAINED_MODEL_DIR):
os.makedirs("./" + config.TRAINED_MODEL_DIR)
if not os.path.exists("./" + config.TENSORBOARD_LOG_DIR):
os.makedirs("./" + config.TENSORBOARD_LOG_DIR)
if not os.path.exists("./" + config.RESULTS_DIR):
os.makedirs("./" + config.RESULTS_DIR)
# if not os.path.exists("./" + "tuned_models"):
# os.makedirs("./" + "tuned_models")
```
##Defining the hyperparameter search space
1. You can look up [here](https://docs.ray.io/en/latest/tune/key-concepts.html#search-spaces) to learn how to define hyperparameter search space
2. Jump over to this [link](https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/utils/hyperparams_opt.py) to find the range of different hyperparameter
3. To learn about different hyperparameters for different algorithms for RLlib models, jump over to this [link](https://docs.ray.io/en/latest/rllib-algorithms.html)
```
def sample_ddpg_params():
return {
"buffer_size": tune.choice([int(1e4), int(1e5), int(1e6)]),
"lr": tune.loguniform(1e-5, 1),
"train_batch_size": tune.choice([32, 64, 128, 256, 512])
}
def sample_a2c_params():
return{
"lambda": tune.choice([0.1,0.3,0.5,0.7,0.9,1.0]),
"entropy_coeff": tune.loguniform(0.00000001, 0.1),
"lr": tune.loguniform(1e-5, 1)
}
def sample_ppo_params():
return {
"entropy_coeff": tune.loguniform(0.00000001, 0.1),
"lr": tune.loguniform(5e-5, 1),
"sgd_minibatch_size": tune.choice([ 32, 64, 128, 256, 512]),
"lambda": tune.choice([0.1,0.3,0.5,0.7,0.9,1.0])
}
MODELS = {"a2c": a2c, "ddpg": ddpg, "td3": td3, "sac": sac, "ppo": ppo}
```
## Getting the training and testing environment
```
def get_train_env(start_date, end_date, ticker_list, data_source, time_interval,
technical_indicator_list, env, model_name, if_vix = True,
**kwargs):
#fetch data
DP = DataProcessor(data_source, **kwargs)
data = DP.download_data(ticker_list, start_date, end_date, time_interval)
data = DP.clean_data(data)
data = DP.add_technical_indicator(data, technical_indicator_list)
if if_vix:
data = DP.add_vix(data)
price_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix)
train_env_config = {'price_array':price_array,
'tech_array':tech_array,
'turbulence_array':turbulence_array,
'if_train':True}
return train_env_config
#Function to calculate the sharpe ratio from the list of total_episode_reward
def calculate_sharpe(episode_reward:list):
perf_data = pd.DataFrame(data=episode_reward,columns=['reward'])
perf_data['daily_return'] = perf_data['reward'].pct_change(1)
if perf_data['daily_return'].std() !=0:
sharpe = (252**0.5)*perf_data['daily_return'].mean()/ \
perf_data['daily_return'].std()
return sharpe
else:
return 0
def get_test_config(start_date, end_date, ticker_list, data_source, time_interval,
technical_indicator_list, env, model_name, if_vix = True,
**kwargs):
DP = DataProcessor(data_source, **kwargs)
data = DP.download_data(ticker_list, start_date, end_date, time_interval)
data = DP.clean_data(data)
data = DP.add_technical_indicator(data, technical_indicator_list)
if if_vix:
data = DP.add_vix(data)
price_array, tech_array, turbulence_array = DP.df_to_array(data, if_vix)
test_env_config = {'price_array':price_array,
'tech_array':tech_array,
'turbulence_array':turbulence_array,'if_train':False}
return test_env_config
def val_or_test(test_env_config, agent_path, model_name, env, model_config):
    episode_total_reward = DRL_prediction(model_name, test_env_config,
                                          env=env,
                                          model_config=model_config,
                                          agent_path=agent_path)
    return calculate_sharpe(episode_total_reward), episode_total_reward
TRAIN_START_DATE = '2014-01-01'
TRAIN_END_DATE = '2019-07-30'
VAL_START_DATE = '2019-08-01'
VAL_END_DATE = '2020-07-30'
TEST_START_DATE = '2020-08-01'
TEST_END_DATE = '2021-10-01'
technical_indicator_list =config.INDICATORS
model_name = 'a2c'
env = StockTradingEnv_numpy
ticker_list = ['TSLA']
data_source = 'yahoofinance'
time_interval = '1D'
train_env_config = get_train_env(TRAIN_START_DATE, VAL_END_DATE,
ticker_list, data_source, time_interval,
technical_indicator_list, env, model_name)
```
## Registering the environment
```
from ray.tune.registry import register_env
env_name = 'StockTrading_train_env'
register_env(env_name, lambda config: env(train_env_config))
```
## Running tune
```
MODEL_TRAINER = {'a2c':A2CTrainer,'ppo':PPOTrainer,'ddpg':DDPGTrainer}
if model_name == "ddpg":
sample_hyperparameters = sample_ddpg_params()
elif model_name == "ppo":
sample_hyperparameters = sample_ppo_params()
elif model_name == "a2c":
sample_hyperparameters = sample_a2c_params()
def run_optuna_tune():
algo = OptunaSearch()
algo = ConcurrencyLimiter(algo,max_concurrent=4)
scheduler = AsyncHyperBandScheduler()
num_samples = 10
training_iterations = 100
analysis = tune.run(
MODEL_TRAINER[model_name],
metric="episode_reward_mean", #The metric to optimize for tuning
mode="max", #Maximize the metric
search_alg = algo,#OptunaSearch method which uses Tree Parzen estimator to sample hyperparameters
scheduler=scheduler, #To prune bad trials
config = {**sample_hyperparameters,
'env':'StockTrading_train_env','num_workers':1,
'num_gpus':1,'framework':'torch'},
num_samples = num_samples, #Number of hyperparameters to test out
stop = {'training_iteration':training_iterations},#Time attribute to validate the results
verbose=1,local_dir="./tuned_models",#Saving tensorboard plots
# resources_per_trial={'gpu':1,'cpu':1},
max_failures = 1,#Extra Trying for the failed trials
raise_on_failed_trial=False,#Don't return error even if you have errored trials
keep_checkpoints_num = num_samples-5,
checkpoint_score_attr ='episode_reward_mean',#Only store keep_checkpoints_num trials based on this score
        checkpoint_freq=training_iterations  # Checkpointing all the trials
)
print("Best hyperparameter: ", analysis.best_config)
return analysis
analysis = run_optuna_tune()
```
## Best config, directory and checkpoint for hyperparameters
```
best_config = analysis.get_best_config(metric='episode_reward_mean',mode='max')
best_config
best_logdir = analysis.get_best_logdir(metric='episode_reward_mean',mode='max')
best_logdir
best_checkpoint = analysis.best_checkpoint
best_checkpoint
# sharpe,df_account_test,df_action_test = val_or_test(TEST_START_DATE, TEST_END_DATE, ticker_list, data_source, time_interval,
# technical_indicator_list, env, model_name,best_checkpoint, if_vix = True)
test_env_config = get_test_config(TEST_START_DATE, TEST_END_DATE, ticker_list, data_source, time_interval,
technical_indicator_list, env, model_name)
# `val_or_test` needs the tuned config and checkpoint; the explicit `DRL_prediction`
# call below (with `best_config` and `best_checkpoint`) is used instead.
# sharpe, account, actions = val_or_test(test_env_config, best_checkpoint, model_name, env, best_config)
def DRL_prediction(
model_name,
test_env_config,
env,
model_config,
agent_path,
env_name_test='StockTrading_test_env'
):
env_instance = env(test_env_config)
register_env(env_name_test, lambda config: env(test_env_config))
model_config['env'] = env_name_test
# ray.init() # Other Ray APIs will not work until `ray.init()` is called.
if model_name == "ppo":
trainer = MODELS[model_name].PPOTrainer(config=model_config)
elif model_name == "a2c":
trainer = MODELS[model_name].A2CTrainer(config=model_config)
elif model_name == "ddpg":
trainer = MODELS[model_name].DDPGTrainer(config=model_config)
elif model_name == "td3":
trainer = MODELS[model_name].TD3Trainer(config=model_config)
elif model_name == "sac":
trainer = MODELS[model_name].SACTrainer(config=model_config)
try:
trainer.restore(agent_path)
print("Restoring from checkpoint path", agent_path)
except BaseException:
raise ValueError("Fail to load agent!")
# test on the testing env
state = env_instance.reset()
episode_returns = list() # the cumulative_return / initial_account
episode_total_assets = list()
episode_total_assets.append(env_instance.initial_total_asset)
done = False
while not done:
action = trainer.compute_single_action(state)
state, reward, done, _ = env_instance.step(action)
total_asset = (
env_instance.amount
+ (env_instance.price_ary[env_instance.day] * env_instance.stocks).sum()
)
episode_total_assets.append(total_asset)
episode_return = total_asset / env_instance.initial_total_asset
episode_returns.append(episode_return)
ray.shutdown()
print("episode return: " + str(episode_return))
print("Test Finished!")
return episode_total_assets
episode_total_assets = DRL_prediction(
model_name,
test_env_config,
env,
best_config,
best_checkpoint,
env_name_test='StockTrading_test_env')
print('The test sharpe ratio is: ',calculate_sharpe(episode_total_assets))
df_account_test = pd.DataFrame(data=episode_total_assets,columns=['account_value'])
```
# Matrix Factorization for Recommender Systems - Part 2
As seen in [Part 1](/examples/matrix-factorization-for-recommender-systems-part-1), the strength of [Matrix Factorization (MF)](https://en.wikipedia.org/wiki/Matrix_factorization_(recommender_systems)) lies in its ability to deal with sparse and high cardinality categorical variables. In this second tutorial we will have a look at the Factorization Machines (FM) algorithm and study how it generalizes the power of MF.
**Table of contents of this tutorial series on matrix factorization for recommender systems:**
- [Part 1 - Traditional Matrix Factorization methods for Recommender Systems](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-1)
- [Part 2 - Factorization Machines and Field-aware Factorization Machines](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-2)
- [Part 3 - Large scale learning and better predictive power with multiple pass learning](https://online-ml.github.io/examples/matrix-factorization-for-recommender-systems-part-3)
## Factorization Machines
Steffen Rendle came up in 2010 with [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf), an algorithm able to handle any real-valued feature vector, combining the advantages of general predictors with factorization models. It became quite popular in the field of online advertising, notably after winning several Kaggle competitions. The modeling technique starts with a linear regression to capture the effects of each variable individually:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j}
$$
Interaction terms are then added to learn feature relations. Instead of learning a single and specific weight per interaction (as in [polynomial regression](https://en.wikipedia.org/wiki/Polynomial_regression)), a set of latent factors is learnt per feature (as in MF). An interaction is calculated by multiplying the product of the involved features with the dot product of their latent vectors. The degree of factorization (or model order) represents the maximum number of features considered per interaction. The model equation for a factorization machine of degree $d = 2$ is defined as:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j} + \sum_{j=1}^{p} \sum_{j'=j+1}^{p} \langle \mathbf{v}_j, \mathbf{v}_{j'} \rangle x_{j} x_{j'}
$$
Where $\normalsize \langle \mathbf{v}_j, \mathbf{v}_{j'} \rangle$ is the dot product of $j$ and $j'$ latent vectors:
$$
\normalsize
\langle \mathbf{v}_j, \mathbf{v}_{j'} \rangle = \sum_{f=1}^{k} \mathbf{v}_{j, f} \cdot \mathbf{v}_{j', f}
$$
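As a quick sanity check (not part of the original tutorial), here is the degree $d = 2$ formula evaluated by hand on a toy example with $p = 2$ features and $k = 2$ latent factors:
```
import numpy as np

w0, w = 0.1, np.array([0.2, 0.3])   # intercept and linear weights
V = np.array([[0.1, 0.2],           # one latent vector per feature
              [0.3, -0.1]])
x = np.array([1.0, 1.0])            # both features active

pairwise = sum(
    V[j] @ V[jp] * x[j] * x[jp]
    for j in range(len(x)) for jp in range(j + 1, len(x))
)
y_hat = w0 + w @ x + pairwise       # 0.1 + 0.5 + 0.01 = 0.61
```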
Higher-order FM will be covered in a following section; just note that factorization models express their power in sparse settings, which is also where higher-order interactions are hard to estimate.
Strong emphasis must be placed on feature engineering, as it allows FM to mimic most factorization models and significantly impacts its performance. One-hot encoding of high-cardinality categorical variables is the most frequent step before feeding the model with data. For more efficiency, the `river` FM implementation considers string values as categorical variables and automatically one-hot encodes them. FM models have their own module [river.facto](/api/overview/#facto).
## Mimic Biased Matrix Factorization (BiasedMF)
Let's start with a simple example where we want to reproduce the Biased Matrix Factorization model we trained in the previous tutorial. For a fair comparison with [Part 1 example](/examples/matrix-factorization-for-recommender-systems-part-1/#biased-matrix-factorization-biasedmf), let's set the same evaluation framework:
```
from river import datasets
from river import metrics
from river.evaluate import progressive_val_score
def evaluate(model):
X_y = datasets.MovieLens100K()
metric = metrics.MAE() + metrics.RMSE()
_ = progressive_val_score(X_y, model, metric, print_every=25_000, show_time=True, show_memory=True)
```
In order to build an equivalent model we need to use the same hyper-parameters. As we can't replace the FM intercept with the global running mean, we won't be able to build exactly the same model:
```
from river import compose
from river import facto
from river import meta
from river import optim
from river import stats
fm_params = {
'n_factors': 10,
'weight_optimizer': optim.SGD(0.025),
'latent_optimizer': optim.SGD(0.05),
'sample_normalization': False,
'l1_weight': 0.,
'l2_weight': 0.,
'l1_latent': 0.,
'l2_latent': 0.,
'intercept': 3,
'intercept_lr': .01,
'weight_initializer': optim.initializers.Zeros(),
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.1, seed=73),
}
regressor = compose.Select('user', 'item')
regressor |= facto.FMRegressor(**fm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
Both MAEs are very close to each other (0.7486 vs 0.7485), showing that we almost reproduced the [reco.BiasedMF](/api/reco/BiasedMF/) algorithm. The cost is a naturally slower running time, as the FM implementation offers more flexibility.
## Feature engineering for FM models
Let's study the basics of how to properly encode data for FM models. We are going to keep using MovieLens 100K as it provides various feature types:
```
import json
for x, y in datasets.MovieLens100K():
print(f'x = {json.dumps(x, indent=4)}\ny = {y}')
break
```
The features we are going to add to our model don't improve its predictive power. Nevertheless, they are useful to illustrate different methods of data encoding:
1. Set-categorical variables
We have seen that categorical variables are one-hot encoded automatically if set to strings; on the other hand, set-categorical variables must be encoded explicitly by the user. A good way of doing so is to assign each element a value of $1/m$, where $m$ is the number of elements of the sample set. It gives the feature a constant "weight" across all samples, preserving the model's stability. Let's create a routine to encode movie genres this way:
```
def split_genres(x):
genres = x['genres'].split(', ')
return {f'genre_{genre}': 1 / len(genres) for genre in genres}
```
2. Numerical variables
In practice, transforming numerical features into categorical ones works better in most cases. Feature binning is the natural way, but finding good bins is sometimes more an art than a science. Let's encode users age with something simple:
```
def bin_age(x):
if x['age'] <= 18:
return {'age_0-18': 1}
elif x['age'] <= 32:
return {'age_19-32': 1}
elif x['age'] < 55:
return {'age_33-54': 1}
else:
return {'age_55-100': 1}
```
Let's put everything together:
```
fm_params = {
'n_factors': 14,
'weight_optimizer': optim.SGD(0.01),
'latent_optimizer': optim.SGD(0.025),
'intercept': 3,
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),
}
regressor = compose.Select('user', 'item')
regressor += (
compose.Select('genres') |
compose.FuncTransformer(split_genres)
)
regressor += (
compose.Select('age') |
compose.FuncTransformer(bin_age)
)
regressor |= facto.FMRegressor(**fm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
Note that using more variables involves factorizing a larger latent space, so increasing the number of latent factors $k$ often helps capture more information.
Some other feature engineering tips from [3 idiots' winning solution](https://www.kaggle.com/c/criteo-display-ad-challenge/discussion/10555) for Kaggle [Criteo display ads](https://www.kaggle.com/c/criteo-display-ad-challenge) competition in 2014:
- Infrequent modalities often bring noise and little information; transforming them into a special tag can help (see the sketch after this list)
- In some cases, sample-wise normalization seems to make the optimization problem easier to be solved
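To illustrate the first tip, here is a minimal sketch (not taken from the winning solution) that maps infrequent modalities to a shared `'rare'` tag before feeding them to the model; the `counts` dictionary and the threshold are made-up examples and would normally be computed from the training data:
```
def tag_rare(value, counts, min_count=10):
    """Replace modalities seen fewer than `min_count` times with a shared tag."""
    return value if counts.get(value, 0) >= min_count else 'rare'

counts = {'Drama': 250, 'Film-Noir': 3}     # hypothetical frequencies
tag_rare('Drama', counts)      # 'Drama'
tag_rare('Film-Noir', counts)  # 'rare'
```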
## Higher-Order Factorization Machines (HOFM)
The model equation generalized to any order $d \geq 2$ is defined as:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j} + \sum_{l=2}^{d} \sum_{j_1=1}^{p} \cdots \sum_{j_l=j_{l-1}+1}^{p} \left(\prod_{j'=1}^{l} x_{j_{j'}} \right) \left(\sum_{f=1}^{k_l} \prod_{j'=1}^{l} v_{j_{j'}, f}^{(l)} \right)
$$
```
hofm_params = {
'degree': 3,
'n_factors': 12,
'weight_optimizer': optim.SGD(0.01),
'latent_optimizer': optim.SGD(0.025),
'intercept': 3,
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),
}
regressor = compose.Select('user', 'item')
regressor += (
compose.Select('genres') |
compose.FuncTransformer(split_genres)
)
regressor += (
compose.Select('age') |
compose.FuncTransformer(bin_age)
)
regressor |= facto.HOFMRegressor(**hofm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
As said previously, high-order interactions are often hard to estimate due to sparsity, which is why we won't spend too much time on them here.
## Field-aware Factorization Machines (FFM)
The [field-aware variant of FM (FFM)](https://www.csie.ntu.edu.tw/~cjlin/papers/ffm.pdf) improved the original method by adding the notion of "*fields*". A "*field*" is a group of features that belong to a specific domain (e.g. the "*users*" field, the "*items*" field, or the "*movie genres*" field).
FFM restricts itself to pairwise interactions and factorizes separated latent spaces — one per combination of fields (e.g. users/items, users/movie genres, or items/movie genres) — instead of a common one shared by all fields. Therefore, each feature has one latent vector per field it can interact with — so that it can learn the specific effect with each different field.
The model equation is defined by:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j} + \sum_{j=1}^{p} \sum_{j'=j+1}^{p} \langle \mathbf{v}_{j, f_{j'}}, \mathbf{v}_{j', f_{j}} \rangle x_{j} x_{j'}
$$
Where $f_j$ and $f_{j'}$ are the fields corresponding to $j$ and $j'$ features, respectively.
```
ffm_params = {
'n_factors': 8,
'weight_optimizer': optim.SGD(0.01),
'latent_optimizer': optim.SGD(0.025),
'intercept': 3,
'latent_initializer': optim.initializers.Normal(mu=0., sigma=0.05, seed=73),
}
regressor = compose.Select('user', 'item')
regressor += (
compose.Select('genres') |
compose.FuncTransformer(split_genres)
)
regressor += (
compose.Select('age') |
compose.FuncTransformer(bin_age)
)
regressor |= facto.FFMRegressor(**ffm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
Note that FFM usually needs a smaller number of latent factors $k$ than FM, as each latent vector only deals with one field.
## Field-weighted Factorization Machines (FwFM)
[Field-weighted Factorization Machines (FwFM)](https://arxiv.org/abs/1806.03514) address FFM memory issues caused by its large number of parameters, which is on the order of the *number of features* times the *number of fields*. Like FFM, FwFM is an extension of FM restricted to pairwise interactions, but instead of factorizing separated latent spaces, it learns a specific weight $r_{f_j, f_{j'}}$ for each field combination, modelling the interaction strength.
The model equation is defined as:
$$
\normalsize
\hat{y}(x) = w_{0} + \sum_{j=1}^{p} w_{j} x_{j} + \sum_{j=1}^{p} \sum_{j'=j+1}^{p} r_{f_j, f_{j'}} \langle \mathbf{v}_j, \mathbf{v}_{j'} \rangle x_{j} x_{j'}
$$
```
fwfm_params = {
'n_factors': 10,
'weight_optimizer': optim.SGD(0.01),
'latent_optimizer': optim.SGD(0.025),
'intercept': 3,
'seed': 73,
}
regressor = compose.Select('user', 'item')
regressor += (
compose.Select('genres') |
compose.FuncTransformer(split_genres)
)
regressor += (
compose.Select('age') |
compose.FuncTransformer(bin_age)
)
regressor |= facto.FwFMRegressor(**fwfm_params)
model = meta.PredClipper(
regressor=regressor,
y_min=1,
y_max=5
)
evaluate(model)
```
```
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
%matplotlib inline
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
import numpy as np
import torch, torch.optim
import torch.nn.functional as F
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark =True
dtype = torch.cuda.FloatTensor
import os, sys
sys.path.append('utils/*')
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import models as md
import utils.common_utils as cu
import utils.diffuser_utils as df
import utils.utils_hyperspectral as helper
```
# Single-shot Imaging Demo
Load in the PSF, 2D measurement and rolling shutter mask.
```
simulated = True # True: Use a simulated measurement or False: use an experimental measurement
downsampling_factor = 2
meas_np, mask_np, psf_np, gt_np = helper.load_data(simulated = simulated)
plt.figure(figsize=(20,10))
plt.subplot(1,3,1);plt.title('PSF');plt.imshow(psf_np)
plt.subplot(1,3,2);plt.title('Measurement');plt.imshow(meas_np)
plt.subplot(1,3,3);plt.title('Rolling shutter mask');plt.imshow(mask_np[:,:,20])
```
Initialize the lensless forward model
```
DIMS0 = meas_np.shape[0] # Image Dimensions
DIMS1 = meas_np.shape[1] # Image Dimensions
py = int((DIMS0)//2) # Pad size
px = int((DIMS1)//2) # Pad size
def pad(x):
if len(x.shape) == 2:
out = np.pad(x, ([py, py], [px,px]), mode = 'constant')
elif len(x.shape) == 3:
out = np.pad(x, ([py, py], [px,px], [0, 0]), mode = 'constant')
elif len(x.shape) == 4:
out = np.pad(x, ([py, py], [px,px], [0, 0], [0, 0]), mode = 'constant')
return out
#meas_np = pad(meas_np)
psf_pad = pad(psf_np)
h_full = np.fft.fft2(np.fft.ifftshift(psf_pad))
forward = df.Forward_Model_combined(h_full,
shutter = mask_np,
imaging_type = 'spectral')
if simulated == True:
meas_torch = forward(cu.np_to_torch(gt_np.transpose(2,0,1)).type(dtype).unsqueeze(0))
meas_np = cu.torch_to_np(meas_torch)[0]
plt.imshow(meas_np)
```
Set up parameters and network
```
# Define network hyperparameters:
input_depth = 32
INPUT = 'noise'
pad = 'reflection'
LR = 1e-3
tv_weight = 0
reg_noise_std = 0.05
if simulated == True:
num_iter = 100000
net_input = cu.get_noise(input_depth, INPUT, (meas_np.shape[0], meas_np.shape[1])).type(dtype).detach()
else:
num_iter = 4600
input_depth = 1
net_input = cu.get_noise(input_depth, INPUT, (mask_np.shape[-1], meas_np.shape[0], meas_np.shape[1])).type(dtype).detach()
# Initialize network input
net_input_saved = net_input.detach().clone()
noise = net_input.detach().clone()
# reinitialize network and optimizer
if simulated == True:
NET_TYPE = 'skip'
net = md.get_net(input_depth, NET_TYPE, pad, n_channels=32, skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=5,upsample_mode='bilinear').type(dtype)
else:
print('experimental')
NET_TYPE = 'skip3D'
input_depth = 1
net = md.get_net(input_depth, NET_TYPE, pad, n_channels=1, skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=4,upsample_mode='trilinear').type(dtype)
#NET_TYPE = 'skip'
#net = md.get_net(input_depth, NET_TYPE, pad, n_channels=32, skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=5,upsample_mode='bilinear').type(dtype)
p = [x for x in net.parameters()]
optimizer = torch.optim.Adam(p, lr=LR)
# Losses
mse = torch.nn.MSELoss().type(dtype)
def main():
global recons
full_recons = []
meas_ts = cu.np_to_ts(meas_np)
meas_ts = meas_ts.detach().clone().type(dtype).cuda()
for i in range(num_iter):
optimizer.zero_grad()
net_input = net_input_saved + (noise.normal_() * reg_noise_std)
recons = net(net_input)
gen_meas = forward.forward(recons)
gen_meas = F.normalize(gen_meas, dim=[1,2], p=2)
loss = mse(gen_meas, meas_ts)
loss += tv_weight * df.tv_loss(recons)
loss.backward()
print('Iteration %05d, loss %.8f '%(i, loss.item()), '\r', end='')
if i % 100 == 0:
helper.plot(recons)
print('Iteration {}, loss {:.8f}'.format(i, loss.item()))
optimizer.step()
full_recons = helper.preplot(recons)
return full_recons
```
### Run the reconstruction
```
full_recons = main()
full_recons = helper.preplot2(recons)
```
Reconstructed video
```
def plot_slider(x):
plt.title('Reconstruction: frame %d'%(x))
plt.axis('off')
plt.imshow(full_recons[...,x])
return x
interactive(plot_slider,x=(0,full_recons.shape[-1]-1,1))
```
```
from sklearn.datasets import load_wine
wine_data = load_wine()
dir(wine_data)
print(wine_data.DESCR)
inputs = wine_data.data
output = wine_data.target
inputs.shape
output.shape
wine_data.feature_names
import pandas as pd
df = pd.DataFrame(inputs, columns=wine_data.feature_names)
df = pd.concat([df, pd.DataFrame(output)], axis=1)
df
df.describe()
df.describe().style.format("{:.5f}")
import matplotlib.pyplot as plt
plt.matshow(df.corr())
plt.xticks(range(len(df.columns)), df.columns)
plt.yticks(range(len(df.columns)), df.columns)
plt.colorbar()
plt.show()
```
Chapter Break
```
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(inputs, output, test_size=0.33, random_state=42)
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(LinearRegression())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
from sklearn.linear_model import Ridge
# tactic 1: minimize weights, smaller the better, higher penalty on large weights
# = ridge regression
pipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), Ridge())
pipe.fit(X_train, y_train)
pipe.steps[2][1].coef_
pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
from sklearn.linear_model import Lasso
# tactic 2: minimize number of non-zero weights
# = Lasso
pipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), Lasso())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
pipe.steps[2][1].coef_
pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()
from sklearn.linear_model import ElasticNet
# tactic 3: mix lasso and ridge!
# = elasticnet
pipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=3), ElasticNet())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
pipe.steps[2][1].coef_
pipe.steps[2][1].coef_.max(), pipe.steps[2][1].coef_.min(), pipe.steps[2][1].coef_.std()
```
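For reference, these are the penalized least-squares objectives behind the three tactics used above (standard formulations; scikit-learn's exact scaling of the data-fit term can differ slightly between estimators). Here $\alpha$ controls the overall penalty strength and $\rho$ (scikit-learn's `l1_ratio`) mixes the L1 and L2 terms:
$$
\text{Ridge:}\quad \min_{w} \; \lVert y - Xw \rVert_2^2 + \alpha \lVert w \rVert_2^2
$$
$$
\text{Lasso:}\quad \min_{w} \; \tfrac{1}{2n} \lVert y - Xw \rVert_2^2 + \alpha \lVert w \rVert_1
$$
$$
\text{Elastic Net:}\quad \min_{w} \; \tfrac{1}{2n} \lVert y - Xw \rVert_2^2 + \alpha \rho \lVert w \rVert_1 + \tfrac{\alpha (1-\rho)}{2} \lVert w \rVert_2^2
$$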
----
# Implementing a model to classify wines
```
from sklearn.datasets import load_wine
wine_data = load_wine()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
wine_data.data, wine_data.target, test_size=0.5, random_state=42)
import numpy as np
import pandas as pd
df_x_train = pd.DataFrame(X_train, columns=wine_data.feature_names)
df_x_train.describe()
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn import linear_model
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
pipe = make_pipeline(GaussianNB())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train), pipe.score(X_test, y_test)
confusion_matrix(y_test, pipe.predict(X_test))
pipe = make_pipeline(svm.SVC())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train), pipe.score(X_test, y_test)
confusion_matrix(y_test, pipe.predict(X_test))
pipe = make_pipeline(linear_model.LogisticRegression())
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train), pipe.score(X_test, y_test)
confusion_matrix(y_test, pipe.predict(X_test))
```
**[Deep Learning Course Home Page](https://www.kaggle.com/learn/deep-learning)**
---
# Exercise Introduction
We will return to the automatic rotation problem you worked on in the previous exercise. But we'll add data augmentation to improve your model.
The model specification and compilation steps don't change when you start using data augmentation. The code you've already worked with for specifying and compiling a model is in the cell below. Run it so you'll be ready to work on data augmentation.
```
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D
num_classes = 2
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
my_new_model = Sequential()
my_new_model.add(ResNet50(include_top=False, pooling='avg', weights=resnet_weights_path))
my_new_model.add(Dense(num_classes, activation='softmax'))
my_new_model.layers[0].trainable = False
my_new_model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.deep_learning.exercise_5 import *
print("Setup Complete")
```
# 1) Fit the Model Using Data Augmentation
Here is some code to set up some ImageDataGenerators. Run it, and then answer the questions below about it.
```
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
image_size = 224
# Specify the values for all arguments to data_generator_with_aug.
data_generator_with_aug = ImageDataGenerator(preprocessing_function=preprocess_input,
horizontal_flip = True,
width_shift_range = 0.1,
height_shift_range = 0.1)
data_generator_no_aug = ImageDataGenerator(preprocessing_function=preprocess_input)
```
Why do we need both a generator with augmentation and a generator without augmentation? After thinking about it, check out the solution below.
```
q_1.solution()
```
# 2) Choosing Augmentation Types
ImageDataGenerator offers many types of data augmentation. For example, one argument is `rotation_range`. This rotates each image by a random amount that can be up to whatever value you specify.
Would it be sensible to use automatic rotation for this problem? Why or why not?
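For reference (whatever your answer), augmentation options like this are just constructor arguments. A minimal sketch with a hypothetical value, separate from the generators the exercise checker uses:
```
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Example only: rotate each training image by a random angle of up to +/- 15 degrees.
data_generator_with_rotation = ImageDataGenerator(preprocessing_function=preprocess_input,
                                                  rotation_range=15)
```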
```
q_2.solution()
```
# 3) Code
Fill in the missing pieces in the following code. We've supplied some boilerplate. You need to think about which ImageDataGenerator should be used for each data source.
```
# Specify which type of ImageDataGenerator above is to load in training data
train_generator = data_generator_with_aug.flow_from_directory(
directory = '../input/dogs-gone-sideways/images/train',
target_size=(image_size, image_size),
batch_size=12,
class_mode='categorical')
# Specify which type of ImageDataGenerator above is to load in validation data
validation_generator = data_generator_no_aug.flow_from_directory(
directory = '../input/dogs-gone-sideways/images/val',
target_size=(image_size, image_size),
class_mode='categorical')
my_new_model.fit_generator(
train_generator, # if you don't know what argument goes first, try the hint
epochs = 3,
steps_per_epoch=19,
validation_data=validation_generator)
q_3.check()
# q_3.hint()
# q_3.solution()
```
# 4) Did Data Augmentation Help?
How could you test whether data augmentation improved your model accuracy?
```
q_4.solution()
```
# Keep Going
You are ready for **[a deeper understanding of deep learning](https://www.kaggle.com/dansbecker/a-deeper-understanding-of-deep-learning/)**.
---
**[Deep Learning Course Home Page](https://www.kaggle.com/learn/deep-learning)**
# Interactive Plotting with Jupyter
There are several ways to interactively plot. In this tutorial I will show how to interact with 2D and 1D data. There are other ways to interact with large tables of data using either [Bokeh](https://docs.bokeh.org/en/latest/index.html) (shown in the Skyfit notebook) or [Glue](http://docs.glueviz.org/en/stable). A non-Python solution that also works with large tables of data is Topcat.
Most of the methods here will work on the command line. In order to make this work within Jupyter you will need the following modules.
```
conda install -c conda-forge ipympl
conda install -c conda-forge ipywidgets
```
https://ipywidgets.readthedocs.io/
```
import sys
import astropy
import astroquery
import ipywidgets
import matplotlib
print('\n Python version: ', sys.version)
print('\n Astropy version: ', astropy.__version__)
print('\n Matplotlib version: ', matplotlib.__version__)
print('\n Astroquery version: ', astroquery.__version__)
print('\n ipywidgets version: ', ipywidgets.__version__)
import glob,os,sys
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pyfits
import astropy.units as u
from astroquery.skyview import SkyView
import ipywidgets as widgets
```
Here we need an image to play with; we can either download it via SkyView or load one from our machine.
```
ext = 0
# download an image
pflist = SkyView.get_images(position='M82', survey=['SDSSr'], radius=10 * u.arcmin)
pf = pflist[0] # first element of the list, might need a loop if multiple images
# or load an image
#pf = pyfits.open('m82.fits')
image = pf[ext].data
```
Next we need to turn on the interactive plotting.
```
# turn-on interactive plots
%matplotlib widget
```
# Display an image (2D data)
We plot a 2D image using `imshow`; we can set the scale of the image as well as the colormap.
```
#plt.ioff()
fig = plt.figure(figsize=[6,6])
plt.ion()
p = fig.add_subplot(111)
p.imshow(image, interpolation='Nearest', origin='lower', vmin=-10, vmax=20, cmap='viridis')
plt.show()
```
# Add an event to the display
There are several types of matplotlib events that you can use to interact with a figure.
A few useful events are the following:
`button_press_event`
`button_release_event`
`key_press_event`
`key_release_event`
For more information on event handling and examples check out the following website:
https://matplotlib.org/stable/users/event_handling.html
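For example, a mouse-click handler is attached in the same way as the key handler used below. A minimal sketch, assuming the `fig` and `p` axes from the previous cell:
```
# Minimal sketch: mark each clicked location with a blue cross.
def on_button_press(event):
    if event.inaxes is not None:              # ignore clicks outside the axes
        p.plot(event.xdata, event.ydata, 'b+', markersize=8)
        fig.canvas.draw_idle()

cid = fig.canvas.mpl_connect('button_press_event', on_button_press)
# fig.canvas.mpl_disconnect(cid)  # detach the handler when it is no longer needed
```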
Here we add a Python function linked to the `key_press_event`. The function checks which key was pressed and, if the condition is met, runs its code, in this case plotting a red point on the image. We can easily add more keys, adding more functionality to our interactive figure.
```
#plt.ioff()
fig = plt.figure(figsize=[6,6])
plt.ion()
p = fig.add_subplot(111)
p.imshow(image, interpolation='Nearest', origin='lower', vmin=-10, vmax=20, cmap='viridis')
def on_key_press(event):
xc, yc = event.xdata, event.ydata
if event.key == 'm':
p.plot(xc,yc,'ro', markersize=5)
fig.canvas.draw_idle()
fig.canvas.mpl_connect('key_press_event', on_key_press)
plt.show()
```
# Add output to the display with the event
If we want to display the coordinates of the points we mark, we need to use the Output widget.
```
#plt.ioff()
fig = plt.figure(figsize=[6,6])
plt.ion()
p = fig.add_subplot(111)
p.imshow(image, interpolation='Nearest', origin='lower', vmin=-10, vmax=20, cmap='viridis')
out = widgets.Output()
@out.capture()
def on_key_press(event):
xc, yc = event.xdata, event.ydata
if event.key == 'm':
p.plot(xc,yc,'ro', markersize=5)
fig.canvas.draw_idle()
print("[%.1f, %.1f] = %.4f" % (xc, yc, image[int(yc),int(xc)]))
fig.canvas.mpl_connect('key_press_event', on_key_press)
display(out)
```
We can also write a Python class; this makes it more convenient for dealing with multiple interactive events (i.e. key presses, mouse clicks, dragging, etc.).
```
class GUI_inter:
def __init__(self,fig,img):
self.fig = fig
self.p = self.fig.gca()
self.img = img
self.display()
def display(self,sigma=20.0):
plt.clf()
self.v0 = np.mean(self.img) - sigma * np.std(self.img)
self.v1 = np.mean(self.img) + sigma * np.std(self.img)
self.p = self.fig.add_subplot(111)
self.p.imshow(self.img, interpolation='Nearest', origin='lower',
vmin=self.v0, vmax=self.v1, cmap='viridis')
plt.draw()
def on_key_press(self, event):
xc, yc = event.xdata, event.ydata
if event.key == 'm':
self.p.plot(xc,yc,'ro', markersize=5)
self.fig.canvas.draw_idle()
print("[%.2f, %.2f]" % (xc,yc))
fig = plt.figure(figsize=[6,6])
G = GUI_inter(fig, image)
fig.canvas.mpl_connect('key_press_event', G.on_key_press)
#display(fig)
```
# Interactive 1D data
```
slice = image[150,:]
fig = plt.figure(figsize=[6,6])
p = fig.add_subplot(111)
p.plot(slice)
plt.show()
zl,xl = image.shape
fig = plt.figure(figsize=[6,6])
p = fig.add_subplot(111)
#p.set_yscale('log')
slice = image[150,:]
line, = p.plot(slice)
def update(change):
line.set_ydata(image[change.new,:])
fig.canvas.draw()
int_slider = widgets.IntSlider(
value=150,
min=0,
max=zl,
step=1,
description='Z-axis:',
continuous_update=False
)
int_slider.observe(update, 'value')
int_slider
from astroquery.sdss import SDSS
from astropy import coordinates
ra, dec = 148.969687, 69.679383
co = coordinates.SkyCoord(ra=ra, dec=dec,unit=(u.deg, u.deg), frame='fk5')
xid = SDSS.query_region(co, radius=20 * u.arcmin, spectro=True)
sp = SDSS.get_spectra(matches=xid)
print("N =",len(sp))
pf = sp[0]
ext = 1
pf[ext].header
tab = pf[ext].data
spec = tab['flux']
wave = 10**tab['loglam']
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.plot(wave,spec)
ax.set_xlabel('Wavelength [Angstroms]')
ax.set_ylabel('Flux')
ext = 1
n_max = len(sp)-1 # total number of spectra - 1
pf = sp[0]
pf[ext].header
tab = pf[ext].data
spec = tab['flux']
wave = 10**tab['loglam']
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
line, = ax.plot(wave,spec)
ax.set_xlabel('Wavelength [Angstroms]')
ax.set_ylabel('Flux')
def new_spec(change):
pf = sp[change.new]
pf[ext].header
tab = pf[ext].data
spec = tab['flux']
wave = 10**tab['loglam']
line.set_xdata(wave)
line.set_ydata(spec)
fig.canvas.draw()
int_slider = widgets.IntSlider(
value=0,
min=0,
max=n_max,
step=1,
description='Spectrum:',
continuous_update=False
)
int_slider.observe(new_spec, 'value')
int_slider
ext = 1
n_max = len(sp)-1 # total number of spectra - 1
pf = sp[0]
pf[ext].header
tab = pf[ext].data
spec = tab['flux']
wave = 10**tab['loglam']
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
line, = ax.plot(wave,spec)
ax.set_xlabel('Wavelength [Angstroms]')
ax.set_ylabel('Flux')
line2, = ax.plot([6563,6563],[0,20],"--",c="r")
line2.set_visible(False)
def new_spec(change):
pf = sp[change.new]
pf[ext].header
tab = pf[ext].data
spec = tab['flux']
wave = 10**tab['loglam']
line.set_xdata(wave)
line.set_ydata(spec)
fig.canvas.draw()
def display_lines(change):
if change.new: line2.set_visible(True)
else: line2.set_visible(False)
fig.canvas.draw()
int_slider = widgets.IntSlider(
value=0,
min=0,
max=n_max,
step=1,
description='Spectrum:',
continuous_update=False
)
int_slider.observe(new_spec, 'value')
display(int_slider)
chk_box = widgets.Checkbox(
value=False,
description='Line list',
)
chk_box.observe(display_lines, 'value')
display(chk_box)
# turn-off interactive plots
%matplotlib inline
```
# Resources
https://ipywidgets.readthedocs.io/
https://ipywidgets.readthedocs.io/en/stable/examples/Widget%20List.html
https://ipywidgets.readthedocs.io/en/latest/examples/Output%20Widget.html
https://kapernikov.com/ipywidgets-with-matplotlib/
https://matplotlib.org/stable/users/event_handling.html
https://docs.bokeh.org/en/latest/index.html
http://docs.glueviz.org/en/stable
# LetsGrowMore
## ***Virtual Internship Program***
***Data Science Tasks***
### ***Author: SARAVANAVEL***
# ***ADVANCED LEVEL TASK***
### Task 9 - Handwritten equation solver using CNN
A simple mathematical equation solver using character and symbol recognition with image processing and a CNN
## 1. Import Libraries/Packages
```
import numpy as np
import pandas as pd
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from imutils.contours import sort_contours
import imutils
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
```
## Data preprocessing
```
print(os.listdir("./input")) #without extracting the data.rar file
```
## Data Augmentation
```
train_datagen = ImageDataGenerator(
rescale = 1./255,
shear_range = 0.2,
zoom_range = 0.2,
validation_split = 0.25
)
data_path='./input/extracted_images'
train_set = train_datagen.flow_from_directory(
data_path,
target_size = (40, 40),
color_mode = 'grayscale',
batch_size = 32,
class_mode = 'categorical',
shuffle = True,
subset='training',
seed = 123
)
valid_set = train_datagen.flow_from_directory(
data_path,
target_size = (40, 40),
color_mode = 'grayscale',
batch_size = 32,
class_mode = 'categorical',
shuffle = True,
subset='validation',
seed = 123
)
```
## Model Building
```
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(40, 40, 1)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(18, activation='softmax'))
# compile model
adam = tf.keras.optimizers.Adam(learning_rate = 5e-4)
model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
```
## Model Training
```
history=model.fit(train_set,
validation_data=valid_set,
epochs=1,
verbose=1)
```
## Model evaluation
```
val_loss, val_accuracy = model.evaluate(valid_set)
print(val_loss,val_accuracy)
train_set.class_indices
print('\n', train_set.class_indices, sep="\n")
label_map = (train_set.class_indices)
label_map
def prediction(img):
#img = cv2.imread(img, cv2.IMREAD_GRAYSCALE)
plt.imshow(img, cmap = 'gray')
img = cv2.resize(img,(40, 40))
norm_image = cv2.normalize(img, None, alpha = 0, beta = 1, norm_type = cv2.NORM_MINMAX, dtype = cv2.CV_32F)
#norm_image=img/255
norm_image = norm_image.reshape((norm_image.shape[0], norm_image.shape[1], 1))
case = np.asarray([norm_image])
pred = (model.predict_classes([case]))
return ([i for i in train_set.class_indices if train_set.class_indices[i]==(pred[0])][0],pred)
image = cv2.imread('./input/data-eqns/test_image1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
edged = cv2.Canny(blurred, 30, 150)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method="left-to-right")[0]
chars=[]
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# filter out bounding boxes, ensuring they are neither too small
# nor too large
if w*h>1200:
# extract the character and threshold it to make the character
# appear as *white* (foreground) on a *black* background, then
# grab the width and height of the thresholded image
roi = gray[y:y + h, x:x + w]
chars.append(prediction(roi))
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
plt.figure(figsize=(20,20))
plt.imshow(image)
chars
labels=[i for i in train_set.class_indices]
print(labels)
eq=[]
pos=[]
for i in ((chars)):
if len(eq)==0 and i[0][0] in labels[3:]:
eq.append(i[0][0])
elif len(eq)>0 and i[0][0] in labels[4:14]:
eq.append(i[0][0])
elif len(eq)>0 and i[0][0] in labels[:4]:
eq.append(i[0][0])
pos.append(len(eq))
else:
pass
for i in pos:
if eq[i-1]=='+':
print(int(''.join(eq[:pos[0]-1]))+int(''.join(eq[pos[0]:])))
elif eq[i-1]=='%':
print(int(''.join(eq[:pos[0]-1]))/int(''.join(eq[pos[0]:])))
elif eq[i-1]=='*':
print(int(''.join(eq[:pos[0]-1]))*int(''.join(eq[pos[0]:])))
else:
print(int(''.join(eq[:pos[0]-1]))-int(''.join(eq[pos[0]:])))
image = cv2.imread('./input/data-eqns/test0.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
edged = cv2.Canny(blurred, 30, 150)
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = sort_contours(cnts, method="left-to-right")[0]
chars=[]
for c in cnts:
# compute the bounding box of the contour
(x, y, w, h) = cv2.boundingRect(c)
# filter out bounding boxes, ensuring they are neither too small
# nor too large
if w*h>1200:
# extract the character and threshold it to make the character
# appear as *white* (foreground) on a *black* background, then
# grab the width and height of the thresholded image
roi = gray[y:y + h, x:x + w]
chars.append(prediction(roi))
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
plt.figure(figsize=(20,20))
plt.imshow(image)
chars
labels=[i for i in train_set.class_indices]
print(labels)
eq=[]
pos=[]
for i in ((chars)):
if len(eq)==0 and i[0][0] in labels[3:]:
eq.append(i[0][0])
elif len(eq)>0 and i[0][0] in labels[4:14]:
eq.append(i[0][0])
elif len(eq)>0 and i[0][0] in labels[:4]:
eq.append(i[0][0])
pos.append(len(eq))
else:
pass
for i in pos:
if eq[i-1]=='+':
print(int(''.join(eq[:pos[0]-1]))+int(''.join(eq[pos[0]:])))
elif eq[i-1]=='%':
print(int(''.join(eq[:pos[0]-1]))/int(''.join(eq[pos[0]:])))
elif eq[i-1]=='*':
print(int(''.join(eq[:pos[0]-1]))*int(''.join(eq[pos[0]:])))
else:
print(int(''.join(eq[:pos[0]-1]))-int(''.join(eq[pos[0]:])))
```
# THANK YOU!!
<i>Copyright (c) Microsoft Corporation. All rights reserved.<br>
Licensed under the MIT License.</i>
<br>
# Recommender Hyperparameter Tuning w/ AzureML
This notebook shows how to auto-tune hyperparameters of a recommender model by utilizing **Azure Machine Learning service** ([AzureML](https://azure.microsoft.com/en-us/services/machine-learning-service/))<sup><a href="#azureml-search">a</a>, <a href="#azure-subscription">b</a></sup>.
We present an overall process of utilizing AzureML, specifically [**Hyperdrive**](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive?view=azure-ml-py) component, for the hyperparameter tuning by demonstrating key steps:
1. Configure AzureML Workspace
2. Create Remote Compute Target (GPU cluster)
3. Prepare Data
4. Prepare Training Scripts
5. Setup and Run Hyperdrive Experiment
6. Model Import, Re-train and Test
In this notebook, we use [**Wide-and-Deep model**](https://ai.googleblog.com/2016/06/wide-deep-learning-better-together-with.html) from **TensorFlow high-level Estimator API (v1.12)** on the movie recommendation scenario. Wide-and-Deep learning jointly trains wide linear model and deep neural networks (DNN) to combine the benefits of memorization and generalization for recommender systems.
For more details about the **Wide-and-Deep** model:
* [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb)
* [Original paper](https://arxiv.org/abs/1606.07792)
* [TensorFlow API doc](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNLinearCombinedRegressor)
Regarding **AzureML**, please refer to:
* [Quickstart notebook](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
* [Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters)
* [Tensorflow model tuning with Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-train-tensorflow)
---
<sub><span id="azureml-search">a. To use AzureML, you will need an Azure subscription.</span><br>
<span id="azure-subscription">b. When you web-search "Azure Machine Learning", you will most likely to see mixed results of Azure Machine Learning (AzureML) and Azure Machine Learning **Studio**. Please note they are different services where AzureML's focuses are on ML model management, tracking and hyperparameter tuning, while the [ML Studio](https://studio.azureml.net/)'s is to provide a high-level tool for 'easy-to-use' experience of ML designing and experimentation based on GUI.</span></sub>
```
import sys
sys.path.append("../../")
import itertools
import os
import shutil
from tempfile import TemporaryDirectory
import time
from IPython.display import clear_output
import numpy as np
import papermill as pm
import pandas as pd
import sklearn.preprocessing
import tensorflow as tf
import azureml as aml
import azureml.widgets as widgets
import azureml.train.hyperdrive as hd
from reco_utils.dataset.pandas_df_utils import user_item_pairs
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_random_split
import reco_utils.evaluation.python_evaluation
print("Azure ML SDK Version:", aml.core.VERSION)
print("Tensorflow Version:", tf.__version__)
tmp_dir = TemporaryDirectory()
```
### 1. Configure AzureML Workspace
**AzureML workspace** is a foundational block in the cloud that you use to experiment, train, and deploy machine learning models via the AzureML service. In this notebook, we 1) create a workspace from the [**Azure portal**](https://portal.azure.com) and 2) configure it from this notebook.
You can find more details about the setup and configure processes from the following links:
* [Quickstart with Azure portal](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started)
* [Quickstart with Python SDK](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-create-workspace-with-python)
#### 1.1 Create a workspace
1. Sign in to the [Azure portal](https://portal.azure.com) by using the credentials for the Azure subscription you use.
2. Select the **Create a resource** menu, search for **Machine Learning service workspace**, and select the **Create** button.
3. In the **ML service workspace** pane, configure your workspace by entering the *workspace name* and *resource group* (or **create new** resource group if you don't have one already), and select **Create**. It can take a few moments to create the workspace.
#### 1.2 Configure
To configure this notebook to communicate with the workspace, type your Azure subscription id, resource group name and workspace name into `<subscription-id>`, `<resource-group>`, `<workspace-name>` in the notebook cell below. Alternatively, you can create a *.\aml_config\config.json* file with the following contents:
```
{
"subscription_id": "<subscription-id>",
"resource_group": "<resource-group>",
"workspace_name": "<workspace-name>"
}
```
```
# AzureML workspace info. Note, will look up "aml_config\config.json" first, then fall back to use this
SUBSCRIPTION_ID = '<subscription-id>'
RESOURCE_GROUP = '<resource-group>'
WORKSPACE_NAME = '<workspace-name>'
# Remote compute (cluster) configuration. If you want to save the cost more, set these to small.
VM_SIZE = 'STANDARD_NC6'
VM_PRIORITY = 'lowpriority'
# Cluster nodes
MIN_NODES = 4
MAX_NODES = 8
# Hyperdrive experimentation configuration
MAX_TOTAL_RUNS = 100 # Number of runs (training-and-evaluation) to search the best hyperparameters.
MAX_CONCURRENT_RUNS = 8
# Recommend top k items
TOP_K = 10
# Select MovieLens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
EPOCHS = 50
# Metrics to track
RANKING_METRICS = ['ndcg_at_k', 'precision_at_k']
RATING_METRICS = ['rmse', 'mae']
PRIMARY_METRIC = 'rmse'
# Data column names
USER_COL = 'UserId'
ITEM_COL = 'MovieId'
RATING_COL = 'Rating'
ITEM_FEAT_COL = 'Genres'
```
Now let's see if everything is ready!
```
# Connect to a workspace
try:
ws = aml.core.Workspace.from_config()
except aml.exceptions.UserErrorException:
try:
ws = aml.core.Workspace(
subscription_id=SUBSCRIPTION_ID,
resource_group=RESOURCE_GROUP,
workspace_name=WORKSPACE_NAME
)
ws.write_config()
except aml.exceptions.AuthenticationException:
ws = None
if ws is None:
raise ValueError(
"""Cannot access the AzureML workspace w/ the config info provided.
Please check if you entered the correct id, group name and workspace name"""
)
else:
print("AzureML workspace name: ", ws.name)
clear_output() # Comment out this if you want to see your workspace info.
```
### 2. Create Remote Compute Target
We create a GPU cluster as our **remote compute target**. If a cluster with the same name already exists in your workspace, the script will load it instead. You can see [this document](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets) to learn more about setting up compute targets in different locations.
This notebook selects the **STANDARD_NC6** virtual machine (VM) and sets its priority to *lowpriority* to save cost.
Size | vCPU | Memory (GiB) | Temp storage (SSD, GiB) | GPU | GPU memory (GiB) | Max data disks | Max NICs
---|---|---|---|---|---|---|---
Standard_NC6 | <div align="center">6</div> | <div align="center">56</div> | <div align="center">340</div> | <div align="center">1</div> | <div align="center">8</div> | <div align="center">24</div> | <div align="center">1</div>
For more information about Azure virtual machine sizes, see [here](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-gpu).
```
CLUSTER_NAME = 'gpu-cluster-nc6'
try:
compute_target = aml.core.compute.ComputeTarget(workspace=ws, name=CLUSTER_NAME)
print("Found existing compute target")
except aml.core.compute_target.ComputeTargetException:
print("Creating a new compute target...")
compute_config = aml.core.compute.AmlCompute.provisioning_configuration(
vm_size=VM_SIZE,
vm_priority=VM_PRIORITY,
min_nodes=MIN_NODES,
max_nodes=MAX_NODES
)
# create the cluster
compute_target = aml.core.compute.ComputeTarget.create(ws, CLUSTER_NAME, compute_config)
compute_target.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
# Use the 'status' property to get a detailed status for the current cluster.
print(compute_target.status.serialize())
```
### 3. Prepare Data
For demonstration purposes, we use the 100k MovieLens dataset. First, download the data and convert the format (multi-hot encode *genres*) to make it work for our model. More details about this step are described in our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb).
```
data = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=[USER_COL, ITEM_COL, RATING_COL],
genres_col='Genres_string'
)
# Encode 'genres' into int array (multi-hot representation) to use as item features
genres_encoder = sklearn.preprocessing.MultiLabelBinarizer()
data[ITEM_FEAT_COL] = genres_encoder.fit_transform(
data['Genres_string'].apply(lambda s: s.split("|"))
).tolist()
data.drop('Genres_string', axis=1, inplace=True)
data.head()
```
The dataset is split into train, validation, and test sets. The train and validation sets will be used for hyperparameter tuning, and the test set will be used for the final evaluation of the model after we import the best model from the AzureML workspace.
Here, we don't do a three-way split directly by passing `ratio=[0.56, 0.19, 0.25]`. Instead, we first split the data into train and test sets with the same `seed` we've been using in other notebooks to make the train set identical across them. Then, we further split the train set into train and validation sets.
```
# Use the same seed to make the train and test sets identical across other notebooks in the repo.
train, test = python_random_split(data, ratio=0.75, seed=42)
# Further split the train set into train and validation set.
train, valid = python_random_split(train)
print(len(train), len(valid), len(test))
```
Now, upload the train and validation sets to the AzureML workspace. Our Hyperdrive experiment will use them.
```
DATA_DIR = os.path.join(tmp_dir.name, 'aml_data')
os.makedirs(DATA_DIR, exist_ok=True)
TRAIN_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_train.pkl"
train.to_pickle(os.path.join(DATA_DIR, TRAIN_FILE_NAME))
VALID_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_valid.pkl"
valid.to_pickle(os.path.join(DATA_DIR, VALID_FILE_NAME))
# Note, all the files under DATA_DIR will be uploaded to the data store
ds = ws.get_default_datastore()
ds.upload(
src_dir=DATA_DIR,
target_path='data',
overwrite=True,
show_progress=True
)
```
### 4. Prepare Training Scripts
The next step is to prepare the scripts that AzureML Hyperdrive will use to train and evaluate models with the selected hyperparameters. We re-use our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb) for that. To run the model notebook from the Hyperdrive Run, all we need is an [entry script](../../reco_utils/azureml/wide_deep.py) which parses the hyperparameter arguments, passes them to the notebook, and records the notebook's results to the AzureML Run logs using `papermill`. Hyperdrive uses the logs to track the performance of each hyperparameter set and find the best-performing one.
Here is a code snippet from the entry script:
```
...
from azureml.core import Run
run = Run.get_context()
...
NOTEBOOK_NAME = os.path.join(
"notebooks",
"00_quick_start",
"wide_deep_movielens.ipynb"
)
...
parser = argparse.ArgumentParser()
...
parser.add_argument('--dnn-optimizer', type=str, dest='dnn_optimizer', ...
parser.add_argument('--dnn-optimizer-lr', type=float, dest='dnn_optimizer_lr', ...
...
pm.execute_notebook(
NOTEBOOK_NAME,
OUTPUT_NOTEBOOK,
parameters=params,
kernel_name='python3',
)
...
```
```
# Prepare all the necessary scripts which will be loaded to our Hyperdrive Experiment Run
SCRIPT_DIR = os.path.join(tmp_dir.name, 'aml_script')
# Copy scripts to SCRIPT_DIR temporarly
shutil.copytree(os.path.join('..', '..', 'reco_utils'), os.path.join(SCRIPT_DIR, 'reco_utils'))
# We re-use our model notebook for training and testing models.
model_notebook_dir = os.path.join('notebooks', '00_quick_start')
dest_model_notebook_dir = os.path.join(SCRIPT_DIR, model_notebook_dir)
os.makedirs(dest_model_notebook_dir , exist_ok=True)
shutil.copy(
os.path.join('..', '..', model_notebook_dir, 'wide_deep_movielens.ipynb'),
dest_model_notebook_dir
)
# This is our entry script for Hyperdrive Run
ENTRY_SCRIPT_NAME = 'reco_utils/azureml/wide_deep.py'
```
### 5. Setup and Run Hyperdrive Experiment
[Hyperdrive](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters) creates a machine learning Experiment [Run](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.run?view=azure-ml-py) on the workspace and utilizes child runs to search for the best set of hyperparameters.
#### 5.1 Create Experiment
[Experiment](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.experiment(class)?view=azure-ml-py) is the main entry point into experimenting with AzureML. To create a new Experiment or get an existing one, we pass our experiment name.
```
# Create an experiment to track the runs in the workspace
EXP_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_wide_deep_model"
exp = aml.core.Experiment(workspace=ws, name=EXP_NAME)
```
#### 5.2 Define Search Space
Now we define the search space of hyperparameters. For example, if you want to test different batch sizes of {64, 128, 256}, you can use `azureml.train.hyperdrive.choice(64, 128, 256)`. To search from a continuous space, use `uniform(start, end)`. For more options, see [Hyperdrive parameter expressions](https://docs.microsoft.com/en-us/python/api/azureml-train-core/azureml.train.hyperdrive.parameter_expressions?view=azure-ml-py).
In this notebook, we fix model type as `wide_deep` and the number of epochs to 50.
In the search space, we set different linear and DNN optimizers, structures, learning rates and regularization rates. Details about the hyperparameters can be found from our [Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb).
Hyperdrive provides three different parameter sampling methods: `RandomParameterSampling`, `GridParameterSampling`, and `BayesianParameterSampling`. Details about each method can be found from [Azure doc](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters). Here, we use the Bayesian sampling.
```
# Fixed parameters
script_params = {
'--datastore': ds.as_mount(),
'--train-datapath': "data/" + TRAIN_FILE_NAME,
'--test-datapath': "data/" + VALID_FILE_NAME,
'--top-k': TOP_K,
'--user-col': USER_COL,
'--item-col': ITEM_COL,
'--item-feat-col': ITEM_FEAT_COL,
'--rating-col': RATING_COL,
'--ranking-metrics': RANKING_METRICS,
'--rating-metrics': RATING_METRICS,
'--epochs': EPOCHS,
'--model-type': 'wide_deep'
}
# Hyperparameter search space
params = {
'--batch-size': hd.choice(64, 128, 256),
# Linear model hyperparameters
'--linear-optimizer': hd.choice('Ftrl'), # 'SGD' and 'Momentum' easily got exploded loss in regression problems.
'--linear-optimizer-lr': hd.uniform(0.0001, 0.1),
'--linear-l1-reg': hd.uniform(0.0, 0.1),
# Deep model hyperparameters
'--dnn-optimizer': hd.choice('Adagrad', 'Adam'),
'--dnn-optimizer-lr': hd.uniform(0.0001, 0.1),
'--dnn-user-embedding-dim': hd.choice(4, 8, 16, 32, 64),
'--dnn-item-embedding-dim': hd.choice(4, 8, 16, 32, 64),
'--dnn-hidden-layer-1': hd.choice(0, 32, 64, 128, 256, 512, 1024), # 0: not using this layer
'--dnn-hidden-layer-2': hd.choice(0, 32, 64, 128, 256, 512, 1024),
'--dnn-hidden-layer-3': hd.choice(0, 32, 64, 128, 256, 512, 1024),
'--dnn-hidden-layer-4': hd.choice(32, 64, 128, 256, 512, 1024),
'--dnn-batch-norm': hd.choice(0, 1),
'--dnn-dropout': hd.choice(0.0, 0.1, 0.2, 0.3, 0.4)
}
```
**AzureML Estimator** is the building block for training. An Estimator encapsulates the training code and parameters, the compute resources and runtime environment for a particular training scenario (note, this is not TensorFlow's Estimator).
We create one for our experimentation with the dependencies our model requires as follows:
```
conda_packages=['pandas', 'scikit-learn'],
pip_packages=['ipykernel', 'papermill', 'tensorflow-gpu==1.12']
```
In the Hyperdrive run config, we set the primary metric name and goal (our hyperparameter search criteria), the hyperparameter sampling method, and the total number of child runs. The bigger the search space, the more runs we will need for good results.
```
est = aml.train.estimator.Estimator(
source_directory=SCRIPT_DIR,
entry_script=ENTRY_SCRIPT_NAME,
script_params=script_params,
compute_target=compute_target,
use_gpu=True,
conda_packages=['pandas', 'scikit-learn'],
pip_packages=['ipykernel', 'papermill', 'tensorflow-gpu==1.12']
)
hd_run_config = hd.HyperDriveRunConfig(
estimator=est,
hyperparameter_sampling=hd.BayesianParameterSampling(params),
primary_metric_name=PRIMARY_METRIC,
primary_metric_goal=hd.PrimaryMetricGoal.MINIMIZE,
max_total_runs=MAX_TOTAL_RUNS,
max_concurrent_runs=MAX_CONCURRENT_RUNS
)
```
#### 5.3 Run Experiment
Now we submit the Run to our experiment. You can see the experiment progress from this notebook by using `azureml.widgets.RunDetails(hd_run).show()`, or check it from the Azure portal with the URL you can get by running `hd_run.get_portal_url()`.
<img src="https://recodatasets.blob.core.windows.net/images/aml_0.png?sanitize=true" width="600"/>
<img src="https://recodatasets.blob.core.windows.net/images/aml_1.png?sanitize=true" width="600"/>
<center><i>AzureML Hyperdrive Widget</i></center>
To load an existing Hyperdrive Run instead of starting a new one, use `hd_run = hd.HyperDriveRun(exp, <user-run-id>, hyperdrive_run_config=hd_run_config)`. You can also cancel the Run with `hd_run.cancel()`.
```
hd_run = exp.submit(config=hd_run_config)
widgets.RunDetails(hd_run).show()
```
Once all the child-runs are finished, we can get the best run and the metrics.
> Note, if you run the Hyperdrive experiment again, you will see that the best metrics and corresponding hyperparameters are not the same. This is because of 1) the random initialization of the model and 2) Hyperdrive sampling (when you use RandomSampling). You will also get different results if you use different training and validation sets.
```
# Get best run and printout metrics
best_run = hd_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print("* Best Run Id:", best_run.id)
print("\n* Best hyperparameters:")
print("Model type =", best_run_metrics['MODEL_TYPE'])
print("Batch size =", best_run_metrics['BATCH_SIZE'])
print("Linear optimizer =", best_run_metrics['LINEAR_OPTIMIZER'])
print("\tLearning rate = {0:.4f}".format(best_run_metrics['LINEAR_OPTIMIZER_LR']))
print("\tL1 regularization = {0:.4f}".format(best_run_metrics['LINEAR_L1_REG']))
print("DNN optimizer =", best_run_metrics['DNN_OPTIMIZER'])
print("\tUser embedding dimension =", best_run_metrics['DNN_USER_DIM'])
print("\tItem embedding dimension =", best_run_metrics['DNN_ITEM_DIM'])
hidden_units = []
for i in range(1, 5):
hidden_nodes = best_run_metrics['DNN_HIDDEN_LAYER_{}'.format(i)]
if hidden_nodes > 0:
hidden_units.append(hidden_nodes)
print("\tHidden units =", hidden_units)
print("\tLearning rate = {0:.4f}".format(best_run_metrics['DNN_OPTIMIZER_LR']))
print("\tDropout rate = {0:.4f}".format(best_run_metrics['DNN_DROPOUT']))
print("\tBatch normalization =", best_run_metrics['DNN_BATCH_NORM'])
# Metrics evaluated on validation set
print("\n* Performance metrics:")
print("Top", TOP_K)
for m in RANKING_METRICS:
print("\t{0} = {1:.4f}".format(m, best_run_metrics[m]))
for m in RATING_METRICS:
print("\t{0} = {1:.4f}".format(m, best_run_metrics[m]))
```
### 6. Model Import and Test
[Wide-Deep Quickstart notebook](../00_quick_start/wide_deep_movielens.ipynb), which we've used in our Hyperdrive Experiment, exports the trained model to the output folder (the output path is recorded at `best_run_metrics['saved_model_dir']`). We can download a model from the best run and test it.
```
MODEL_DIR = os.path.join(tmp_dir.name, 'aml_model')
os.makedirs(MODEL_DIR, exist_ok=True)
model_file_dir = os.path.normpath(best_run_metrics['saved_model_dir'][2:-1]) + '/'
print(model_file_dir)
for f in best_run.get_file_names():
if f.startswith(model_file_dir):
output_file_path = os.path.join(MODEL_DIR, f[len(model_file_dir):])
print("Downloading {}..".format(f))
best_run.download_file(name=f, output_file_path=output_file_path)
saved_model = tf.contrib.estimator.SavedModelEstimator(MODEL_DIR)
cols = {
'col_user': USER_COL,
'col_item': ITEM_COL,
'col_rating': RATING_COL,
'col_prediction': 'prediction'
}
tf.logging.set_verbosity(tf.logging.ERROR)
# Prediction input function for TensorFlow SavedModel
def predict_input_fn(df):
def input_fn():
examples = [None] * len(df)
for index, test_sample in df.iterrows():
example = tf.train.Example()
example.features.feature[USER_COL].int64_list.value.extend([test_sample[USER_COL]])
example.features.feature[ITEM_COL].int64_list.value.extend([test_sample[ITEM_COL]])
example.features.feature[ITEM_FEAT_COL].float_list.value.extend(test_sample[ITEM_FEAT_COL])
examples[index] = example.SerializeToString()
return {'inputs': tf.constant(examples)}
return input_fn
# Rating prediction set
X_test = test.drop(RATING_COL, axis=1)
X_test.reset_index(drop=True, inplace=True)
# Rating prediction
predictions = list(itertools.islice(
saved_model.predict(predict_input_fn(X_test)),
len(X_test)
))
prediction_df = X_test.copy()
prediction_df['prediction'] = [p['outputs'][0] for p in predictions]
print(prediction_df['prediction'].describe(), "\n")
for m in RATING_METRICS:
fn = getattr(reco_utils.evaluation.python_evaluation, m)
result = fn(test, prediction_df, **cols)
print(m, "=", result)
# Unique items
if ITEM_FEAT_COL is None:
items = data.drop_duplicates(ITEM_COL)[[ITEM_COL]].reset_index(drop=True)
else:
items = data.drop_duplicates(ITEM_COL)[[ITEM_COL, ITEM_FEAT_COL]].reset_index(drop=True)
# Unique users
users = data.drop_duplicates(USER_COL)[[USER_COL]].reset_index(drop=True)
# Ranking prediction set
ranking_pool = user_item_pairs(
user_df=users,
item_df=items,
user_col=USER_COL,
item_col=ITEM_COL,
user_item_filter_df=pd.concat([train, valid]), # remove seen items
shuffle=True
)
predictions = []
# To prevent creating a tensor proto whose content is larger than 2GB (which will raise an error),
# divide ranking_pool into 10 chunks, predict each, and concat back.
for pool in np.array_split(ranking_pool, 10):
pool.reset_index(drop=True, inplace=True)
# Rating prediction
pred = list(itertools.islice(
saved_model.predict(predict_input_fn(pool)),
len(pool)
))
predictions.extend([p['outputs'][0] for p in pred])
ranking_pool['prediction'] = predictions
for m in RANKING_METRICS:
fn = getattr(reco_utils.evaluation.python_evaluation, m)
result = fn(test, ranking_pool, **{**cols, 'k': TOP_K})
print(m, "=", result)
```
#### Wide-and-Deep Baseline Comparison
To see if Hyperdrive found good hyperparameters, we simply compare against a model with known hyperparameters from [TensorFlow's wide-deep learning example](https://github.com/tensorflow/models/blob/master/official/wide_deep/movielens_main.py), which uses only the DNN part of the wide-and-deep model for MovieLens data.
> Note, this is not an 'apples to apples' comparison. For example, TensorFlow's MovieLens example uses the *rating timestamp* as a numeric feature, but we did not use it here because we think the timestamps are not relevant to the movies' ratings. This comparison is more to show how Hyperdrive can help find comparable hyperparameters without requiring exhaustive effort in going over a huge search space.
```
OUTPUT_NOTEBOOK = os.path.join(tmp_dir.name, "output.ipynb")
OUTPUT_MODEL_DIR = os.path.join(tmp_dir.name, "known_hyperparam_model_checkpoints")
params = {
'MOVIELENS_DATA_SIZE': MOVIELENS_DATA_SIZE,
'TOP_K': TOP_K,
'MODEL_TYPE': 'deep',
'EPOCHS': EPOCHS,
'BATCH_SIZE': 256,
'DNN_OPTIMIZER': 'Adam',
'DNN_OPTIMIZER_LR': 0.001,
'DNN_HIDDEN_LAYER_1': 256,
'DNN_HIDDEN_LAYER_2': 256,
'DNN_HIDDEN_LAYER_3': 256,
'DNN_HIDDEN_LAYER_4': 128,
'DNN_USER_DIM': 16,
'DNN_ITEM_DIM': 64,
'DNN_DROPOUT': 0.3,
'DNN_BATCH_NORM': 0,
'MODEL_DIR': OUTPUT_MODEL_DIR,
'EVALUATE_WHILE_TRAINING': False,
'EXPORT_DIR_BASE': OUTPUT_MODEL_DIR,
'RANKING_METRICS': RANKING_METRICS,
'RATING_METRICS': RATING_METRICS,
}
start_time = time.time()
pm.execute_notebook(
"../00_quick_start/wide_deep_movielens.ipynb",
OUTPUT_NOTEBOOK,
parameters=params,
kernel_name='python3'
)
end_time = time.time()
print("Training and evaluation of Wide-and-Deep model took", end_time-start_time, "secs.")
nb = pm.read_notebook(OUTPUT_NOTEBOOK)
for m in RANKING_METRICS:
print(m, "=", nb.data[m])
for m in RATING_METRICS:
print(m, "=", nb.data[m])
```
### Concluding Remark
We showed how to tune hyperparameters by utilizing the Azure Machine Learning service. Complex and powerful models like the Wide-and-Deep model often have a large number of hyperparameters that affect recommendation accuracy, and it is not practical to tune such a model without a GPU cluster. For example, training and evaluating one model took around 3 minutes on 100k MovieLens data on a single *Standard NC6* VM, as we tested in the [above cell](#Wide-and-Deep-Baseline-Comparison). When we used 1M MovieLens, it took about 47 minutes. If we wanted to investigate 100 different combinations of hyperparameters **manually**, it would take about **78 hours** on the VM, and we might still wonder whether we had tested good candidate hyperparameters. With AzureML, as shown in this notebook, we can easily set up a GPU cluster sized to our problem, utilize Bayesian sampling to navigate the huge search space efficiently, and tweak the experiment with different criteria and algorithms for further research.
#### Cleanup
```
tmp_dir.cleanup()
```
# Show iterative steps of preprocessing
```
import data_utils
import numpy as np
import matplotlib.pyplot as plt
from preprocessing import binarize_per_slice, all_slice_analysis, fill_hole, two_lung_only, process_mask
# Show iterative steps of computing lung mask
first_patient_pixels, spacing, _ = data_utils.load_dicom_slices("../../data/LIDC-IDRI-DCM/LIDC-IDRI-0001/01-01-2000-30178/3000566-03192/")
print(first_patient_pixels.shape)
import matplotlib.pyplot as plt
plt.hist(first_patient_pixels.flatten(), bins=80, color='c')
plt.xlabel("Hounsfield Units (HU)")
plt.ylabel("Frequency")
plt.show()
# Show some slice in the middle
h = 80
plt.imshow(first_patient_pixels[h], cmap=plt.cm.gray)
plt.show()
bw = binarize_per_slice(first_patient_pixels, spacing)
plt.imshow(bw[h], cmap=plt.cm.gray)
plt.show()
```
Parallelized, but very slow on Power (on the order of 2 minutes).
```
flag = 0
cut_num = 0
while flag == 0:
bw, flag = all_slice_analysis(bw, spacing, cut_num=cut_num)
cut_num = cut_num + 1
plt.imshow(bw[h], cmap=plt.cm.gray)
plt.show()
```
No multiprocessing.
```
bw = fill_hole(bw)
plt.imshow(bw[h], cmap=plt.cm.gray)
plt.show()
```
No multiprocessing.
```
bw1, bw2, bw = two_lung_only(bw, spacing)
plt.imshow(bw[h], cmap=plt.cm.gray)
plt.show()
```
No multiprocessing. Rather slow.
```
plt.imshow(bw1[h], cmap=plt.cm.gray)
plt.show()
plt.imshow(bw2[h], cmap=plt.cm.gray)
plt.show()
dm1 = process_mask(bw1)
dm2 = process_mask(bw2)
plt.imshow(dm1[h]+dm2[h], cmap=plt.cm.gray)
plt.show()
dm = process_mask(bw)
plt.imshow(dm[h], cmap=plt.cm.gray)
plt.show()
x = np.load("/wmlce/data/projects/lung_segmentation/output/preprocessing/2_128_256_256/456.npy")
plt.imshow(x[h], cmap=plt.cm.gray)
plt.show()
x_mask = np.load("/wmlce/data/projects/lung_segmentation/output/preprocessing/2_128_256_256/456_mask.npy")
plt.imshow(x_mask[h], cmap=plt.cm.gray)
plt.show()
```
# Using U-Net Lungs Segmentation
```
import os
import sys
import time
import torch
import mlflow
import mlflow.pytorch
import numpy as np
import SimpleITK as sitk
from pathlib import Path
import matplotlib.pyplot as plt
os.environ['MDT_DATASETS_DIR'] = '/wmlce/data/medical-datasets'
from preprocessing import binarize_per_slice, all_slice_analysis, fill_hole, two_lung_only, process_mask, resample_array, lumTrans
LS_PATH = os.path.join('.', 'lung-segmentation')
sys.path.append(LS_PATH)
import predict
from data import utils as data_utils
start_time = time.time()
pid = 'LIDC-IDRI-0489'
path = f'/wmlce/data/medical-datasets/MDT-LIDC-IDRI/NRRDs/{pid}'
target_spacing = (0.7, 0.7, 1.25)
remote_server_uri = "http://mlflow.10.7.13.202.nip.io/"
mlflow.set_tracking_uri(remote_server_uri)
h = 150
# Load scan
img = sitk.ReadImage(os.path.join(path, '{}_CT.nrrd'.format(pid)))
original_spacing = np.array(img.GetSpacing())
img_arr = sitk.GetArrayFromImage(img)
ls_img_arr = np.copy(img_arr)
load_time = time.time()
print(f'{pid}: loaded in {load_time - start_time} s')
# Resample and Normalize
img_arr = resample_array(img_arr, img.GetSpacing(), target_spacing)
lum_img_arr = np.copy(img_arr)
img_arr = np.clip(img_arr, -1200, 600)
img_arr = img_arr.astype(np.float32)
img_arr = (img_arr - np.mean(img_arr)) / np.std(img_arr).astype(np.float16)
norm_time = time.time()
print(f'{pid}: Resampled in {norm_time - load_time} s')
print(f'{pid}: {img_arr.shape}, {target_spacing}')
plt.imshow(img_arr[h], cmap=plt.cm.gray)
plt.show()
# Compute lungs mask
model_name = "2-lungs-segmentation"
unet = mlflow.pytorch.load_model("models:/{}/production".format(model_name))
print(ls_img_arr.shape, original_spacing)
ls_img_arr, spacing = data_utils.prep_img_arr(ls_img_arr, original_spacing)
print(ls_img_arr.shape, spacing)
mask = predict.predict(ls_img_arr, 1, unet, threshold=True, erosion=True)
print(mask.shape, spacing)
mask, spacing = data_utils.prep_img_arr(mask[0][0], spacing, target_shape=img_arr.shape)
mask = mask[0]
mask[mask>0.5] = 1
mask[mask!=1] = 0
print(mask.shape, target_spacing)
ls_time = time.time()
print(f'{pid}: Lung segmentation took {ls_time - norm_time} s')
plt.imshow(mask[h], cmap=plt.cm.gray)
plt.show()
dilatedMask = process_mask(mask)
Mask = mask
extramask = dilatedMask.astype(np.uint8) - Mask.astype(np.uint8)
bone_thresh = 210
pad_value = 1 #170
img_arr[np.isnan(img_arr)]=-2000
sliceim = lumTrans(lum_img_arr)
#sliceim = sliceim*dilatedMask+pad_value*(1-dilatedMask).astype('uint8')
bones = sliceim*extramask>bone_thresh
#sliceim[bones] = pad_value
img_arr = img_arr*dilatedMask+pad_value*(1-dilatedMask).astype('uint8')
img_arr[bones] = pad_value
bones_mask = np.zeros(sliceim.shape)
bones_mask[bones] = 1
print(f'{pid}: Cleaning took {time.time() - ls_time} s')
print(f'{pid}: Ellapsed {time.time() - start_time} s')
# Plot image
plt.subplot(2, 3, 1).imshow(img_arr[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 2).imshow(Mask[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 3).imshow(dilatedMask[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 4).imshow(img_arr[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 5).imshow(bones_mask[h], cmap=plt.cm.gray)
plt.subplot(2, 3, 6).imshow(extramask[h], cmap=plt.cm.gray)
plt.show()
dir_path = "/wmlce/data/medical-datasets/MDT-PP"
imgs = glob.glob(os.path.join(dir_path, "*_rois.npy"))
print(np.min(img_arr), np.max(img_arr))
plt.imshow(img_arr[h], cmap=plt.cm.gray)
plt.show()
```
## Load some images generated by such preprocessing
```
import os, glob
import numpy as np
import matplotlib.pyplot as plt
dir_path = "/wmlce/data/medical-datasets/MDT-PP"
imgs = glob.glob(os.path.join(dir_path, "*_rois.npy"))
h = 150
n = 10
for ix, img in enumerate(np.random.choice(imgs, n), 1):
img_arr = np.load(img.replace("_rois", "_img")).astype(np.float32)
rois_arr = np.load(img)
print(f"Image {os.path.splitext(os.path.basename(img))[0]} {img_arr.shape}, rois {rois_arr.shape}")
plt.subplot(2, n/2, ix).imshow(img_arr[h], cmap=plt.cm.gray)
plt.show()
dir_path = "/wmlce/data/medical-datasets/MDT-PP"
img = "LIDC-IDRI-0338_img.npy"
img = "LIDC-IDRI-0479_img.npy"
img = "LIDC-IDRI-0489_img.npy"
img = "LIDC-IDRI-0015_img.npy"
img = "LIDC-IDRI-0509_img.npy" # This image seems to have been swapped (axes issues / flipped ?)
img_arr = np.load(os.path.join(dir_path, img))
print(img_arr.shape, img_arr.dtype)
plt.imshow(img_arr[:,250,:], cmap=plt.cm.gray)
plt.show()
```
# Crap image analysis
### Resample to original size and save to nrrd
```
from preprocessing import resample_array_to_shape
itkimg = sitk.ReadImage("/wmlce/data/medical-datasets/LIDC-IDRI/LIDC-IDRI-0015/1.3.6.1.4.1.14519.5.2.1.6279.6001.231462296937187240061810311146/1.3.6.1.4.1.14519.5.2.1.6279.6001.227962600322799211676960828223/LIDC-IDRI-0015_CT.nrrd")
seg_mask, seg_spacing = resample_array_to_shape(img_arr, [0.7,0.7,1.25], target_shape=sitk.GetArrayFromImage(itkimg).shape)
new_itk = sitk.GetImageFromArray(seg_mask)
new_itk.SetOrigin(itkimg.GetOrigin())
new_itk.SetSpacing(itkimg.GetSpacing())
sitk.WriteImage(new_itk, 'test.nrrd')
```
# Get list of all images with high spacing / flipped images
```
import nrrd
import os, glob
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
def load_itk(filename):
with open(filename) as f:
contents = f.readlines()
line = [k for k in contents if k.startswith('TransformMatrix')][0]
transformM = np.array(line.split(' = ')[1].split(' ')).astype('float')
transformM = np.round(transformM)
if np.any( transformM!=np.array([1,0,0, 0, 1, 0, 0, 0, 1])):
isflip = True
else:
isflip = False
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing, isflip
list_path = glob.glob("/wmlce/data/medical-datasets/LIDC-IDRI/LIDC-IDRI-*/*/*/*_CT.nrrd")
spaces = set([])
kinds = set([])
dimensions = set([])
high_spacing = set([])
for path in list_path:
h = nrrd.read_header(path)
spaces.add(h['space'])
for k in h['kinds']:
kinds.add(k)
dimensions.add(h['dimension'])
if np.max(h['space directions']) > 2.5:
high_spacing.add(path)
print(spaces)
print(kinds)
print(dimensions)
print(len(high_spacing))
```
# Check scans manually
```
import nrrd
import os, glob
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
from ipywidgets import interact, interact_manual
from IPython.display import display
import matplotlib.pyplot as plt
import ipywidgets as widgets
ix = 0
#list_path = glob.glob("/wmlce/data/medical-datasets/LIDC-IDRI/LIDC-IDRI-*/*/*/*_CT.nrrd")
list_path = glob.glob("/wmlce/data/medical-datasets/MDT-PP/*_img.npy")
import csv
#csv_path = 'scans_checkup.csv'
csv_path = 'pp_scans_checkup.csv'
header = ['pid', 'flip', 'crap', 'poor', 'warn', 'note']
if not os.path.exists(csv_path):
f = open(csv_path, 'w+')
writer = csv.DictWriter(f, fieldnames=header)
writer.writeheader()
f.close()
def f(flip, crap, poor, warn, note):
global ix, list_path, csv_path, header
if ix >= 1 and (flip or crap or poor or warn):
#pid = os.path.splitext(os.path.basename(list_path[ix-1]))[0].split('_CT')[0]
pid = os.path.splitext(os.path.basename(list_path[ix-1]))[0].split('_img')[0]
print(f'Adding abnormal img {pid} to csv')
f = open(csv_path, 'a')
writer = csv.DictWriter(f, fieldnames=header)
writer.writerow({header[0]: pid, header[1]: int(flip), header[2]: int(crap), header[3]: int(poor), header[4]: int(warn), header[5]: note})
f.close()
if ix >= 0 and ix < len(list_path):
print(f'Showing img: {ix}: {list_path[ix]}')
## Show ITK
#itkimage = sitk.ReadImage(list_path[ix])
#numpyImage = sitk.GetArrayFromImage(itkimage)
numpyImage = np.load(list_path[ix])
plt.imshow(numpyImage[int(len(numpyImage)/2)].astype(np.uint8), cmap=plt.cm.gray)
plt.show()
elif ix >= 0:
print('Done')
ix = ix + 1
_ = interact_manual(f, flip=False, crap=False, poor=False, warn=False, note='')
```
# Check ROI labels
```
import os, glob
import numpy as np
list_paths = glob.glob("/wmlce/data/medical-datasets/MDT-PP/*_rois.npy")
for path in list_paths[:10]:
arr = np.load(path)
pid = os.path.splitext(os.path.basename(path))[0].split('_')[0]
print(pid, np.unique(arr))
```
## Upsample ROIs to original scan size for visualization
```
os.environ["MDT_DATASETS_DIR"] = "/wmlce/data/medical-datasets"
from preprocessing import resample_array_to_shape
import numpy as np
import os, glob
itkimg = sitk.ReadImage(glob.glob("/wmlce/data/medical-datasets/LIDC-IDRI/LIDC-IDRI-0806/*/*/*_CT.nrrd")[0])
rois_path = "/wmlce/data/medical-datasets/MDT-PP/LIDC-IDRI-0806_rois.npy"
pid = os.path.splitext(os.path.basename(rois_path))[0].split('_')[0]
rois_arr = np.load(rois_path)
rois_arr[rois_arr != 0] = 1
seg_mask, seg_spacing = resample_array_to_shape(rois_arr, [0.7,0.7,1.25], target_shape=sitk.GetArrayFromImage(itkimg).shape)
seg_mask[seg_mask >= 0.5] = 1
seg_mask[seg_mask < 0.5] = 0
seg_mask = seg_mask.astype(np.uint8)
new_itk = sitk.GetImageFromArray(seg_mask)
new_itk.SetOrigin(itkimg.GetOrigin())
new_itk.SetSpacing(itkimg.GetSpacing())
sitk.WriteImage(new_itk, f'{pid}_rois.nrrd')
```
## GAN generated scans
```
import os, glob
import numpy as np
import SimpleITK as sitk
import matplotlib.pyplot as plt
from ipywidgets import interact, interact_manual
from IPython.display import display
import matplotlib.pyplot as plt
import ipywidgets as widgets
list_path = glob.glob("/wmlce/data/medical-datasets/MDT-PP/*-AUG_img.npy")
for path in list_path:
pid = os.path.basename(path).replace("_img.npy", "")
n_nods = len(glob.glob(f"/wmlce/data/medical-datasets/MDT-LIDC-IDRI/NRRDs/{pid}/*nod*"))
print(pid, n_nods, np.unique(np.load(path.replace("_img", "_rois"))))
import csv
#csv_path = 'scans_checkup.csv'
csv_path = 'pp_aug_scans_checkup.csv'
header = ['pid', 'flip', 'crap', 'poor', 'warn', 'note']
if not os.path.exists(csv_path):
f = open(csv_path, 'w+')
writer = csv.DictWriter(f, fieldnames=header)
writer.writeheader()
f.close()
def f(flip, crap, poor, warn, note):
global ix, list_path, csv_path, header
if ix >= 1 and (flip or crap or poor or warn):
#pid = os.path.splitext(os.path.basename(list_path[ix-1]))[0].split('_CT')[0]
pid = os.path.splitext(os.path.basename(list_path[ix-1]))[0].split('_img')[0]
print(f'Adding abnormal img {pid} to csv')
f = open(csv_path, 'a')
writer = csv.DictWriter(f, fieldnames=header)
writer.writerow({header[0]: pid, header[1]: int(flip), header[2]: int(crap), header[3]: int(poor), header[4]: int(warn), header[5]: note})
f.close()
if ix >= 0 and ix < len(list_path):
print(f'Showing img: {ix}: {list_path[ix]}')
## Show ITK
#itkimage = sitk.ReadImage(list_path[ix])
#numpyImage = sitk.GetArrayFromImage(itkimage)
numpyImage = np.load(list_path[ix])
plt.imshow(numpyImage[int(len(numpyImage)/2)].astype(np.uint8), cmap=plt.cm.gray)
plt.show()
elif ix >= 0:
print('Done')
ix = ix + 1
_ = interact_manual(f, flip=False, crap=False, poor=False, warn=False, note='')
```
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W2D3_BiologicalNeuronModels/student/W2D3_Intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Intro
**Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
<p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
## Overview
Today you will learn about a few interesting properties of biological neurons and synapses. In his intro lecture Upi Bhalla will start with an overview of the complexity of the neurons and synapses in the brain. He will also introduce a mathematical description of action potential generation and propagation by which neurons communicate with each other. Then, in a series of short tutorials Richard Naud will introduce simple neuron and synapse models. These tutorials will give you insights about how neurons may generate irregular spike patterns and synchronize their activity. In the first tutorial you will learn about the input-output transfer function of the leaky integrate and fire neuron model. In the second tutorial you will use this model to understand how statistics of inputs affects transfer of synchrony. In the third tutorial you will explore the short-term dynamics of synapses which means that synaptic weight is dependent on the recent history of spiking activity of the pre-synaptic neurons. In the fourth tutorial, you will learn about spike timing dependent plasticity and explore how synchrony in the input may shape the synaptic weight distribution. Finally, in the outro lecture Yiota Poirazi will explain how the simplified description of neurons can be expanded to include more biological complexity. She will provide evidence of how dendritic morphology may expand the computational repertoire of individual neurons.
The models we use in today’s lecture fall in the category of how models (W1D1). You will use several concepts from linear systems (W2D2). The insights developed in these tutorials will be useful to understand the dynamics of neural networks (W3D4). Moreover, you will learn about the origin of statistics of neuronal activity which will be useful for several tutorials. For example, the understanding of synchrony will be very useful in appreciating the problem of causality (W3D5).
Neuron and synapse models are essential building blocks of mechanistic models of brain function and dysfunction. One of the common questions in neuroscience is to identify the causes of changes in the statistics of spiking activity patterns: are these changes caused by changes in neuron/synapse properties, by a change in the input, or by a combination of both? With the contents of this tutorial, you should have a framework to think about which changes in spike patterns are due to neuron/synapse changes and which are due to input changes.
## Video
```
# @markdown
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV18A411v7Yy", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"MAOOPv3whZ0", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
```
## Slides
```
# @markdown
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/gyfr2/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
```
<a href="https://colab.research.google.com/github/bereml/iap/blob/master/libretas/1f_fashion_fcn.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Classification of Fashion-MNIST with a dense network
Course: [Introducción al Aprendizaje Profundo](http://turing.iimas.unam.mx/~ricardoml/course/iap/). Instructors: [Bere](https://turing.iimas.unam.mx/~bereml/) and [Ricardo](https://turing.iimas.unam.mx/~ricardoml/) Montalvo Lezama.
---
---
In this notebook you must train two classifiers for the Fashion-MNIST dataset.
1. The first one using the same architecture and hyperparameters as in the MNIST example.
2. In a second classifier, modify the architecture and try to obtain better performance.
To solve this exercise use the [`FashionMNIST`](https://pytorch.org/vision/0.8/datasets.html#fashion-mnist) class provided by PyTorch.
[Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) is a dataset intended to replace MNIST. It was collected with the intention of providing a dataset slightly harder than MNIST.
<img src="https://miro.medium.com/max/800/1*RNBs0OsymwAzDyYMk3_0Aw.jpeg" width="600"/>
The Fashion-MNIST dataset. Image taken from https://medium.com/@sankarchanna2k18/fashion-mnist-data-image-classification-in-tensorflow-bd22f9e680bc.
## 1 Setup
```
# library to inspect architectures
# https://github.com/tyleryep/torchinfo
!pip install torchinfo
```
### 1.1 Libraries
```
# random number utilities
import random
# take n elements from a sequence
from itertools import islice as take
# plotting
import matplotlib.pyplot as plt
# multidimensional arrays
import numpy as np
# neural networks
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as T
# image processing
from skimage import io
# datasets and data loaders
from torch.utils.data import DataLoader
from torchvision.datasets import FashionMNIST
# architecture inspection
from torchinfo import summary
# progress bars
from tqdm import trange
```
### 1.2 Helpers
```
# data directory
DATA_DIR = '../data'
# batch size
BATCH_SIZE = 32
# rows and columns of the image grid
ROWS, COLS = 4, 8
# Fashion-MNIST classes
CLASSES = {
0: "T-shirt/top",
1: "Trouser",
2: "Pullover",
3: "Dress",
4: "Coat",
5: "Sandal",
6: "Shirt",
7: "Sneaker",
8: "Bag",
9: "Ankle boot",
}
def display_grid(xs, titles, rows, cols, figsize=(12, 6)):
"""Displays examples in a grid."""
fig, ax = plt.subplots(rows, cols, figsize=figsize)
i = 0
for r in range(rows):
for c in range(cols):
ax[r, c].imshow(xs[i], cmap='gray')
ax[r, c].set_title(titles[i])
ax[r, c].set_xticklabels([])
ax[r, c].set_yticklabels([])
i += 1
fig.tight_layout()
plt.show()
def display_batch(x, titles, rows, cols, figsize=(12, 6)):
"""Displays a batch of processed examples in a grid."""
# denormalize [0, 1] => [0, 255]
x *= 255
# permute channels (C x H x W) => (H x W x C)
x = x.permute(0, 2, 3, 1)
# convert to integer
x = (x.numpy()).astype(np.uint8)
# flatten the channel dimension
x = x.reshape(*x.shape[:3])
# display
display_grid(x, titles, rows, cols, figsize)
def set_seed(seed=0):
"""Initializes pseudo-random number generators."""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# reproducibility
set_seed()
```
## 2 Data
### 2.1 Data pipelines with PyTorch
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/mnist_pipeline.png"/>
Data pipeline for MNIST.
### 2.2 Exploration
### 2.3 Data loaders
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/eval_trn_tst.svg" width="450"/>
Training with a train/test split.
#### Training
#### Test
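The solution cells for this section are left for the exercise; below is a minimal sketch of how the pipeline could look, reusing the helpers defined above (`DATA_DIR`, `BATCH_SIZE`, `ROWS`, `COLS`, `CLASSES`, `display_batch`). The names `trn_ds`, `tst_ds`, `trn_dl`, `tst_dl` are illustrative, not the reference solution.
```
# A possible data pipeline sketch (not the reference solution).
trn_ds = FashionMNIST(DATA_DIR, train=True, download=True, transform=T.ToTensor())
tst_ds = FashionMNIST(DATA_DIR, train=False, download=True, transform=T.ToTensor())

# shuffle only the training split
trn_dl = DataLoader(trn_ds, batch_size=BATCH_SIZE, shuffle=True)
tst_dl = DataLoader(tst_ds, batch_size=BATCH_SIZE, shuffle=False)

# quick exploration: show one batch with the helper defined above
x, y = next(iter(trn_dl))
display_batch(x.clone(), [CLASSES[int(lbl)] for lbl in y], ROWS, COLS)
```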
## 3 Model
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/fcn_arch.png"/>
Architecture of the fully connected network.
### 3.1 Architecture definition
### 3.2 Architecture instantiation
### 3.3 Architecture inspection
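As a starting point for the three subsections above, here is a minimal sketch of a fully connected classifier for 28×28 inputs; the hidden-layer sizes are an assumption and may differ from the architecture in the figure. `torchinfo.summary` (imported above) handles the inspection step.
```
# One possible dense architecture (hidden sizes are assumptions).
class FCN(nn.Module):
    def __init__(self, n_classes=10):
        super().__init__()
        self.net = nn.Sequential(
            nn.Flatten(),               # (N, 1, 28, 28) -> (N, 784)
            nn.Linear(28 * 28, 256),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.ReLU(),
            nn.Linear(128, n_classes),  # logits
        )

    def forward(self, x):
        return self.net(x)

# instantiate and inspect the architecture
model = FCN()
summary(model, input_size=(BATCH_SIZE, 1, 28, 28))
```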
## 4 Training
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/supervisado.svg" width="700"/>
Supervised training loop.
### 4.1 Training loop
We train a model:
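A minimal sketch of a supervised training loop follows, assuming the `model`, `trn_dl` and `tst_dl` names from the sketches above; the optimizer, learning rate and number of epochs are assumptions, not prescribed values.
```
# Minimal training loop sketch (optimizer and hyperparameters are assumptions).
def train(model, trn_dl, tst_dl, epochs=10, lr=1e-3):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    opt = optim.Adam(model.parameters(), lr=lr)
    loss_hist, acc_hist = [], []
    for epoch in trange(epochs):
        # training epoch
        model.train()
        for x, y_true in trn_dl:
            x, y_true = x.to(device), y_true.to(device)
            loss = F.cross_entropy(model(x), y_true)
            opt.zero_grad()
            loss.backward()
            opt.step()
        # evaluation on the test split after each epoch
        model.eval()
        correct, total = 0, 0
        with torch.no_grad():
            for x, y_true in tst_dl:
                x, y_true = x.to(device), y_true.to(device)
                correct += (model(x).argmax(dim=1) == y_true).sum().item()
                total += y_true.size(0)
        loss_hist.append(loss.item())
        acc_hist.append(correct / total)
    return loss_hist, acc_hist

loss_hist, acc_hist = train(model, trn_dl, tst_dl)
```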
### 4.2 Loss and accuracy plots
## 5 Evaluation
<img src="https://raw.githubusercontent.com/bereml/iap/master/fig/eval_trn_tst.svg" width="450"/>
Training with a train/test split.
### 5.1 Final evaluation
### 5.2 Inference
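Again only as a sketch, reusing the `model`, `tst_dl` and `tst_ds` names assumed above, the final evaluation and a single-image inference could look like this:
```
# Final test accuracy and a single-image inference (sketch only).
device = next(model.parameters()).device
model.eval()
correct, total = 0, 0
with torch.no_grad():
    for x, y_true in tst_dl:
        y_pred = model(x.to(device)).argmax(dim=1).cpu()
        correct += (y_pred == y_true).sum().item()
        total += y_true.size(0)
print(f'test accuracy: {correct / total:.4f}')

# inference for one test example
x, y_true = tst_ds[0]
with torch.no_grad():
    pred = model(x.unsqueeze(0).to(device)).argmax(dim=1).item()
print('predicted:', CLASSES[pred], '| true:', CLASSES[int(y_true)])
```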
# Variance Component Analysis
This notebook illustrates variance components analysis for two-level
nested and crossed designs.
```
import numpy as np
import statsmodels.api as sm
from statsmodels.regression.mixed_linear_model import VCSpec
import pandas as pd
```
Make the notebook reproducible
```
np.random.seed(3123)
```
## Nested analysis
In our discussion below, "Group 2" is nested within "Group 1". As a
concrete example, "Group 1" might be school districts, with "Group
2" being individual schools. The function below generates data from
such a population. In a nested analysis, the group 2 labels that
are nested within different group 1 labels are treated as
independent groups, even if they have the same label. For example,
two schools labeled "school 1" that are in two different school
districts are treated as independent schools, even though they have
the same label.
```
def generate_nested(n_group1=200, n_group2=20, n_rep=10, group1_sd=2,
group2_sd=3, unexplained_sd=4):
# Group 1 indicators
group1 = np.kron(np.arange(n_group1), np.ones(n_group2 * n_rep))
# Group 1 effects
u = group1_sd * np.random.normal(size=n_group1)
effects1 = np.kron(u, np.ones(n_group2 * n_rep))
# Group 2 indicators
group2 = np.kron(np.ones(n_group1), np.kron(np.arange(n_group2), np.ones(n_rep)))
# Group 2 effects
u = group2_sd * np.random.normal(size=n_group1*n_group2)
effects2 = np.kron(u, np.ones(n_rep))
e = unexplained_sd * np.random.normal(size=n_group1 * n_group2 * n_rep)
y = effects1 + effects2 + e
df = pd.DataFrame({"y":y, "group1": group1, "group2": group2})
return df
```
Generate a data set to analyze.
```
df = generate_nested()
```
Using all the default arguments for `generate_nested`, the population
values of "group 1 Var" and "group 2 Var" are 2^2=4 and 3^2=9,
respectively. The unexplained variance, listed as "scale" at the
top of the summary table, has population value 4^2=16.
```
model1 = sm.MixedLM.from_formula("y ~ 1", re_formula="1", vc_formula={"group2": "0 + C(group2)"},
groups="group1", data=df)
result1 = model1.fit()
print(result1.summary())
```
If we wish to avoid the formula interface, we can fit the same model
by building the design matrices manually.
```
def f(x):
n = x.shape[0]
g2 = x.group2
u = g2.unique()
u.sort()
uv = {v: k for k, v in enumerate(u)}
mat = np.zeros((n, len(u)))
for i in range(n):
mat[i, uv[g2[i]]] = 1
colnames = ["%d" % z for z in u]
return mat, colnames
```
Then we set up the variance components using the VCSpec class.
```
vcm = df.groupby("group1").apply(f).to_list()
mats = [x[0] for x in vcm]
colnames = [x[1] for x in vcm]
names = ["group2"]
vcs = VCSpec(names, [colnames], [mats])
```
Finally we fit the model. It can be seen that the results of the
two fits are identical.
```
oo = np.ones(df.shape[0])
model2 = sm.MixedLM(df.y, oo, exog_re=oo, groups=df.group1, exog_vc=vcs)
result2 = model2.fit()
print(result2.summary())
```
## Crossed analysis
In a crossed analysis, the levels of one group can occur in any
combination with the levels of another group. The groups in
Statsmodels MixedLM are always nested, but it is possible to fit a
crossed model by having only one group, and specifying all random
effects as variance components. Many, but not all crossed models
can be fit in this way. The function below generates a crossed data
set with two levels of random structure.
```
def generate_crossed(n_group1=100, n_group2=100, n_rep=4, group1_sd=2,
group2_sd=3, unexplained_sd=4):
# Group 1 indicators
group1 = np.kron(np.arange(n_group1, dtype=int),
np.ones(n_group2 * n_rep, dtype=int))
group1 = group1[np.random.permutation(len(group1))]
# Group 1 effects
u = group1_sd * np.random.normal(size=n_group1)
effects1 = u[group1]
# Group 2 indicators
group2 = np.kron(np.arange(n_group2, dtype=int),
np.ones(n_group2 * n_rep, dtype=int))
group2 = group2[np.random.permutation(len(group2))]
# Group 2 effects
u = group2_sd * np.random.normal(size=n_group2)
effects2 = u[group2]
e = unexplained_sd * np.random.normal(size=n_group1 * n_group2 * n_rep)
y = effects1 + effects2 + e
df = pd.DataFrame({"y":y, "group1": group1, "group2": group2})
return df
```
Generate a data set to analyze.
```
df = generate_crossed()
```
Next we fit the model, note that the `groups` vector is constant.
Using the default parameters for `generate_crossed`, the level 1
variance should be 2^2=4, the level 2 variance should be 3^2=9, and
the unexplained variance should be 4^2=16.
```
vc = {"g1": "0 + C(group1)", "g2": "0 + C(group2)"}
oo = np.ones(df.shape[0])
model3 = sm.MixedLM.from_formula("y ~ 1", groups=oo, vc_formula=vc, data=df)
result3 = model3.fit()
print(result3.summary())
```
If we wish to avoid the formula interface, we can fit the same model
by building the design matrices manually.
```
def f(g):
n = len(g)
u = g.unique()
u.sort()
uv = {v: k for k, v in enumerate(u)}
mat = np.zeros((n, len(u)))
for i in range(n):
mat[i, uv[g[i]]] = 1
colnames = ["%d" % z for z in u]
return [mat], [colnames]
vcm = [f(df.group1), f(df.group2)]
mats = [x[0] for x in vcm]
colnames = [x[1] for x in vcm]
names = ["group1", "group2"]
vcs = VCSpec(names, colnames, mats)
```
Here we fit the model without using formulas, it is simple to check
that the results for models 3 and 4 are identical.
```
oo = np.ones(df.shape[0])
model4 = sm.MixedLM(df.y, oo[:, None], exog_re=None, groups=oo, exog_vc=vcs)
result4 = model4.fit()
print(result4.summary())
```
# Relevancy Analysis
<div class="alert alert-info">
This tutorial is available as an IPython notebook at [Malaya/example/relevancy](https://github.com/huseinzol05/Malaya/tree/master/example/relevancy).
</div>
<div class="alert alert-warning">
This module is only trained on standard language structure, so it is not safe to use on local (colloquial) language structure.
</div>
```
%%time
import malaya
```
### Models accuracy
We use `sklearn.metrics.classification_report` for accuracy reporting, check at https://malaya.readthedocs.io/en/latest/models-accuracy.html#relevancy-analysis
### labels supported
Default labels for relevancy module.
```
malaya.relevancy.label
```
### Explanation
Positive relevancy: the article or piece of text is relevant and is unlikely to be fake news. It can carry either positive or negative sentiment.
Negative relevancy: the article or piece of text is not relevant and is likely to be fake news. It can carry either positive or negative sentiment.
**Right now the relevancy module only supports deep learning models**.
```
negative_text = 'Roti Massimo Mengandungi DNA Babi. Roti produk Massimo keluaran Syarikat The Italian Baker mengandungi DNA babi. Para pengguna dinasihatkan supaya tidak memakan produk massimo. Terdapat pelbagai produk roti keluaran syarikat lain yang boleh dimakan dan halal. Mari kita sebarkan berita ini supaya semua rakyat Malaysia sedar dengan apa yang mereka makna setiap hari. Roti tidak halal ada DNA babi jangan makan ok.'
positive_text = 'Jabatan Kemajuan Islam Malaysia memperjelaskan dakwaan sebuah mesej yang dikitar semula, yang mendakwa kononnya kod E dikaitkan dengan kandungan lemak babi sepertimana yang tular di media sosial. . Tular: November 2017 . Tular: Mei 2014 JAKIM ingin memaklumkan kepada masyarakat berhubung maklumat yang telah disebarkan secara meluas khasnya melalui media sosial berhubung kod E yang dikaitkan mempunyai lemak babi. Untuk makluman, KOD E ialah kod untuk bahan tambah (aditif) dan ianya selalu digunakan pada label makanan di negara Kesatuan Eropah. Menurut JAKIM, tidak semua nombor E yang digunakan untuk membuat sesuatu produk makanan berasaskan dari sumber yang haram. Sehubungan itu, sekiranya sesuatu produk merupakan produk tempatan dan mendapat sijil Pengesahan Halal Malaysia, maka ia boleh digunakan tanpa was-was sekalipun mempunyai kod E-kod. Tetapi sekiranya produk tersebut bukan produk tempatan serta tidak mendapat sijil pengesahan halal Malaysia walaupun menggunakan e-kod yang sama, pengguna dinasihatkan agar berhati-hati dalam memilih produk tersebut.'
```
### List available Transformer models
```
malaya.relevancy.available_transformer()
```
### Load Transformer model
```python
def transformer(model: str = 'xlnet', quantized: bool = False, **kwargs):
"""
Load Transformer relevancy model.
Parameters
----------
model : str, optional (default='bert')
Model architecture supported. Allowed values:
* ``'bert'`` - Google BERT BASE parameters.
* ``'tiny-bert'`` - Google BERT TINY parameters.
* ``'albert'`` - Google ALBERT BASE parameters.
* ``'tiny-albert'`` - Google ALBERT TINY parameters.
* ``'xlnet'`` - Google XLNET BASE parameters.
* ``'alxlnet'`` - Malaya ALXLNET BASE parameters.
* ``'bigbird'`` - Google BigBird BASE parameters.
* ``'tiny-bigbird'`` - Malaya BigBird BASE parameters.
* ``'fastformer'`` - FastFormer BASE parameters.
* ``'tiny-fastformer'`` - FastFormer TINY parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
Quantized model not necessary faster, totally depends on the machine.
Returns
-------
result: model
List of model classes:
* if `bert` in model, will return `malaya.model.bert.MulticlassBERT`.
* if `xlnet` in model, will return `malaya.model.xlnet.MulticlassXLNET`.
* if `bigbird` in model, will return `malaya.model.xlnet.MulticlassBigBird`.
* if `fastformer` in model, will return `malaya.model.fastformer.MulticlassFastFormer`.
"""
```
```
model = malaya.relevancy.transformer(model = 'tiny-bigbird')
```
### Load Quantized model
To load 8-bit quantized model, simply pass `quantized = True`, default is `False`.
We can expect a slight accuracy drop from the quantized model, and it is not necessarily faster than the normal 32-bit float model; it depends entirely on the machine.
```
quantized_model = malaya.relevancy.transformer(model = 'alxlnet', quantized = True)
```
#### Predict batch of strings
```python
def predict(self, strings: List[str]):
"""
classify list of strings.
Parameters
----------
strings: List[str]
Returns
-------
result: List[str]
"""
```
```
%%time
model.predict([negative_text, positive_text])
%%time
quantized_model.predict([negative_text, positive_text])
```
#### Predict batch of strings with probability
```python
def predict_proba(self, strings: List[str]):
"""
classify list of strings and return probability.
Parameters
----------
strings : List[str]
Returns
-------
result: List[dict[str, float]]
"""
```
```
%%time
model.predict_proba([negative_text, positive_text])
%%time
quantized_model.predict_proba([negative_text, positive_text])
```
#### Open relevancy visualization dashboard
By default, calling `predict_words` will open a browser with the visualization dashboard; you can disable this with `visualization=False`.
```python
def predict_words(
self,
string: str,
method: str = 'last',
bins_size: float = 0.05,
visualization: bool = True,
):
"""
classify words.
Parameters
----------
string : str
method : str, optional (default='last')
Attention layer supported. Allowed values:
* ``'last'`` - attention from last layer.
* ``'first'`` - attention from first layer.
* ``'mean'`` - average attentions from all layers.
bins_size: float, optional (default=0.05)
default bins size for word distribution histogram.
visualization: bool, optional (default=True)
If True, it will open the visualization dashboard.
Returns
-------
dictionary: results
"""
```
**This method not available for BigBird models**.
```
quantized_model.predict_words(negative_text)
```
### Vectorize
Say you want to visualize sentence / word level representations in a lower dimension; you can use `model.vectorize`,
```python
def vectorize(self, strings: List[str], method: str = 'first'):
"""
vectorize list of strings.
Parameters
----------
strings: List[str]
method : str, optional (default='first')
Vectorization layer supported. Allowed values:
* ``'last'`` - vector from last sequence.
* ``'first'`` - vector from first sequence.
* ``'mean'`` - average vectors from all sequences.
* ``'word'`` - average vectors based on tokens.
Returns
-------
result: np.array
"""
```
#### Sentence level
```
texts = [negative_text, positive_text]
r = model.vectorize(texts, method = 'first')
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE().fit_transform(r)
tsne.shape
plt.figure(figsize = (7, 7))
plt.scatter(tsne[:, 0], tsne[:, 1])
labels = texts
for label, x, y in zip(
labels, tsne[:, 0], tsne[:, 1]
):
label = (
'%s, %.3f' % (label[0], label[1])
if isinstance(label, list)
else label
)
plt.annotate(
label,
xy = (x, y),
xytext = (0, 0),
textcoords = 'offset points',
)
```
#### Word level
```
r = quantized_model.vectorize(texts, method = 'word')
x, y = [], []
for row in r:
x.extend([i[0] for i in row])
y.extend([i[1] for i in row])
tsne = TSNE().fit_transform(y)
tsne.shape
plt.figure(figsize = (7, 7))
plt.scatter(tsne[:, 0], tsne[:, 1])
labels = x
for label, x, y in zip(
labels, tsne[:, 0], tsne[:, 1]
):
label = (
'%s, %.3f' % (label[0], label[1])
if isinstance(label, list)
else label
)
plt.annotate(
label,
xy = (x, y),
xytext = (0, 0),
textcoords = 'offset points',
)
```
Pretty good, the model is able to place the positive-relevancy text in its own cluster at the bottom left.
### Stacking models
More information, you can read at [https://malaya.readthedocs.io/en/latest/Stack.html](https://malaya.readthedocs.io/en/latest/Stack.html)
```
albert = malaya.relevancy.transformer(model = 'albert')
malaya.stack.predict_stack([albert, model], [positive_text, negative_text])
```
```
%matplotlib inline
import numpy as np
import pandas as pd
import math
from scipy import stats
import pickle
from causality.analysis.dataframe import CausalDataFrame
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import plotly
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
```
Open the data from past notebooks and correct them to only include years that are common between the data structures (>1999).
```
with open('VariableData/money_data.pickle', 'rb') as f:
income_data, housing_data, rent_data = pickle.load(f)
with open('VariableData/demographic_data.pickle', 'rb') as f:
demographic_data = pickle.load(f)
with open('VariableData/endowment.pickle', 'rb') as f:
endowment = pickle.load(f)
with open('VariableData/expander.pickle', 'rb') as f:
expander = pickle.load(f)
endowment = endowment[endowment['FY'] > 1997].reset_index()
endowment.drop('index', axis=1, inplace=True)
demographic_data = demographic_data[demographic_data['year'] > 1999].reset_index()
demographic_data.drop('index', axis=1, inplace=True)
income_data = income_data[income_data['year'] > 1999].reset_index()
income_data.drop('index', axis=1, inplace=True)
housing_data = housing_data[housing_data['year'] > 1999].reset_index()
housing_data.drop('index', axis=1, inplace=True)
rent_data = rent_data[rent_data['year'] > 1999].reset_index()
rent_data.drop('index', axis=1, inplace=True)
```
Define a function to graph (and perform linear regression on) a given set of data.
```
def grapher(x, y, city, title, ytitle, xtitle, filename):
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
fit = slope * x + intercept
trace0 = go.Scatter(
x = x,
y = y,
mode = 'markers',
name=city,
marker=go.Marker(color='#D2232A')
)
fit0 = go.Scatter(
x = x,
y = fit,
mode='lines',
marker=go.Marker(color='#AC1D23'),
name='Linear Fit'
)
data = [trace0, fit0]
layout = go.Layout(
title = title,
font = dict(family='Gotham', size=12),
yaxis=dict(
title=ytitle
),
xaxis=dict(
title=xtitle)
)
fig = go.Figure(data=data, layout=layout)
return iplot(fig, filename=filename)
```
Investigate the connection between the endowment's value and the Black population in Cambridge, controlling for rent and housing prices.
```
x = pd.to_numeric(endowment['Value ($B)']).as_matrix()
y = pd.to_numeric(demographic_data['c_black']).as_matrix()
z1 = pd.to_numeric(rent_data['cambridge']).as_matrix()
z2 = pd.to_numeric(housing_data['cambridge']).as_matrix()
X = CausalDataFrame({'x': x, 'y': y, 'z1': z1, 'z2': z2})
plt.rcParams['font.size'] = 10
gotham_black = fm.FontProperties(fname='/Users/hakeemangulu/Library/Fonts/Gotham Black Regular.ttf')
gotham_book = fm.FontProperties(fname='/Users/hakeemangulu/Library/Fonts/Gotham Book Regular.otf')
endow_black = grapher(x, y, "Cambridge", "The Correlation Between Endowment and Black Population", "Black Population of Cambridge", "Endowment ($B)", "endow_black")
causal_endow_black = X.zplot(x='x', y='y', z=['z1', 'z2'], z_types={'z1': 'c', 'z2': 'c'}, kind='line', color="#D2232A")
fig = causal_endow_black.get_figure()
fig.set_size_inches(9, 5.5)
ax = plt.gca()
ax.set_frame_on(False)
ax.get_yaxis().set_visible(False)
ax.legend_.remove()
ax.set_title("The Controlled Correlation Between Endowment (Billions of Dollars) and Black Population", fontproperties=gotham_black, size=10, color="#595959")
ax.set_xlabel("Endowment", fontproperties=gotham_book, fontsize=10, color="#595959")
for tick in ax.get_xticklabels():
tick.set_fontproperties(gotham_book)
tick.set_fontsize(10)
tick.set_color("#595959")
fig.savefig('images/black_endow.svg', format='svg', dpi=1200, bbox_inches='tight')
```
Investigate the connection between the endowment's value and the housing prices in Cambridge, controlling for growth of the population.
```
x = pd.to_numeric(endowment['Value ($B)']).as_matrix()
y = pd.to_numeric(housing_data['cambridge']).as_matrix()
z1 = pd.to_numeric(demographic_data['c_white']).as_matrix()
z2 = pd.to_numeric(demographic_data['c_poc']).as_matrix()
X = CausalDataFrame({'x': x, 'y': y, 'z1': z1, 'z2': z2})
endow_housing = grapher(x, y, "Cambridge", "The Correlation Between Endowment and Housing Prices", "Housing Prices in Cambridge", "Endowment ($B)", "endow_housing")
causal_endow_housing = X.zplot(x='x', y='y', z=['z1', 'z2'], z_types={'z1': 'c', 'z2': 'c'}, kind='line', color="#D2232A")
fig = causal_endow_housing.get_figure()
fig.set_size_inches(9, 5.5)
ax = plt.gca()
ax.set_frame_on(False)
ax.get_yaxis().set_visible(False)
ax.legend_.remove()
ax.set_title("The Controlled Correlation Between Endowment (Billions of Dollars) and Housing Prices", fontproperties=gotham_black, size=10, color="#595959")
ax.set_xlabel("Endowment", fontproperties=gotham_book, fontsize=10, color="#595959")
for tick in ax.get_xticklabels():
tick.set_fontproperties(gotham_book)
tick.set_fontsize(10)
tick.set_color("#595959")
fig.savefig('images/housing_endow.svg', format='svg', dpi=1200, bbox_inches='tight')
```
Investigate the connection between the endowment's value and the rent prices in Cambridge, controlling for growth of the population.
```
x = pd.to_numeric(endowment['Value ($B)']).as_matrix()
y = pd.to_numeric(rent_data['cambridge']).as_matrix()
z1 = pd.to_numeric(demographic_data['c_white']).as_matrix()
z2 = pd.to_numeric(demographic_data['c_poc']).as_matrix()
X = CausalDataFrame({'x': x, 'y': y, 'z1': z1, 'z2': z2})
endow_rent = grapher(x, y, "Cambridge", "The Correlation Between Endowment and Rent", "Rent in Cambridge", "Endowment ($B)", "endow_rent")
causal_endow_rent = X.zplot(x='x', y='y', z=['z1', 'z2'], z_types={'z1': 'c', 'z2': 'c'}, kind='line', title='The Controlled Correlation Between Endowment and Rent')
fig = causal_endow_rent.get_figure()
fig.set_size_inches(9, 5.5)
ax = plt.gca()
ax.set_frame_on(False)
ax.get_yaxis().set_visible(False)
ax.legend_.remove()
ax.set_title("The Controlled Correlation Between Endowment (Billions of Dollars) and Housing Prices", fontproperties=gotham_black, size=10, color="#595959")
ax.set_xlabel("Endowment", fontproperties=gotham_book, fontsize=10, color="#595959")
for tick in ax.get_xticklabels():
tick.set_fontproperties(gotham_book)
tick.set_fontsize(10)
tick.set_color("#595959")
fig.savefig('images/rent_endow.svg', format='svg', dpi=1200, bbox_inches='tight')
```
Investigate the connection between the amount Harvard pays the city of Cambridge per year (PILOT) and the rent prices in Cambridge, controlling for growth of the population.
```
x = pd.to_numeric(expander['Payments to City']).as_matrix()
y = pd.to_numeric(rent_data['cambridge']).as_matrix()
# Remove the last two elements of the other arrays – PILOT data is not sufficient otherwise.
y = y[:-2].copy()
z1 = pd.to_numeric(demographic_data['c_white']).as_matrix()
z1 = z1[:-2].copy()
z2 = pd.to_numeric(demographic_data['c_poc']).as_matrix()
z2 = z2[:-2].copy()
X = CausalDataFrame({'x': x, 'y': y, 'z1': z1, 'z2': z2})
pilot_rent = grapher(x, y, "Cambridge", "The Correlation Between Harvard's PILOT and Rent", "Rent in Cambridge", "PILOT ($)", "pilot_rent")
causal_endow_rent = X.zplot(x='x', y='y', z=['z1', 'z2'], z_types={'z1': 'c', 'z2': 'c'}, kind='line')
```
# Data Mining, Preparation and Understanding
Today we'll go through Data Mining, Preparation & Understanding which is a really fun one (and important).
In this notebook we'll try out some important libs to understand & also learn how to parse Twitter with some help from `Twint`. All in all we'll go through `pandas`, `twint` and some more - let's start by installing them.
```
%%capture
!pip install twint
!pip install wordcloud
import twint
import pandas as pd
import tqdm
import nltk
nltk.download('stopwords')
```
## Tonights theme: ÅF Pöyry (and perhaps some AFRY)
To be a Data Miner we need something to mine.

In this case it won't be Doge Coin but rather ÅF, ÅF Pöyry & AFRY.
To be honest, it's not the best theme (pretty generic names once you go ASCII, which we'll do to simplify our lives).
### What is Twint
`Twint` is a really helpful library to scrape Twitter, it uses the search (i.e. not the API) and simplifies the whole process for us as users.
The other way to do this would be to use either the API yourself (time-consuming to learn and also limited in calls) or to use BS4 (Beatiful Soup) which is a great python-lib to scrape websites. But I'd dare say that it is better for static content sites such as Wikipedia, Aftonbladet etc rather than Twitter etc.
This all together led to the choice of `Twint` _even_ though it has a **huge** disadvantage - it does not support UTF8 from what I can find.
### What is pandas
Pandas is a library to parse, understand and work with data. It's really fast using the `DataFrame` they supply.
Using this `DataFrame` we can manipulate the data in different ways. It has all the functions you can imagine from both SQL and Excel, a great tool all in all.
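To make that concrete, here is a tiny toy example of the SQL/Excel-style operations a `DataFrame` gives us; the rows below are made up, the real columns will come from the Twint CSV later on.
```
# Toy DataFrame with tweet-like rows (made-up data, just to show the API)
import pandas as pd

toy = pd.DataFrame({
    "username": ["anna", "bertil", "anna"],
    "tweet": ["ÅF Pöyry blir AFRY", "hej hej", "AFRY rekryterar"],
})

print(toy[toy.tweet.str.contains("AFRY")])   # SQL-like WHERE
print(toy.groupby("username").size())        # GROUP BY + COUNT
print(toy["tweet"].str.len().describe())     # quick Excel-style stats
```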
### Bringing it all together
Let's take a look at how we can use this all together!
First a quick look at the Twint config.
```
"""
Twint Config:
Variable Type Description
--------------------------------------------
Retweets (bool) - Display replies to a subject.
Search (string) - Search terms
Store_csv (bool) - Set to True to write as a csv file.
Pandas (bool) - Enable Pandas integration.
Store_pandas (bool) - Save Tweets in a DataFrame (Pandas) file.
Get_replies (bool) - All replies to the tweet.
Lang (string) - Compatible language codes: https://github.com/twintproject/twint/wiki/Langauge-codes (sv, fi & en supported)
Format (string) - Custom terminal output formatting.
Hide_output (bool) - Hide output.
Rest of config: https://github.com/twintproject/twint/wiki/Configuration
"""
c = twint.Config()
c.Query
c.Search = " ÅF "
c.Format = "Username: {username} | Tweet: {tweet}"
c.Pandas = True
c.Store_pandas = True
c.Pandas_clean = True
c.Show_hashtags = True
c.Limit = 10
twint.run.Search(c)
```
**What do we see?**
No Swedish whatsoever. This is not interesting for our use case as all the tweets are about something else really.
Let's try ÅF Pöyry instead
```
c.Search = "ÅF AFRY Pöyry"
twint.run.Search(c)
```
Looking at this we have a much better result. This really shows the power of Ngrams (bigram).
Let's play around some in the next box trying `@AFkarriar` as keyword and also to include `Replies` and some other fields.
```
c.Replies = True
twint.run.Search(c)
# Play around with params, do whatever!
```
### Results
Ok, so we have tried out a few different things we can use in `Twint`. For me `@AFkarriar` worked out best - **what was your favorite?**
Let's analyze some more.
```
FILENAME = "afpoyry.csv"
c = twint.Config()
c.Query
c.Show_hashtags = True
c.Search = "ÅF"
c.Lang = "sv"
#c.Get_replies = True
c.Store_csv = True
c.Hide_output = True
c.Output = FILENAME
twint.run.Search(c)
data = pd.read_csv(FILENAME)
print(data.shape)
print(data.dtypes)
```
### Cleaning
We can most likely drop some columns here, just to make it simpler for us
```
data_less = data.filter(["tweet", "username"])
data_less.head()
data_less["tweet"].head()
from wordcloud import WordCloud
from IPython.display import Image
t = '\n'.join([x.tweet for i, x in data_less.iterrows()])
WordCloud().generate(t).to_file('cloud.png')
Image('cloud.png')
```
**Stop Words** - Anyone remember? Let's remove them!
NLTK is a great toolkit for just about everything in NLP, we can find a list of stopwords for most languages here, including Swedish.
```
from nltk.corpus import stopwords
swe_stop = set(stopwords.words('swedish'))
list(swe_stop)[:5]
```
**Stemming** - Anyone remember? Let's do it!
NLTK is _the_ lib to use when you want at least _some_ swedish. But I think I've squeezed all the swedish out of NLTK that I can find right now...
```
from nltk.stem import SnowballStemmer
stemmer = SnowballStemmer("swedish")
stemmer.stem("hoppade")
```
**Cleaning** - Anyone remember? Let's do it!

To have a "better" word cloud we need to reduce the dimensions and keep more important words.
```
%%capture
!pip install regex
from string import punctuation
import regex as re
# bad_words = re.compile("https|http|pic|www|och|med|att|åf|pöyry|läs")
http_re = re.compile("https?.*?(\w+)\.\w+(\/\s)?")
whitespace_re = re.compile("\s+")
punc_set = set(punctuation)
def clean_punct(tweet):
return ''.join([c for c in tweet if c not in punc_set])
def remove_stopwords(tweet):
return " ".join([t for t in tweet.split(" ") if t not in swe_stop])
# Example of cleaning: remove punct, lowercase, https and stemming/lemmatizing
# (we want to reduce the space/dimensions)
def clean_text(tweet):
tweet = tweet.lower()
tweet = ' '.join([word for word in tweet.split() if not word.startswith('pic.')])
tweet = http_re.sub(r'\1', tweet)
tweet = tweet.lower()
tweet = remove_stopwords(clean_punct(tweet)).strip()
tweet = whitespace_re.sub(' ', tweet)
return tweet
clean_text("hej där borta. hur mår du? vem vet.. Jag vet inte. http:/google.com pic.twitterlol")
#data_less["tweet"] = data_less["tweet"].apply(lambda x: clean_text(x))
data_less["tweet"]
from wordcloud import WordCloud
from IPython.display import Image
t = '\n'.join([x.tweet for i, x in data_less.iterrows()])
WordCloud().generate(t).to_file('cloud_clean.png')
Image('cloud_clean.png')
from collections import Counter
def print_most_common(wcount, n=5):
for (name, count) in wcount.most_common(n):
print(f"{name}: {count}")
t_hash = ' '.join([x for x in t.split() if x.startswith("#")])
hash_count = Counter(t_hash.split())
WordCloud().generate(t_hash).to_file('cloud_#.png')
print_most_common(hash_count, 10)
t_at = ' '.join([x for x in t.split() if x.startswith("@")])
at_count = Counter(t_at.split())
WordCloud().generate(t_at).to_file('[email protected]')
print_most_common(at_count, 10)
```
### WordClouds!
Let's take a look at what we've got.
```
Image('cloud_clean.png')
Image('cloud_no_stop.png')
Image('[email protected]')
Image('cloud_#.png')
```
### What to do?
A big problem with Swedish is that there are very few models we can have some fun with, and our time is very limited.
Further on we can do the following:
1. Look at Ngrams to see if we can spot common patterns (see the short sketch below)
2. ...
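As a head start on option 1, here is a small sketch that counts the most common bigrams in the tweets we already joined into `t` above (using `nltk`, which was imported at the top of the notebook).
```
# Count the most common bigrams in the joined tweets `t`
from collections import Counter
from nltk import ngrams

tokens = t.split()
bigram_counts = Counter(ngrams(tokens, 2))
for bigram, count in bigram_counts.most_common(10):
    print(' '.join(bigram), count)
```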
```
"""
1. Try perhaps some type of Ngrams
4. Find different shit
4. Try to find connections
5. Move over to spark (?)
https://towardsdatascience.com/nlp-for-beginners-cleaning-preprocessing-text-data-ae8e306bef0f
https://medium.com/@kennycontreras/natural-language-processing-using-spark-and-pandas-f60a5eb1cfc6
"""
```
### AFRY
Let's create a wordcloud & everything for AFRY. This is for you to implement fully!
```
FILENAME2 = "afry.csv"
c = twint.Config()
c.Query
c.Show_hashtags = True
c.Search = "afry"
c.Lang = "sv"
c.Get_replies = True
c.Store_csv = True
c.Hide_output = True
c.Output = FILENAME2
twint.run.Search(c)
data_afry = pd.read_csv(FILENAME2)
t_afry = '\n'.join([x.tweet for i, x in data_afry.iterrows()])
WordCloud().generate(t_afry).to_file('cloud_afry.png')
Image('cloud_afry.png')
```
### Jonas Sjöstedt (jsjostedt) vs Jimmy Åkesson (jimmieakesson)
Implementation as follows:
1. Get data for both (tip: use `c.Username` or `c.User_id` and don't forget formatting output in terminal if used)
2. Clean data
3. ?? (Perhaps wordclouds etc)
4. TfIdf
5. Join ds & shuffle, train clf
6. Testing!
## Jimmie Åkesson
```
FILENAME = "jimmie2.csv"
c = twint.Config()
c.Query
c.Show_hashtags = True
#c.Search = "ÅF"
c.Username = "jimmieakesson"
#c.Get_replies = True
c.Store_csv = True
c.Output = FILENAME
twint.run.Search(c)
data_jimmie = pd.read_csv(FILENAME)
print(data_jimmie.shape)
data_less_jimmie = data_jimmie.filter(["tweet", "username"])
data_less_jimmie.head()
data_less_jimmie["tweet"] = data_less_jimmie["tweet"].apply(lambda x: clean_text(x))
data_less_jimmie.head()
from wordcloud import WordCloud
from IPython.display import Image
t = '\n'.join([x.tweet for i, x in data_less_jimmie.iterrows()])
WordCloud().generate(t).to_file('cloud_clean_jimmie.png')
Image('cloud_clean_jimmie.png')
```
## Jonas Sjöstedt
```
FILENAME_J = "jonas.csv"
c = twint.Config()
c.Query
c.Show_hashtags = True
#c.Search = "ÅF"
c.Username = "jsjostedt"
#c.Get_replies = True
c.Store_csv = True
c.Hide_output = True
c.Output = FILENAME_J
twint.run.Search(c)
data_jonas = pd.read_csv(FILENAME_J)
print(data_jonas.shape)
data_less_jonas = data_jonas.filter(["tweet", "username"])
data_less_jonas.head()
data_less_jonas["tweet"] = data_less_jonas["tweet"].apply(lambda x: clean_text(x))
data_less_jonas.head()
t = '\n'.join([x.tweet for i, x in data_less_jonas.iterrows()])
WordCloud().generate(t).to_file('cloud_clean_jonas.png')
Image('cloud_clean_jonas.png')
```
# TfIdf
```
from sklearn.feature_extraction.text import TfidfVectorizer
cv=TfidfVectorizer(ngram_range=(1,1))
word_count_vector_jonas = cv.fit_transform(data_less_jonas["tweet"])
feature_names = cv.get_feature_names()
#get tfidf vector for first document
first_document_vector=word_count_vector_jonas[0]
#print the scores
df = pd.DataFrame(first_document_vector.T.todense(), index=feature_names, columns=["tfidf"])
df.sort_values(by=["tfidf"],ascending=False)
word_count_vector_jimmie = cv.fit_transform(data_less_jimmie["tweet"])
feature_names = cv.get_feature_names()
#get tfidf vector for first document
first_document_vector=word_count_vector_jimmie[2]
#print the scores
df = pd.DataFrame(first_document_vector.T.todense(), index=feature_names, columns=["tfidf"])
df.sort_values(by=["tfidf"],ascending=False)
```
# Join dfs & shuffle, train clf
```
print(data_jimmie.shape)
print(data_jonas.shape)
from sklearn.utils import shuffle
tfidf = TfidfVectorizer(ngram_range=(1,2))
data_less_jonas = data_less_jonas.head(2581)
print(data_less_jonas.shape)
combined = pd.concat([data_less_jimmie,data_less_jonas])
combined = shuffle(combined)
print(combined.shape)
combined.head()
from sklearn.model_selection import train_test_split
tweet_tfidf = tfidf.fit_transform(combined["tweet"])
X_train, X_test, y_train, y_test = train_test_split(tweet_tfidf, combined["username"], test_size=0.1, random_state=42)
X_train[:3]
from sklearn.svm import LinearSVC
clf = LinearSVC()
model = clf.fit(X_train, y_train)
from sklearn.metrics import classification_report
y_pred = clf.predict(X_test)
print(classification_report(y_test, y_pred))
```
# Testing!
```
def testClassifier(tweet):
vector = tfidf.transform([clean_text(tweet)])
print(model.predict(vector))
testClassifier("")
testClassifier("Arbetsmarknaden är inte fri svenska kollektivavtal privatisering arbetslösa kommun")
```
# Going forward
I see 4 options:
1. Find stuffs that can help people in the office (@AFRY)
2. Create models for Swedish and perhaps Open Source
3. Make "interesting"/"fun" stuffs (such as applying Text Generation on something like Cards Against Humanity etc)
4. Try something new (perhaps Image Recognition?)
Focusing on Swedish is only possible in 1 & 2.
Some concrete options:
* Explore SparkNLP
* Ask around at AFRY for things to automate
* Apply text-generation with SOTA to generate either something like Cards Against Humanity or some persons Tweet etc.
* Create datasets to create Swedish models on (might need a mech-turk; this will be pretty time-consuming before we see any type of results).
* Something completely different.
```
```
# Finite Difference Method
This note book illustrates the finite different method for a Boundary Value Problem.
### Example Boundary Value Problem
$$ \frac{d^2 y}{dx^2} = 4y$$
### Boundary Condition
$$ y(0)=1.1752, y(1)=10.0179 $$
```
import numpy as np
import math
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
class ListTable(list):
""" Overridden list class which takes a 2-dimensional list of
the form [[1,2,3],[4,5,6]], and renders an HTML Table in
IPython Notebook. """
from IPython.core.display import HTML
def _repr_html_(self):
html = ["<table>"]
for row in self:
html.append("<tr>")
for col in row:
html.append("<td>{0}</td>".format(col))
html.append("</tr>")
html.append("</table>")
return ''.join(html)
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
```
## Discrete Axis
The stepsize is defined as
$$h=\frac{b-a}{N}$$
here it is
$$h=\frac{1-0}{10}$$
giving
$$x_i=0+0.1 i$$
for $i=0,1,...10.$
```
## BVP
N=10
h=1/N
x=np.linspace(0,1,N+1)
fig = plt.figure(figsize=(10,4))
plt.plot(x,0*x,'o:',color='red')
plt.xlim((0,1))
plt.xlabel('x',fontsize=16)
plt.title('Illustration of discrete time points for h=%s'%(h),fontsize=32)
plt.show()
```
## The Difference Equation
The general difference equation is
$$ \frac{1}{h^2}\left(y_{i-1}-2y_i+y_{i+1}\right)=4y_i \ \ \ i=1,..,N-1. $$
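This follows from the standard second-order central difference approximation of the second derivative,
$$\left.\frac{d^2 y}{dx^2}\right|_{x_i} \approx \frac{y_{i-1}-2y_i+y_{i+1}}{h^2},$$
substituted into $y''=4y$ at each interior point $x_i$.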
Rearranging the equation we have the system of N-1 equations
$$i=1: \frac{1}{0.1^2}\color{green}{y_{0}} -\left(\frac{2}{0.1^2}+4\right)y_1 +\frac{1}{0.1^2} y_{2}=0$$
$$i=2: \frac{1}{0.1^2}y_{1} -\left(\frac{2}{0.1^2}+4\right)y_2 +\frac{1}{0.1^2} y_{3}=0$$
$$ ...$$
$$i=8: \frac{1}{0.1^2}y_{7} -\left(\frac{2}{0.1^2}+4\right)y_8 +\frac{1}{0.1^2} y_{9}=0$$
$$i=9: \frac{1}{0.1^2}y_{8} -\left(\frac{2}{0.1^2}+4\right)y_9 +\frac{1}{0.1^2} \color{green}{y_{10}}=0$$
where the green terms are the known boundary conditions.
Rearranging, and moving the known boundary values to the right hand side, we have the system of 9 equations
$$i=1: -\left(\frac{2}{0.1^2}+4\right)y_1 +\frac{1}{0.1^2} y_{2}=-\frac{1}{0.1^2}\color{green}{y_{0}}$$
$$i=2: \frac{1}{0.1^2}y_{1} -\left(\frac{2}{0.1^2}+4\right)y_2 +\frac{1}{0.1^2} y_{3}=0$$
$$ ...$$
$$i=8: \frac{1}{0.1^2}y_{7} -\left(\frac{2}{0.1^2}+4\right)y_8 +\frac{1}{0.1^2} y_{9}=0$$
$$i=9: \frac{1}{0.1^2}y_{8} -\left(\frac{2}{0.1^2}+4\right)y_9 =-\frac{1}{0.1^2}\color{green}{y_{10}}$$
where the green terms are the known boundary conditions, now collected on the right hand side.
Putting this into matrix form gives a $9\times 9 $ matrix
$$
A=\left(\begin{array}{ccc ccc ccc}
-204&100&0& 0&0&0& 0&0&0\\
100&-204&100 &0&0&0& 0&0&0\\
0&100&-204& 100&0&0& 0&0&0\\
.&.&.& .&.&.& .&.&.\\
.&.&.& .&.&.& .&.&.\\
0&0&0& 0&0&0& 100&-204&100\\
0&0&0& 0&0&0& 0&100&-204
\end{array}\right)
$$
an unknown vector
$$
\color{red}{\mathbf{y}}=\color{red}{
\left(\begin{array}{c} y_1\\
y_2\\
y_3\\
.\\
.\\
y_8\\
y_9
\end{array}\right)}
$$
```
y=np.zeros((N+1))
# Boundary Condition
y[0]=1.1752
y[N]=10.0179
```
and the right hand side is a known $9\times 1$ vector containing the boundary conditions
$$
\mathbf{b}=\left(\begin{array}{c}-117.52\\
0\\
0\\
.\\
.\\
0\\
-1001.79 \end{array}\right)
$$
$$ A\mathbf{y}=\mathbf{b}$$
The plot below is a graphical representation of the matrix A.
```
b=np.zeros(N-1)
# Boundary Condition
b[0]=-y[0]/(h*h)
b[N-2]=-y[N]/(h*h)
A=np.zeros((N-1,N-1))
# Diagonal
for i in range (0,N-1):
A[i,i]=-(2/(h*h)+4)
for i in range (0,N-2):
A[i+1,i]=1/(h*h)
A[i,i+1]=1/(h*h)
plt.imshow(A)
plt.xlabel('i',fontsize=16)
plt.ylabel('j',fontsize=16)
plt.yticks(np.arange(N-1), np.arange(1,N-0.9,1))
plt.xticks(np.arange(N-1), np.arange(1,N-0.9,1))
clb=plt.colorbar()
clb.set_label('Matrix value')
plt.title('Matrix A',fontsize=32)
plt.tight_layout()
plt.subplots_adjust()
plt.show()
```
## Solving the system
To solve, invert the matrix $A$ such that
$$A^{-1}Ay=A^{-1}b$$
$$y=A^{-1}b$$
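As a side note, the explicit inverse is formed here for illustration; for larger systems it is usually preferable to solve the linear system directly. A one-line alternative using the `A`, `b` and `y` arrays defined above would be:
```
# Equivalent to y[1:N] = np.dot(invA, b), without forming the inverse explicitly
y[1:N] = np.linalg.solve(A, b)
```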
The plot below shows the graphical representation of $A^{-1}$.
```
invA=np.linalg.inv(A)
plt.imshow(invA)
plt.xlabel('i',fontsize=16)
plt.ylabel('j',fontsize=16)
plt.yticks(np.arange(N-1), np.arange(1,N-0.9,1))
plt.xticks(np.arange(N-1), np.arange(1,N-0.9,1))
clb=plt.colorbar()
clb.set_label('Matrix value')
plt.title(r'Matrix $A^{-1}$',fontsize=32)
plt.tight_layout()
plt.subplots_adjust()
plt.show()
y[1:N]=np.dot(invA,b)
```
## Result
The plot below shows the approximate solution of the Boundary Value Problem (blue v) and the exact solution (black dashed line).
```
fig = plt.figure(figsize=(8,4))
plt.plot(x,y,'v',label='Finite Difference')
plt.plot(x,np.sinh(2*x+1),'k:',label='exact')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc='best')
plt.show()
```
### Package installs
If you are using jupyter lab online, all packages will be available. If you are running this on your local computer, you may need to install some packages. Run the cell below if using jupyter lab locally.
```
!pip install numpy
!pip install scipy
!pip install pandas
!pip install scikit-learn
!pip install seaborn
```
### Importing data
To begin, we need to understand the data.
The ribosome genes are available in a .fasta file called 'ribosome_genes.fasta'. You can have a look if you like.
These genes will be imported as classes (RibosomeGene).
Each RibosomeGene object has a name, accession, sequence and length.
You can access these properties using '.' syntax. (see below).
Try to think of each gene as something physical, rather than code.
In real life, each gene has a .length, its organism has a .name, and it has a .sequence. We can write code in this way too.
we will import these into **ribosome_genes**, which is a list of our genes.
```
import warnings
warnings.filterwarnings('ignore')
from utilities import import_16s_sequences
ribosome_genes = import_16s_sequences()
print('{:<20}{:<30}{:<15}{:<10}'.format('gene.accession', 'gene.name', 'gene.length', 'gene.sequence'))
for gene in ribosome_genes:
print('{:<20}{:<30}{:<15}{:<10}'.format(gene.accession, gene.name[:27], gene.length, gene.sequence[:8] + '...'))
```
### SECTION 1: PAIRWISE DISTANCES
To be able to compare organisms via their sequences, we need a way to measure their difference as a distance.
**K-mer distance**<br>
The kmer distance between two sequences is defined here as the total number of k-mers that are unique to either sequence.<br>
eg: If seq1 has 3 unique kmers not found in seq2 (copy number difference also matters), and seq2 has 2 unique kmers, the kmer distance is 5.
```
def create_kmer_dictionary(seq, k):
kmer_dict = {}
num = len(seq) - k + 1
for i in range(num):
kmer = seq[i:i+k]
if kmer not in kmer_dict:
kmer_dict[kmer] = 0
kmer_dict[kmer] += 1
return kmer_dict
create_kmer_dictionary('CCUUCGGG', 2)
def calculate_total_unique_kmers(kmers1, kmers2):
unique_kmers = 0
c1 = c2 = c3 = 0
for k in kmers1:
if k not in kmers2:
c1 += kmers1[k]
elif k in kmers1 and k in kmers2:
c2 = c2+ abs(kmers1[k] - kmers2[k])
for k2 in kmers2:
if k2 not in kmers1:
c3 += kmers2[k2]
unique_kmers = c1+c2+c3
return unique_kmers
kmers1 = {'CCUUCGGG':1}
kmers2 = {'CCUUUUUG':2}
calculate_total_unique_kmers(kmers1, kmers2)
def kmer_distance(seq1, seq2, k):
kmers1 = create_kmer_dictionary(seq1,k)
kmers2 = create_kmer_dictionary(seq2,k)
distance = calculate_total_unique_kmers(kmers1, kmers2)
return distance
```
Let's check our function. We can use two of the entries in the 'ribosome_genes' list.
If implemented correctly, the following should return 24
```
distance = kmer_distance(ribosome_genes[1].sequence, ribosome_genes[3].sequence, 8)
print(distance)
```
**smith-waterman alignment**<br>
Another way to compare the similarity of two sequences is through alignment.
The alignment score of two sequences will be high when they are similar, and low when they are distinct.
Keep in mind the matrix must be 1 element larger than the sequence lengths. Consider whether indel scores for the first row and column need to be filled in.
```
import numpy as np
def init_scoregrid(seq1, seq2, indel_score=-4):
rs = len(seq1) +1
cs = len(seq2) +1
scoregrid = np.zeros((rs, cs), int)
return scoregrid
```
Let's do a sanity check that the grid has been initialised properly. <br>
The following should print the initialised scoregrid
```
print(init_scoregrid('hello', 'kittycat'))
```
Write a function that calculates the initialised scoregrid. It accepts two sequences, a scoregrid and match/mismatch and indel scores.
```
import itertools
def calculate_scoregrid(seq1, seq2, scoregrid, match_score=1, mismatch_score=-4, indel_score=-4):
for i, j in itertools.product(range(1, scoregrid.shape[0]), range(1, scoregrid.shape[1])):
match = scoregrid[i - 1, j - 1] + (match_score if seq1[i - 1] == seq2[j - 1] else + mismatch_score)
delete = scoregrid[i - 1, j] + indel_score
insert = scoregrid[i, j - 1] + indel_score
scoregrid[i, j] = max(match, delete, insert, 0)
return scoregrid
```
Let's do another sanity check. <br>
The following should print a calculated scoregrid, with these numbers in the bottom right corner: <br>
2 0 <br>
0 3
```
scoregrid = init_scoregrid('hello', 'helllo')
print(calculate_scoregrid('hello', 'helllo', scoregrid))
def report_alignment_score(scoregrid):
# given a completed scoregrid, return the smith-waterman alignment score.
sw_alignment_score = scoregrid.max()
return sw_alignment_score
```
Final sanity check. Should return 4.
```
scoregrid = init_scoregrid('hello', 'helllo')
calculated_scoregrid = calculate_scoregrid('hello', 'helllo', scoregrid)
print(report_alignment_score(calculated_scoregrid))
```
Ok! now we're ready to put it all together. <br>
Fill in the function below with the three functions you wrote to calculate the alignment score of two sequences
```
def smith_waterman(seq1, seq2):
    scoregrid = init_scoregrid(seq1, seq2, indel_score=-4)
    scoregrid = calculate_scoregrid(seq1, seq2, scoregrid, match_score=1, mismatch_score=-4, indel_score=-4)
    alignment_score = report_alignment_score(scoregrid)
    return alignment_score
```
The following should print 4
```
print(smith_waterman('hello', 'helllo'))
```
**pairwise distances**
We have now written two functions which can calculate the distance of two sequences.
We can calculate the k-mer distance, and the smith-waterman alignment score.
Let's use these two methods to calculate the pairwise distances of our genes.
```
import numpy as np
def init_distance_matrix(genes):
values=[]
for gene in genes:
s=gene.accession
values.append(s)
values.append(0)
distance_matrix = {}
    for gene in genes:
key = gene.accession
distance_matrix[key]={values[i]: values[i + 1] for i in range(0, len(values), 2)}
return distance_matrix
```
Let's print the distance matrix to make sure it worked.
```
from utilities import print_distance_matrix
distance_matrix = init_distance_matrix(ribosome_genes)
print_distance_matrix(distance_matrix)
```
Time to fill in the matrix with distances. <br>
Write a function which calculates the pairwise distance of genes using kmer distance.
You will need to call the 'kmer_distance' function you have written above.
```
def calculate_kmer_distance_matrix(genes, matrix, k):
for gene1 in genes:
key1=gene1.accession
for gene2 in genes:
key2=gene2.accession
matrix[key1][key2]=kmer_distance(gene1.sequence,gene2.sequence,k)
return matrix
```
Let's do the same as above, but this time use the 'smith_waterman' alignment distance function you wrote.
```
def calculate_sw_alignment_distance_matrix(genes, matrix):
for gene1 in genes:
key1=gene1.accession
for gene2 in genes:
key2=gene2.accession
matrix[key1][key2]=smith_waterman(gene1.sequence,gene2.sequence)
return matrix
```
Let's test them out. The two cells below will use your calculate_kmer_distance_matrix, and calculate_sw_alignment_distance_matrix functions to add distances to the matrix. <br>
**NOTE:** the smith-waterman distance calculations can take time. Give it a minute.
```
distance_matrix = init_distance_matrix(ribosome_genes)
kmer_distance_matrix = calculate_kmer_distance_matrix(ribosome_genes, distance_matrix, 8)
print('\nkmer distance matrix')
print_distance_matrix(kmer_distance_matrix)
distance_matrix = init_distance_matrix(ribosome_genes)
sw_alignment_distance_matrix = calculate_sw_alignment_distance_matrix(ribosome_genes, distance_matrix)
print('\nsmith waterman alignment score distance matrix')
print_distance_matrix(sw_alignment_distance_matrix)
```
Let's visualise those in a better manner for human eyes. The cell below will plot heatmaps instead of raw numbers.
```
from utilities import heatmap
heatmap(kmer_distance_matrix, sw_alignment_distance_matrix)
```
### SECTION 2: CLUSTERING
From the heatmaps, it seems like there are a few clusters in the data. <br>
First, let's convert the pairwise distances to 2D coordinates.
This is possible using Multidimensional scaling (MDS).
After we have transformed the distance matrix to 2D coordinates, we can plot it to see if any clusters are evident.
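For reference, the provided distance_matrix_to_coordinates_MDS utility can be thought of as metric MDS applied to a precomputed dissimilarity matrix. The sketch below is only a hypothetical illustration of that idea using scikit-learn; the nested-dict conversion and the function name are assumptions, and the course utility itself is what the next cell actually uses.
```
# Illustrative sketch only (assumes dist_dict is a nested dict of pairwise distances like the matrix above)
import numpy as np
from sklearn.manifold import MDS

def mds_coordinates_sketch(dist_dict, random_state=0):
    keys = list(dist_dict.keys())
    # flatten the nested dict into a square array of pairwise distances
    dist_array = np.array([[dist_dict[a][b] for b in keys] for a in keys], dtype=float)
    mds = MDS(n_components=2, dissimilarity='precomputed', random_state=random_state)
    return mds.fit_transform(dist_array)  # one (x, y) coordinate per gene
```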
```
from utilities import mds_scatterplot, distance_matrix_to_coordinates_MDS
kmer_distances_xy = distance_matrix_to_coordinates_MDS(kmer_distance_matrix)
sw_distances_xy = distance_matrix_to_coordinates_MDS(sw_alignment_distance_matrix)
mds_scatterplot(kmer_distances_xy)
mds_scatterplot(sw_distances_xy)
```
Seems like there is some clustering happening. <br>
Let's use some clustering algorithms to define the clusters.
In this manner, we can have an objective way to talk about the patterns in the data.
Let's implement the k-means algorithm.
```
from utilities import initialise_centroids, average_point, assign_points, plot_kmeans, points_equal, euclidean_distance
def calculate_mean_centroids(data, assignments, k):
centroids = []
for cluster in range(k):
points = [point for point, assignment in zip(data, assignments) if assignment == cluster]
centroids.append(average_point(points))
return centroids
```
Place calculate_mean_centroids() in the kmeans function below to complete kmeans
```
def kmeans(data, k):
centroids=initialise_centroids(data,k)
cluster_assignments=assign_points(centroids,data)
centroids=calculate_mean_centroids(data,cluster_assignments,k)
return centroids, cluster_assignments
```
You can check your implementation using the cell below:
```
centroids, cluster_assignments = kmeans(kmer_distances_xy, 3)
plot_kmeans(kmer_distances_xy, centroids, cluster_assignments, 3)
```
Let's also implement k-medoids while we're at it. <br>
The only difference between k-means and k-medoids is the calculate_mean_centroids() step, which will instead be calculate_median_centroids().
The "median" here is the medoid: the point in the cluster which has the smallest cumulative distance to the other points in the cluster.
You can use the provided euclidean_distance() function to calculate distances between points.
Write a function which calculates new centroid locations (using this medoid definition).
```
def calculate_median_centroids(data, assignments, k):
    centroids = []
    for cluster in range(k):
        points = [point for point, assignment in zip(data, assignments) if assignment == cluster]
        # the medoid: the cluster point with the smallest cumulative distance to the other points
        medoid = min(points, key=lambda p: sum(euclidean_distance(p, other) for other in points))
        centroids.append(medoid)
    return centroids
```
Place calculate_median_centroids() in the kmedoids function below to complete kmedoids
```
def kmedoids(data, k):
centroids=initialise_centroids(data,k)
cluster_assignments=assign_points(centroids,data)
centroids=calculate_median_centroids(data,cluster_assignments,k)
return centroids, cluster_assignments
```
Here is another check cell, for kmedoids this time:
```
centroids,cluster_assignments = kmedoids(kmer_distances_xy, 3)
plot_kmeans(kmer_distances_xy, centroids, cluster_assignments, 3)
```
# Deep Learning Toolkit for Splunk - Notebook for STL - Seasonality and Trend Decomposition
This notebook contains a barebone example workflow how to work on custom containerized code that seamlessly interfaces with the Deep Learning Toolkit for Splunk.
Note: By default every time you save this notebook the cells are exported into a python module which is then invoked by Splunk MLTK commands like <code> | fit ... | apply ... | summary </code>. Please read the Model Development Guide in the Deep Learning Toolkit app for more information.
## Stage 0 - import libraries
At stage 0 we define all imports necessary to run our subsequent code depending on various libraries.
```
# this definition exposes all python module imports that should be available in all subsequent commands
import json
import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import STL
import statsmodels as sm
# ...
# global constants
MODEL_DIRECTORY = "/srv/app/model/data/"
# THIS CELL IS NOT EXPORTED - free notebook cell for testing or development purposes
print("numpy version: " + np.__version__)
print("pandas version: " + pd.__version__)
print("statsmodels version: " + sm.__version__)
```
## Stage 1 - get a data sample from Splunk
In Splunk run a search to pipe a dataset into your notebook environment. Note: mode=stage is used in the | fit command to do this.
| inputlookup cyclical_business_process.csv<br>
| fit MLTKContainer mode=stage algo=seasonality_and_trend_decomposition _time logons
After you run this search, your dataset sample is available as a CSV inside the container so you can develop your model. The file name is taken from the into keyword of the fit command, or set to "default" if no into keyword is present (as in the example above). This step is intended to work with a subset of your data while you create your custom model.
```
# this cell is not executed from MLTK and should only be used for staging data into the notebook environment
def stage(name):
with open("data/"+name+".csv", 'r') as f:
df = pd.read_csv(f)
with open("data/"+name+".json", 'r') as f:
param = json.load(f)
return df, param
# THIS CELL IS NOT EXPORTED - free notebook cell for testing or development purposes
df, param = stage("default")
print(df.describe())
print(param)
```
## Stage 2 - create and initialize a model
```
# initialize your model
# available inputs: data and parameters
# returns the model object which will be used as a reference to call fit, apply and summary subsequently
def init(df,param):
model = {}
return model
# THIS CELL IS NOT EXPORTED - free notebook cell for testing or development purposes
print(init(df,param))
model=init(df,param)
```
## Stage 3 - fit the model
```
# train your model
# returns a fit info json object and may modify the model object
def fit(model,df,param):
return "info"
# THIS CELL IS NOT EXPORTED - free notebook cell for testing or development purposes
print(fit(model,df,param))
```
## Stage 4 - apply the model
```
# apply your model
# returns the calculated results
def apply(model,df,param):
data=df
data['_time']=pd.to_datetime(data['_time'])
data = data.set_index('_time') # Set the index to datetime object.
data=data.asfreq('H')
res=STL(data).fit()
results=pd.DataFrame({"seasonality": res.seasonal, "trend": res.trend, "residual": res.resid})
results.reset_index(level=0, inplace=True)
return results
# THIS CELL IS NOT EXPORTED - free notebook cell for testing or development purposes
apply(model,df,param)
```
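As an optional sanity check (this cell is an assumption of this walkthrough and is not exported), the decomposition returned by apply() can be plotted directly; it assumes the staged df has an hourly `_time` column and a single numeric field, as in the example search above.
```
# THIS CELL IS NOT EXPORTED - optional visual check of the STL decomposition
import matplotlib.pyplot as plt

decomposed = apply(model, df, param)
fig, axes = plt.subplots(3, 1, figsize=(10, 6), sharex=True)
for ax, component in zip(axes, ["seasonality", "trend", "residual"]):
    ax.plot(decomposed["_time"], decomposed[component])
    ax.set_ylabel(component)
plt.tight_layout()
plt.show()
```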
## Stage 5 - save the model
```
# save model to name in expected convention "<algo_name>_<model_name>"
def save(model,name):
return model
```
## Stage 6 - load the model
```
# load model from name in expected convention "<algo_name>_<model_name>"
def load(name):
    model = {}
    return model
```
## Stage 7 - provide a summary of the model
```
# return a model summary
def summary(model=None):
returns = {"version": {"numpy": np.__version__, "pandas": pd.__version__} }
return returns
```
## End of Stages
All subsequent cells are not tagged and can be used for further freeform code
# Intro
[PyTorch](https://pytorch.org/) is a very powerful machine learning framework. Central to PyTorch are [tensors](https://pytorch.org/docs/stable/tensors.html), a generalization of matrices to higher ranks. One intuitive example of a tensor is an image with three color channels: A 3-channel (red, green, blue) image which is 64 pixels wide and 64 pixels tall is a $3\times64\times64$ tensor. You can access the PyTorch framework by writing `import torch` near the top of your code, along with all of your other import statements.
This guide will help introduce you to the functionality of PyTorch, but don't worry too much about memorizing it: the assignments will link to relevant documentation where necessary.
```
import torch
```
# Why PyTorch?
One important question worth asking is, why is PyTorch being used for this course? There is a great breakdown by [the Gradient](https://thegradient.pub/state-of-ml-frameworks-2019-pytorch-dominates-research-tensorflow-dominates-industry/) looking at the state of machine learning frameworks today. In part, as highlighted by the article, PyTorch is generally more pythonic than alternative frameworks, easier to debug, and is the most-used framework in machine learning research by a large and growing margin. While PyTorch's primary alternative, Tensorflow, has attempted to integrate many of PyTorch's features, Tensorflow's implementations come with some inherent limitations highlighted in the article.
Notably, while PyTorch's industry usage has grown, Tensorflow is still (for now) a slight favorite in industry. In practice, the features that make PyTorch attractive for research also make it attractive for education, and the general trend of machine learning research and practice toward PyTorch makes it the more forward-looking choice.
# Tensor Properties
One way to create tensors from a list or an array is to use `torch.Tensor`. It'll be used to set up examples in this notebook, but you'll never need to use it in the course - in fact, if you find yourself needing it, that's probably not the correct answer.
```
example_tensor = torch.Tensor(
[
[[1, 2], [3, 4]],
[[5, 6], [7, 8]],
[[9, 0], [1, 2]]
]
)
```
You can view the tensor in the notebook by simply printing it out (though some larger tensors will be cut off)
```
example_tensor
```
## Tensor Properties: Device
One important property is the device of the tensor - throughout this notebook you'll be sticking to tensors which are on the CPU. However, throughout the course you'll also be using tensors on GPU (that is, a graphics card which will be provided for you to use for the course). To view the device of the tensor, all you need to write is `example_tensor.device`. To move a tensor to a new device, you can write `new_tensor = example_tensor.to(device)` where device will be either `cpu` or `cuda`.
```
example_tensor.device
```
## Tensor Properties: Shape
And you can get the number of elements in each dimension by printing out the tensor's shape, using `example_tensor.shape`, something you're likely familiar with if you've used numpy. For example, this tensor is a $3\times2\times2$ tensor, since it has 3 elements, each of which are $2\times2$.
```
example_tensor.shape
```
You can also get the size of a particular dimension $n$ using `example_tensor.shape[n]` or equivalently `example_tensor.size(n)`
```
print("shape[0] =", example_tensor.shape[0])
print("size(1) =", example_tensor.size(1))
```
Finally, it is sometimes useful to get the number of dimensions (rank) or the number of elements, which you can do as follows
```
print("Rank =", len(example_tensor.shape))
print("Number of elements =", example_tensor.numel())
```
# Indexing Tensors
As with numpy, you can access specific elements or subsets of elements of a tensor. To access the $n$-th element, you can simply write `example_tensor[n]` - as with Python in general, these dimensions are 0-indexed.
```
example_tensor[1]
```
In addition, if you want to access the $j$-th dimension of the $i$-th example, you can write `example_tensor[i, j]`
```
example_tensor[1, 1, 0]
```
Note that if you'd like to get a Python scalar value from a tensor, you can use `example_scalar.item()`
```
example_scalar = example_tensor[1, 1, 0]
example_scalar.item()
```
In addition, you can index into the $i$-th element along a dimension for every entry by using a slice, e.g. `x[:, i]`. For example, if you want the top-left element of each matrix in `example_tensor`, which is the `0, 0` element of each matrix, you can write:
```
example_tensor[:, 0, 0]
```
# Initializing Tensors
There are many ways to create new tensors in PyTorch, but in this course, the most important ones are:
[`torch.ones_like`](https://pytorch.org/docs/master/generated/torch.ones_like.html): creates a tensor of all ones with the same shape and device as `example_tensor`.
```
torch.ones_like(example_tensor)
```
[`torch.zeros_like`](https://pytorch.org/docs/master/generated/torch.zeros_like.html): creates a tensor of all zeros with the same shape and device as `example_tensor`
```
torch.zeros_like(example_tensor)
```
[`torch.randn_like`](https://pytorch.org/docs/stable/generated/torch.randn_like.html): creates a tensor with every element sampled from a [Normal (or Gaussian) distribution](https://en.wikipedia.org/wiki/Normal_distribution) with the same shape and device as `example_tensor`
```
torch.randn_like(example_tensor)
```
Sometimes (though less often than you'd expect), you might need to initialize a tensor knowing only the shape and device, without a tensor for reference for `ones_like` or `randn_like`. In this case, you can create a $2\times2$ tensor as follows:
```
torch.randn(2, 2, device='cpu') # Alternatively, for a GPU tensor, you'd use device='cuda'
```
# Basic Functions
There are a number of basic functions that you should know to use PyTorch - if you're familiar with numpy, all commonly-used functions exist in PyTorch, usually with the same name. You can perform element-wise multiplication / division by a scalar $c$ by simply writing `c * example_tensor`, and element-wise addition / subtraction by a scalar by writing `example_tensor + c`
Note that most operations are not in-place in PyTorch, which means that they don't change the original variable's data (However, you can reassign the same variable name to the changed data if you'd like, such as `example_tensor = example_tensor + 1`)
```
(example_tensor - 5) * 2
```
You can calculate the mean or standard deviation of a tensor using [`example_tensor.mean()`](https://pytorch.org/docs/stable/generated/torch.mean.html) or [`example_tensor.std()`](https://pytorch.org/docs/stable/generated/torch.std.html).
```
print("Mean:", example_tensor.mean())
print("Stdev:", example_tensor.std())
```
You might also want to find the mean or standard deviation along a particular dimension. To do this you can simply pass the number corresponding to that dimension to the function. For example, if you want to get the average $2\times2$ matrix of the $3\times2\times2$ `example_tensor` you can write:
```
example_tensor.mean(0)
# Equivalently, you could also write:
# example_tensor.mean(dim=0)
# example_tensor.mean(axis=0)
# torch.mean(example_tensor, 0)
# torch.mean(example_tensor, dim=0)
# torch.mean(example_tensor, axis=0)
```
PyTorch has many other powerful functions but these should be all of PyTorch functions you need for this course outside of its neural network module (`torch.nn`).
# PyTorch Neural Network Module (`torch.nn`)
PyTorch has a lot of powerful classes in its `torch.nn` module (Usually, imported as simply `nn`). These classes allow you to create a new function which transforms a tensor in specific way, often retaining information when called multiple times.
```
import torch.nn as nn
```
## `nn.Linear`
To create a linear layer, you need to pass it the number of input dimensions and the number of output dimensions. The linear object initialized as `nn.Linear(10, 2)` will take in a $n\times10$ matrix and return an $n\times2$ matrix, where all $n$ elements have had the same linear transformation performed. For example, you can initialize a linear layer which performs the operation $Ax + b$, where $A$ and $b$ are initialized randomly when you generate the [`nn.Linear()`](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html) object.
```
linear = nn.Linear(10, 2)
example_input = torch.randn(3, 10)
example_output = linear(example_input)
example_output
```
## `nn.ReLU`
[`nn.ReLU()`](https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html) will create an object that, when receiving a tensor, will perform a ReLU activation function. This will be reviewed further in lecture, but in essence, a ReLU non-linearity sets all negative numbers in a tensor to zero. In general, the simplest neural networks are composed of series of linear transformations, each followed by activation functions.
```
relu = nn.ReLU()
relu_output = relu(example_output)
relu_output
```
## `nn.BatchNorm1d`
[`nn.BatchNorm1d`](https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm1d.html) is a normalization technique that will rescale a batch of $n$ inputs to have a consistent mean and standard deviation between batches.
As indicated by the `1d` in its name, this is for situations where you expect a set of inputs, where each of them is a flat list of numbers. In other words, each input is a vector, not a matrix or higher-dimensional tensor. For a set of images, each of which is a higher-dimensional tensor, you'd use [`nn.BatchNorm2d`](https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html), discussed later on this page.
`nn.BatchNorm1d` takes an argument of the number of input dimensions of each object in the batch (the size of each example vector).
```
batchnorm = nn.BatchNorm1d(2)
batchnorm_output = batchnorm(relu_output)
batchnorm_output
```
## `nn.Sequential`
[`nn.Sequential`](https://pytorch.org/docs/stable/generated/torch.nn.Sequential.html) creates a single operation that performs a sequence of operations. For example, you can write a neural network layer with a batch normalization as
```
mlp_layer = nn.Sequential(
nn.Linear(5, 2),
nn.BatchNorm1d(2),
nn.ReLU()
)
test_example = torch.randn(5,5) + 1
print("input: ")
print(test_example)
print("output: ")
print(mlp_layer(test_example))
```
# Optimization
One of the most important aspects of essentially any machine learning framework is its automatic differentiation library.
## Optimizers
To create an optimizer in PyTorch, you'll need to use the `torch.optim` module, often imported as `optim`. [`optim.Adam`](https://pytorch.org/docs/stable/optim.html#torch.optim.Adam) corresponds to the Adam optimizer. To create an optimizer object, you'll need to pass it the parameters to be optimized and the learning rate, `lr`, as well as any other parameters specific to the optimizer.
For all `nn` objects, you can access their parameters as a list using their `parameters()` method, as follows:
```
import torch.optim as optim
adam_opt = optim.Adam(mlp_layer.parameters(), lr=1e-1)
```
## Training Loop
A (basic) training step in PyTorch consists of four basic parts:
1. Set all of the gradients to zero using `opt.zero_grad()`
2. Calculate the loss, `loss`
3. Calculate the gradients with respect to the loss using `loss.backward()`
4. Update the parameters being optimized using `opt.step()`
That might look like the following code (and you'll notice that if you run it several times, the loss goes down):
```
train_example = torch.randn(100,5) + 1
adam_opt.zero_grad()
# We'll use a simple loss function of mean distance from 1
# torch.abs takes the absolute value of a tensor
cur_loss = torch.abs(1 - mlp_layer(train_example)).mean()
cur_loss.backward()
adam_opt.step()
print(cur_loss)
```
## `requires_grad_()`
You can also tell PyTorch that it needs to calculate the gradient with respect to a tensor that you created by saying `example_tensor.requires_grad_()`, which will change it in-place. This means that even if PyTorch wouldn't normally store a grad for that particular tensor, it will for that specified tensor.
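For example, a minimal sketch (not part of the original notebook):
```
x = torch.randn(2, 2)
print(x.requires_grad)   # False: plain tensors don't track gradients by default
x.requires_grad_()       # switch gradient tracking on, in-place
y = (x * 3).sum()
y.backward()
print(x.grad)            # d(y)/d(x): a 2x2 tensor of 3s
```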
## `with torch.no_grad():`
PyTorch will usually calculate the gradients as it proceeds through a set of operations on tensors. This can often take up unnecessary computations and memory, especially if you're performing an evaluation. However, you can wrap a piece of code with `with torch.no_grad()` to prevent the gradients from being calculated in a piece of code.
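A minimal sketch of the effect:
```
x = torch.randn(3, 5, requires_grad=True)
with torch.no_grad():
    y = (x * 2).sum()    # computed without building the autograd graph
print(y.requires_grad)   # False, so no gradient can flow back through y
```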
## `detach():`
Sometimes, you want to calculate and use a tensor's value without calculating its gradients. For example, if you have two models, A and B, and you want to directly optimize the parameters of A with respect to the output of B, without calculating the gradients through B, then you could feed the detached output of B to A. There are many reasons you might want to do this, including efficiency or cyclical dependencies (i.e. A depends on B depends on A).
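A minimal sketch:
```
x = torch.randn(3, requires_grad=True)
y = x * 2                # part of the autograd graph
z = y.detach()           # same values as y, but cut off from the graph
print(y.requires_grad, z.requires_grad)   # True False
```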
# New `nn` Classes
You can also create new classes which extend the `nn` module. For these classes, all class attributes, as in `self.layer` or `self.param`, will automatically be treated as parameters if they are themselves `nn` objects or if they are tensors wrapped in `nn.Parameter` which are initialized with the class.
The `__init__` function defines what will happen when the object is created. The first line of the init function of a class, for example, `WellNamedClass`, needs to be `super(WellNamedClass, self).__init__()`.
The `forward` function defines what runs if you create that object `model` and pass it a tensor `x`, as in `model(x)`. If you choose the function signature `(self, x)`, then each call of the forward function gets two pieces of information: `self`, which is a reference to the object with which you can access all of its parameters, and `x`, which is the current tensor for which you'd like to return `y`.
One class might look like the following:
```
class ExampleModule(nn.Module):
def __init__(self, input_dims, output_dims):
super(ExampleModule, self).__init__()
self.linear = nn.Linear(input_dims, output_dims)
self.exponent = nn.Parameter(torch.tensor(1.))
def forward(self, x):
x = self.linear(x)
# This is the notation for element-wise exponentiation,
# which matches python in general
x = x ** self.exponent
return x
```
And you can view its parameters as follows
```
example_model = ExampleModule(10, 2)
list(example_model.parameters())
```
And you can print out their names too, as follows:
```
list(example_model.named_parameters())
```
And here's an example of the class in action:
```
input = torch.randn(2, 10)
example_model(input)
```
# 2D Operations
You won't need these for the first lesson, and the theory behind each of these will be reviewed more in later lectures, but here is a quick reference (a small shape-check sketch follows the list):
* 2D convolutions: [`nn.Conv2d`](https://pytorch.org/docs/master/generated/torch.nn.Conv2d.html) requires the number of input and output channels, as well as the kernel size.
* 2D transposed convolutions (aka deconvolutions): [`nn.ConvTranspose2d`](https://pytorch.org/docs/master/generated/torch.nn.ConvTranspose2d.html) also requires the number of input and output channels, as well as the kernel size
* 2D batch normalization: [`nn.BatchNorm2d`](https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html) requires the number of input dimensions
* Resizing images: [`nn.Upsample`](https://pytorch.org/docs/master/generated/torch.nn.Upsample.html) requires the final size or a scale factor. Alternatively, [`nn.functional.interpolate`](https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.interpolate) takes the same arguments.
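For example, a small shape-check sketch chaining these layers together (the channel counts and sizes below are arbitrary examples):
```
conv = nn.Conv2d(3, 8, kernel_size=3, padding=1)   # 3 input channels -> 8 output channels
bn = nn.BatchNorm2d(8)                             # normalizes over the 8 channels
upsample = nn.Upsample(scale_factor=2)             # doubles the height and width
image_batch = torch.randn(4, 3, 64, 64)            # a batch of 4 RGB 64x64 "images"
out = upsample(bn(conv(image_batch)))
print(out.shape)                                   # torch.Size([4, 8, 128, 128])
```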
# Ramp Optimization Examples
This notebook outlines an example to optimize the ramp settings for a few different types of observations.
In these types of optimizations, we must consider observation constraints such as saturation levels, SNR requirements, and limits on acquisition time.
**Note**: The reported acquisition time does not include observatory and instrument-level overheads, such as slew times, filter changes, script compilations, etc. It only includes detector readout times (including reset frames).
```
# Import the usual libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Enable inline plotting at lower left
%matplotlib inline
import pynrc
from pynrc import nrc_utils
from pynrc.nrc_utils import S, jl_poly_fit
from pynrc.pynrc_core import table_filter
pynrc.setup_logging('WARNING', verbose=False)
from astropy.table import Table
# Progress bar
from tqdm.auto import tqdm, trange
```
## Example 1: M-Dwarf companion (imaging vs coronagraphy)
We want to observe an M-Dwarf companion (K=18 mag) in the vicinity of a brighter F0V (K=13 mag) in the F430M filter. Assume the M-Dwarf flux is not significantly impacted by the brighter PSF (i.e., in the background-limited regime). In this scenario, the F0V star will saturate much more quickly compared to the fainter companion, so it limits which ramp settings we can use.
We will test a couple different types of observations (direct imaging vs coronagraphy).
```
# Get stellar spectra and normalize at K-Band
# The stellar_spectrum convenience function creates a Pysynphot spectrum
bp_k = S.ObsBandpass('k')
sp_M2V = pynrc.stellar_spectrum('M2V', 18, 'vegamag', bp_k)#, catname='ck04models')
sp_F0V = pynrc.stellar_spectrum('F0V', 13, 'vegamag', bp_k)#, catname='ck04models')
# Initiate a NIRCam observation
nrc = pynrc.NIRCam(filter='F430M', wind_mode='WINDOW', xpix=160, ypix=160)
# Set some observing constraints
# Let's assume we want photometry on the primary to calibrate the M-Dwarf for direct imaging
# - Set well_frac_max=0.75
# Want a SNR~100 in the F430M filter
# - Set snr_goal=100
res = nrc.ramp_optimize(sp_M2V, sp_bright=sp_F0V, snr_goal=100, well_frac_max=0.75, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Do the same thing, but for coronagraphic mask instead
nrc = pynrc.NIRCam(filter='F430M', image_mask='MASK430R', pupil_mask='CIRCLYOT',
wind_mode='WINDOW', xpix=320, ypix=320)
# We assume that longer ramps will give us the best SNR for time
patterns = ['MEDIUM8', 'DEEP8']
res = nrc.ramp_optimize(sp_M2V, sp_bright=sp_F0V, snr_goal=100,
patterns=patterns, even_nints=True)
# Take the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
```
**RESULTS**
Based on these two comparisons, it looks like direct imaging is much more efficient in getting to the requisite SNR. In addition, direct imaging gives us a photometric comparison source that is inaccessible when occulting the primary with the coronagraph masks. **Of course, this assumes the companion exists in the background limit as opposed to the contrast limit.**
## Example 2: Exoplanet Coronagraphy
We want to observe GJ 504 for an hour in the F444W filter using the MASK430R coronagraph.
- What is the optimal ramp settings to maximize the SNR of GJ 504b?
- What is the final background sensitivity limit?
```
# Get stellar spectra and normalize at K-Band
# The stellar_spectrum convenience function creates a Pysynphot spectrum
bp_k = pynrc.bp_2mass('ks')
sp_G0V = pynrc.stellar_spectrum('G0V', 4, 'vegamag', bp_k)
# Choose a representative planet spectrum
planet = pynrc.planets_sb12(atmo='hy3s', mass=8, age=200, entropy=8, distance=17.5)
sp_pl = planet.export_pysynphot()
# Renormalize to F360M = 18.8
bp_l = pynrc.read_filter('F360M') #
sp_pl = sp_pl.renorm(18.8, 'vegamag', bp_l)
# Initiate a NIRCam observation
nrc = pynrc.NIRCam(filter='F444W', pupil_mask='CIRCLYOT', image_mask='MASK430R',
wind_mode='WINDOW', xpix=320, ypix=320)
# Set even_nints=True assume 2 roll angles
res = nrc.ramp_optimize(sp_pl, sp_bright=sp_G0V, tacq_max=3600, tacq_frac=0.05,
even_nints=True, verbose=True)
# Take the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# The SHALLOWs, DEEPs, and MEDIUMs are very similar for SNR and efficiency.
# Let's go with SHALLOW2 for more GROUPS & INTS
# MEDIUM8 would be fine as well.
nrc.update_detectors(read_mode='SHALLOW2', ngroup=10, nint=70)
keys = list(nrc.multiaccum_times.keys())
keys.sort()
for k in keys:
print("{:<10}: {: 12.5f}".format(k, nrc.multiaccum_times[k]))
# Background sensitivity (5 sigma)
sens_dict = nrc.sensitivity(nsig=5, units='vegamag', verbose=True)
```
## Example 3: Single-Object Grism Spectroscopy
Similar to the above, but instead we want to obtain a slitless grism spectrum of a K=12 mag M0V dwarf. Each grism resolution element should have SNR~100.
```
# M0V star normalized to K=12 mags
bp_k = S.ObsBandpass('k')
sp_M0V = pynrc.stellar_spectrum('M0V', 12, 'vegamag', bp_k)
nrc = pynrc.NIRCam(filter='F444W', pupil_mask='GRISMR', wind_mode='STRIPE', ypix=128)
# Set a minimum of 10 integrations to be robust against cosmic rays
# Also set a minimum of 10 groups for good ramp sampling
res = nrc.ramp_optimize(sp_M0V, snr_goal=100, nint_min=10, ng_min=10, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Let's say we choose SHALLOW4, NGRP=10, NINT=10
# Update detector readout
nrc.update_detectors(read_mode='SHALLOW4', ngroup=10, nint=10)
keys = list(nrc.multiaccum_times.keys())
keys.sort()
for k in keys:
print("{:<10}: {: 12.5f}".format(k, nrc.multiaccum_times[k]))
# Print final wavelength-dependent SNR
# For spectroscopy, the snr_goal is the median over the bandpass
snr_dict = nrc.sensitivity(sp=sp_M0V, forwardSNR=True, units='mJy', verbose=True)
```
**Mock observed spectrum**
Create a series of ramp integrations based on the current NIRCam settings. The `simulate_level1b()` call below creates a mock observation in raw DMS format; by default, the sources are point sources centered in the observing window.
```
# Ideal spectrum and wavelength solution
wspec, imspec = nrc.calc_psf_from_coeff(sp=sp_M0V, return_hdul=False, return_oversample=False)
# Resize to detector window
nx = nrc.det_info['xpix']
ny = nrc.det_info['ypix']
# Shrink/expand nx (fill value of 0)
# Then shrink to a size excluding wspec=0
# This assumes simulated spectrum is centered
imspec = nrc_utils.pad_or_cut_to_size(imspec, (ny,nx))
wspec = nrc_utils.pad_or_cut_to_size(wspec, nx)
# Add simple zodiacal background
im_slope = imspec + nrc.bg_zodi()
# Create a series of ramp integrations based on the current NIRCam settings
# Output is a single HDUList with 10 INTs
# Ignore detector non-linearity to return output in e-/sec
kwargs = {
'apply_nonlinearity' : False,
'apply_flats' : False,
}
res = nrc.simulate_level1b('M0V Target', 0, 0, '2023-01-01', '12:00:00',
im_slope=im_slope, return_hdul=True, **kwargs)
res.info()
tvals = nrc.Detector.times_group_avg
header = res['PRIMARY'].header
data_all = res['SCI'].data
slope_list = []
for data in tqdm(data_all):
ref = pynrc.ref_pixels.NRC_refs(data, header, DMS=True, do_all=False)
ref.calc_avg_amps()
ref.correct_amp_refs()
# Linear fit to determine slope image
cf = jl_poly_fit(tvals, ref.data, deg=1)
slope_list.append(cf[1])
# Create a master averaged slope image
slopes_all = np.array(slope_list)
slope_sim = slopes_all.mean(axis=0) * nrc.Detector.gain
fig, ax = plt.subplots(1,1, figsize=(12,3))
ax.imshow(slope_sim, vmin=0, vmax=10)
fig.tight_layout()
ind = wspec>0
# Estimate background emission and subtract from slope_sim
bg = np.median(slope_sim[:,~ind])
slope_sim -= bg
ind = wspec>0
plt.plot(wspec[ind], slope_sim[63,ind])
# Extract 2 spectral x 5 spatial pixels
# First, cut out the central 5 pixels
wspec_sub = wspec[ind]
sh_new = (5, len(wspec_sub))
slope_sub = nrc_utils.pad_or_cut_to_size(slope_sim, sh_new)
slope_sub_ideal = nrc_utils.pad_or_cut_to_size(imspec, sh_new)
# Sum along the spatial axis
spec = slope_sub.sum(axis=0)
spec_ideal = slope_sub_ideal.sum(axis=0)
spec_ideal_rebin = nrc_utils.frebin(spec_ideal, scale=0.5, total=False)
# Build a quick RSRF from extracted ideal spectral slope
sp_M0V.convert('mjy')
rsrf = spec_ideal / sp_M0V.sample(wspec_sub*1e4)
# Rebin along spectral direction
wspec_rebin = nrc_utils.frebin(wspec_sub, scale=0.5, total=False)
spec_rebin_cal = nrc_utils.frebin(spec/rsrf, scale=0.5, total=False)
# Expected noise per extraction element
snr_interp = np.interp(wspec_rebin, snr_dict['wave'], snr_dict['snr'])
_spec_rebin = spec_ideal_rebin / snr_interp
_spec_rebin_cal = _spec_rebin / nrc_utils.frebin(rsrf, scale=0.5, total=False)
fig, ax = plt.subplots(1,1, figsize=(12,8))
ax.plot(sp_M0V.wave/1e4, sp_M0V.flux, label='Input Spectrum')
ax.plot(wspec_rebin, spec_rebin_cal, alpha=0.7, label='Extracted Observation')
ax.errorbar(wspec_rebin, spec_rebin_cal, yerr=_spec_rebin_cal, zorder=3,
fmt='none', label='Expected Error Bars', alpha=0.7, color='C2')
ax.set_ylim([0,10])
ax.set_xlim([3.7,5.1])
ax.set_xlabel('Wavelength ($\mu m$)')
ax.set_ylabel('Flux (mJy)')
ax.set_title('Simulated Spectrum')
ax.legend(loc='upper right');
```
## Example 4: Exoplanet Transit Spectroscopy
Let's say we want to observe an exoplanet transit using NIRCam grisms in the F322W2 filter.
We assume a 2.1-hour transit duration for a K6V star (K=8.4 mag).
```
nrc = pynrc.NIRCam('F322W2', pupil_mask='GRISM0', wind_mode='STRIPE', ypix=64)
# K6V star at K=8.4 mags
bp_k = S.ObsBandpass('k')
sp_K6V = pynrc.stellar_spectrum('K6V', 8.4, 'vegamag', bp_k)
# Constraints
well = 0.5 # Keep well below 50% full
tacq = 2.1*3600. # 2.1 hour transit duration
ng_max = 30 # Transit spectroscopy allows for up to 30 groups per integrations
nint_max = int(1e6) # Effectively no limit on number of integrations
# Let's bin the spectrum to R~100
# dw_bin is a passable parameter for specifiying spectral bin sizes
R = 100
dw_bin = (nrc.bandpass.avgwave() / 10000) / R
res = nrc.ramp_optimize(sp_K6V, tacq_max=tacq, nint_max=nint_max,
ng_min=10, ng_max=ng_max, well_frac_max=well,
dw_bin=dw_bin, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Even though BRIGHT1 has a slight efficiency preference over RAPID
# and BRIGHT2, we decide to choose RAPID, because we are convinced
# that saving all data (and no coadding) is a better option.
# If APT informs you that the data rates or total data storage is
# an issue, you can select one of the other options.
# Update to RAPID, ngroup=30, nint=700 and plot PPM
nrc.update_detectors(read_mode='RAPID', ngroup=30, nint=700)
snr_dict = nrc.sensitivity(sp=sp_K6V, dw_bin=dw_bin, forwardSNR=True, units='Jy')
wave = np.array(snr_dict['wave'])
snr = np.array(snr_dict['snr'])
# Let's assume background subtraction of something with similar noise
snr /= np.sqrt(2.)
ppm = 1e6 / snr
# NOTE: We have up until now neglected to include a "noise floor"
# which represents the expected minimum achievable ppm from
# unknown systematics. To first order, this can be added in
# quadrature to the calculated PPM.
noise_floor = 30 # in ppm
ppm_floor = np.sqrt(ppm**2 + noise_floor**2)
plt.plot(wave, ppm, marker='o', label='Calculated PPM')
plt.plot(wave, ppm_floor, marker='o', label='PPM + Noise Floor')
plt.xlabel('Wavelength ($\mu m$)')
plt.ylabel('Noise Limit (PPM)')
plt.xlim([2.4,4.1])
plt.ylim([20,100])
plt.legend()
```
## Example 5: Extended Souce
Expect some faint galaxies of 25 ABMag/arcsec^2 in our field. What is the best we can do with 10,000 seconds of acquisition time?
```
# Detection bandpass is F200W
nrc = pynrc.NIRCam(filter='F200W')
# Flat spectrum (in photlam) with ABMag = 25 in the NIRCam bandpass
sp = pynrc.stellar_spectrum('flat', 25, 'abmag', nrc.bandpass)
res = nrc.ramp_optimize(sp, is_extended=True, tacq_max=10000, tacq_frac=0.05, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# MEDIUM8 10 10 looks like a good option
nrc.update_detectors(read_mode='MEDIUM8', ngroup=10, nint=10, verbose=True)
# Calculate flux/mag for various nsigma detection limits
tbl = Table(names=('Sigma', 'Point (nJy)', 'Extended (nJy/asec^2)',
'Point (AB Mag)', 'Extended (AB Mag/asec^2)'))
tbl['Sigma'].format = '.0f'
for k in tbl.keys()[1:]:
tbl[k].format = '.2f'
for sig in [1,3,5,10]:
snr_dict1 = nrc.sensitivity(nsig=sig, units='nJy', verbose=False)
snr_dict2 = nrc.sensitivity(nsig=sig, units='abmag', verbose=False)
tbl.add_row([sig, snr_dict1[0]['sensitivity'], snr_dict1[1]['sensitivity'],
snr_dict2[0]['sensitivity'], snr_dict2[1]['sensitivity']])
tbl
```
# Training a Custom TensorFlow.js Audio Model
In this notebook, we show how to train a custom audio model based on the model topology of the
[TensorFlow.js Speech Commands model](https://www.npmjs.com/package/@tensorflow-models/speech-commands).
The training is done in Python by using a set of audio examples stored as .wav files.
The trained model is convertible to the
[TensorFlow.js LayersModel](https://js.tensorflow.org/api/latest/#loadLayersModel) format for
inference and further fine-tuning in the browser.
It may also be converted to the [TFLite](https://www.tensorflow.org/lite) format
for inference on mobile devices.
This example uses a small subset of the
[Speech Commands v0.02](https://arxiv.org/abs/1804.03209) dataset, and builds
a model that detects two English words ("yes" and "no") against background noises. But the methodology demonstrated here is general and can be applied to
other sounds, as long as they are stored in the same .wav file format as in this example.
## Data format
The training procedure in this notebook makes the following assumption about the raw audio data:
1. The root data directory contains a number of folders. The name of each folder is the name
of the audio class. You can select any subset of the folders (i.e., classes) to train the
model on.
2. Within each folder, there are a number of .wav files. Each .wav file corresponds to an
example. Each .wav file is mono (single-channel) and has the typical pulse-code modulation
(PCM) encoding. The duration of each wave file should be 1 second or slightly longer.
3. There can be a special folder called "_background_noise_" that contains .wav files for
audio samples that fall into the background noise class. Each of these .wav files can be
much longer than 1 second in duration. This notebook contains code that extracts 1-second
   snippets from these .wav files.
The Speech Commands v0.02 dataset used in this notebook meets these data format requirements. A hypothetical directory layout is sketched below.
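For illustration only, a hypothetical layout (the folder and file names are made-up examples, not part of the dataset download):
```
<data_root>/
├── yes/                       # one folder per class
│   ├── 0a7c2a8d_nohash_0.wav  # 1-second, mono, PCM-encoded example
│   └── ...
├── no/
│   └── ...
└── _background_noise_/        # optional long noise recordings (cut into 1-second snippets below)
    ├── white_noise.wav
    └── ...
```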
```
!pip install librosa tensorflowjs
import glob
import json
import os
import random
import librosa
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from scipy.io import wavfile
import tensorflow as tf
import tensorflowjs as tfjs
import tqdm
print(tf.__version__)
print(tfjs.__version__)
# Download the TensorFlow.js Speech Commands model and the associated
# preprocessing model.
!mkdir -p /tmp/tfjs-sc-model
!curl -o /tmp/tfjs-sc-model/metadata.json -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/metadata.json
!curl -o /tmp/tfjs-sc-model/model.json -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/model.json
!curl -o /tmp/tfjs-sc-model/group1-shard1of2 -fSsL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/group1-shard1of2
!curl -o /tmp/tfjs-sc-model/group1-shard2of2 -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/group1-shard2of2
!curl -o /tmp/tfjs-sc-model/sc_preproc_model.tar.gz -fSsL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/conversion/sc_preproc_model.tar.gz
!cd /tmp/tfjs-sc-model/ && tar xzvf sc_preproc_model.tar.gz
# Download Speech Commands v0.02 dataset. The dataset contains 30+ word and
# sound categories, but we will only use a subset of them
!mkdir -p /tmp/speech_commands_v0.02
!curl -o /tmp/speech_commands_v0.02/speech_commands_v0.02.tar.gz -fSsL http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz
!cd /tmp/speech_commands_v0.02 && tar xzf speech_commands_v0.02.tar.gz
# Load the preprocessing model, which transforms audio waveform into
# spectrograms (2D image-like representation of sound).
# This preprocessing model replicates WebAudio's AnalyzerNode.getFloatFrequencyData
# (https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/getFloatFrequencyData).
# It performs short-time Fourier transform (STFT) using a length-2048 Blackman
# window. It operates on mono audio at the 44100-Hz sample rate.
preproc_model_path = '/tmp/tfjs-sc-model/sc_preproc_model'
preproc_model = tf.keras.models.load_model(preproc_model_path)
preproc_model.summary()
preproc_model.input_shape
# Create some constants to be used later.
# Target sampling rate. It is required by the audio preprocessing model.
TARGET_SAMPLE_RATE = 44100
# The specific audio tensor length expected by the preprocessing model.
EXPECTED_WAVEFORM_LEN = preproc_model.input_shape[-1]
# Where the Speech Commands v0.02 dataset has been downloaded.
DATA_ROOT = "/tmp/speech_commands_v0.02"
WORDS = ("_background_noise_snippets_", "no", "yes")
# Unlike word examples, the noise samples in the Speech Commands v0.02 dataset
# are not divided into 1-second snippets. Instead, they are stored as longer
# recordings. Therefore we need to cut them up into 1-second snippet .wav
# files.
noise_wav_paths = glob.glob(os.path.join(DATA_ROOT, "_background_noise_", "*.wav"))
snippets_dir = os.path.join(DATA_ROOT, "_background_noise_snippets_")
os.makedirs(snippets_dir, exist_ok=True)
def extract_snippets(wav_path, snippet_duration_sec=1.0):
basename = os.path.basename(os.path.splitext(wav_path)[0])
sample_rate, xs = wavfile.read(wav_path)
assert xs.dtype == np.int16
n_samples_per_snippet = int(snippet_duration_sec * sample_rate)
i = 0
while i + n_samples_per_snippet < len(xs):
snippet_wav_path = os.path.join(snippets_dir, "%s_%.5d.wav" % (basename, i))
snippet = xs[i : i + n_samples_per_snippet].astype(np.int16)
wavfile.write(snippet_wav_path, sample_rate, snippet)
i += n_samples_per_snippet
for noise_wav_path in noise_wav_paths:
print("Extracting snippets from %s..." % noise_wav_path)
extract_snippets(noise_wav_path, snippet_duration_sec=1.0)
def resample_wavs(dir_path, target_sample_rate=44100):
"""Resample the .wav files in an input directory to given sampling rate.
The resampled waveforms are written to .wav files in the same directory with
    file names that end in "_44100hz.wav".
44100 Hz is the sample rate required by the preprocessing model. It is also
the most widely supported sample rate among web browsers and mobile devices.
For example, see:
https://developer.mozilla.org/en-US/docs/Web/API/AudioContextOptions/sampleRate
https://developer.android.com/ndk/guides/audio/sampling-audio
Args:
dir_path: Path to a directory that contains .wav files.
    target_sample_rate: Target sampling rate in Hz.
"""
wav_paths = glob.glob(os.path.join(dir_path, "*.wav"))
resampled_suffix = "_%shz.wav" % target_sample_rate
for i, wav_path in tqdm.tqdm(enumerate(wav_paths)):
if wav_path.endswith(resampled_suffix):
continue
sample_rate, xs = wavfile.read(wav_path)
xs = xs.astype(np.float32)
xs = librosa.resample(xs, sample_rate, TARGET_SAMPLE_RATE).astype(np.int16)
resampled_path = os.path.splitext(wav_path)[0] + resampled_suffix
wavfile.write(resampled_path, target_sample_rate, xs)
for word in WORDS:
word_dir = os.path.join(DATA_ROOT, word)
assert os.path.isdir(word_dir)
resample_wavs(word_dir, target_sample_rate=TARGET_SAMPLE_RATE)
@tf.function
def read_wav(filepath):
file_contents = tf.io.read_file(filepath)
return tf.expand_dims(tf.squeeze(tf.audio.decode_wav(
file_contents,
desired_channels=-1,
desired_samples=TARGET_SAMPLE_RATE).audio, axis=-1), 0)
@tf.function
def filter_by_waveform_length(waveform, label):
return tf.size(waveform) > EXPECTED_WAVEFORM_LEN
@tf.function
def crop_and_convert_to_spectrogram(waveform, label):
cropped = tf.slice(waveform, begin=[0, 0], size=[1, EXPECTED_WAVEFORM_LEN])
return tf.squeeze(preproc_model(cropped), axis=0), label
@tf.function
def spectrogram_elements_finite(spectrogram, label):
return tf.math.reduce_all(tf.math.is_finite(spectrogram))
def get_dataset(input_wav_paths, labels):
"""Get a tf.data.Dataset given input .wav files and their labels.
The returned dataset emits 2-tuples of `(spectrogram, label)`, wherein
- `spectrogram` is a tensor of dtype tf.float32 and shape [43, 232, 1].
It is z-normalized (i.e., have a mean of ~0.0 and variance of ~1.0).
- `label` is a tensor of dtype tf.int32 and shape [] (scalar).
Args:
input_wav_paths: Input audio .wav file paths as a list of string.
labels: integer labels (class indices) of the input .wav files. Must have
the same lengh as `input_wav_paths`.
Returns:
A tf.data.Dataset object as described above.
"""
ds = tf.data.Dataset.from_tensor_slices(input_wav_paths)
# Read audio waveform from the .wav files.
ds = ds.map(read_wav)
ds = tf.data.Dataset.zip((ds, tf.data.Dataset.from_tensor_slices(labels)))
# Keep only the waveforms longer than `EXPECTED_WAVEFORM_LEN`.
ds = ds.filter(filter_by_waveform_length)
# Crop the waveforms to `EXPECTED_WAVEFORM_LEN` and convert them to
# spectrograms using the preprocessing layer.
ds = ds.map(crop_and_convert_to_spectrogram)
# Discard examples that contain infinite or NaN elements.
ds = ds.filter(spectrogram_elements_finite)
return ds
input_wav_paths_and_labels = []
for i, word in enumerate(WORDS):
wav_paths = glob.glob(os.path.join(DATA_ROOT, word, "*_%shz.wav" % TARGET_SAMPLE_RATE))
print("Found %d examples for class %s" % (len(wav_paths), word))
labels = [i] * len(wav_paths)
input_wav_paths_and_labels.extend(zip(wav_paths, labels))
random.shuffle(input_wav_paths_and_labels)
input_wav_paths, labels = ([t[0] for t in input_wav_paths_and_labels],
[t[1] for t in input_wav_paths_and_labels])
dataset = get_dataset(input_wav_paths, labels)
# Show some example spectrograms for inspection.
fig = plt.figure(figsize=(40, 100))
dataset_iter = iter(dataset)
num_spectrograms_to_show = 10
for i in range(num_spectrograms_to_show):
ax = fig.add_subplot(1, num_spectrograms_to_show, i + 1)
spectrogram, label = next(dataset_iter)
spectrogram = spectrogram.numpy()
label = label.numpy()
plt.imshow(np.flipud(np.squeeze(spectrogram, -1).T), aspect=0.2)
ax.set_title("Example of \"%s\"" % WORDS[label])
ax.set_xlabel("Time frame #")
if i == 0:
ax.set_ylabel("Frequency bin #")
# The amount of data we have is relatively small. It fits into typical host RAM
# or GPU memory. For better training performance, we preload the data and
# put it into numpy arrays:
# - xs: The audio features (normalized spectrograms).
# - ys: The labels (class indices).
print(
"Loading dataset and converting data to numpy arrays. "
"This may take a few minutes...")
xs_and_ys = list(dataset)
xs = np.stack([item[0] for item in xs_and_ys])
ys = np.stack([item[1] for item in xs_and_ys])
print("Done.")
tfjs_model_json_path = '/tmp/tfjs-sc-model/model.json'
# Load the Speech Commands model. Weights are loaded along with the topology,
# since we will not train the model from scratch. Instead, we will perform transfer
# learning based on the model.
orig_model = tfjs.converters.load_keras_model(tfjs_model_json_path, load_weights=True)
# Remove the top Dense layer and add a new Dense layer of which the output
# size fits the number of sound classes we care about.
model = tf.keras.Sequential(name="TransferLearnedModel")
for layer in orig_model.layers[:-1]:
model.add(layer)
model.add(tf.keras.layers.Dense(units=len(WORDS), activation="softmax"))
# Freeze all but the last layer of the model. The last layer will be fine-tuned
# during transfer learning.
for layer in model.layers[:-1]:
layer.trainable = False
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["acc"])
model.summary()
# Train the model.
model.fit(xs, ys, batch_size=256, validation_split=0.3, shuffle=True, epochs=50)
# Convert the model to TensorFlow.js Layers model format.
tfjs_model_dir = "/tmp/tfjs-model"
tfjs.converters.save_keras_model(model, tfjs_model_dir)
# Create the metadata.json file.
metadata = {"words": ["_background_noise_"] + WORDS[1:], "frameSize": model.input_shape[-2]}
with open(os.path.join(tfjs_model_dir, "metadata.json"), "w") as f:
json.dump(metadata, f)
!ls -lh /tmp/tfjs-model
```
To deploy this model to the web, you can use the
[speech-commands NPM package](https://www.npmjs.com/package/@tensorflow-models/speech-commands).
The model.json and metadata.json should be hosted together with the two weights (.bin) files in the same HTTP/HTTPS directory.
Then the custom model can be loaded in JavaScript with:
```js
import * as tf from '@tensorflow/tfjs';
import * as speechCommands from '@tensorflow-models/speech-commands';
const recognizer = speechCommands.create(
'BROWSER_FFT',
null,
'http://test.com/my-audio-model/model.json', // URL to the custom model's model.json
'http://test.com/my-audio-model/metadata.json' // URL to the custom model's metadata.json
);
```
```
# Convert the model to TFLite.
# We need to combine the preprocessing model and the newly trained 3-class model
# so that the resultant model will be able to preform STFT and spectrogram
# calculation on mobile devices (i.e., without web browser's WebAudio).
combined_model = tf.keras.Sequential(name='CombinedModel')
combined_model.add(preproc_model)
combined_model.add(model)
combined_model.build([None, EXPECTED_WAVEFORM_LEN])
combined_model.summary()
tflite_output_path = '/tmp/tfjs-sc-model/combined_model.tflite'
converter = tf.lite.TFLiteConverter.from_keras_model(combined_model)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
]
with open(tflite_output_path, 'wb') as f:
f.write(converter.convert())
print("Saved tflite file at: %s" % tflite_output_path)
```
Visualization of the various Dbnary statistics
=============
```
import datetime
# PLotting
import bqplot as bq
# Data analys
import numpy as np
from IPython.display import clear_output
from ipywidgets import widgets
from pandasdatacube import *
ENDPOINT: str = "http://kaiko.getalp.org/sparql"
PREFIXES: dict[str] = {'dbnary': 'http://kaiko.getalp.org/dbnary#',
'dbnstats': 'http://kaiko.getalp.org/dbnary/statistics/',
'lime': 'http://www.w3.org/ns/lemon/lime#'}
HTML_COLORS = ["red", "blue", "cyan", "pink", "lime", "purple", "orange", "fuchsia", 'Teal', 'Navy', 'Maroon', 'Olive',
'Gray', 'Lime', 'Silver', 'Green', 'Black']
```
### Class that returns a DataFrame of SPARQL query results, plus other utility functions
```
def transformation_date(date: str) -> datetime.datetime:
    """
    Function that transforms a date string of the form YYYYMMDD into a datetime object
    """
    if int(date[6:]) == 0:  # if the day part is 00 (missing), fall back to the 1st of the month
return datetime.datetime(year=int(date[:4]), month=int(date[4:6]), day=int(date[6:]) + 1)
return datetime.datetime(year=int(date[:4]), month=int(date[4:6]), day=int(date[6:]))
```
### We start by retrieving all the different dataset types, and the user will be offered a choice of which dataset to download
### Processing of a few specific datasets; the code below is not generalizable
#### 1. dbnaryNymRelationsCube
```
dataset: str = "dbnstats:dbnaryNymRelationsCube"
dimensions: list[str] = ['dbnary:wiktionaryDumpVersion', 'dbnary:nymRelation', 'dbnary:observationLanguage']
mesures: list[str] = ['dbnary:count']
dtypes: dict[str] = {'count': int}
data1: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index()
relations1: np.ndarray = data1['nymRelation'].unique() # All type of relation in this cube
labels1: list[str] = [item.split('#')[-1] for item in relations1]
data1 = data1.pivot_table(columns='nymRelation', index=['wiktionaryDumpVersion', 'observationLanguage'],
aggfunc=max).reset_index().sort_values(by=['wiktionaryDumpVersion', 'observationLanguage'])
data1["wiktionaryDumpVersion"] = data1["wiktionaryDumpVersion"].map(transformation_date)
out1 = widgets.Output()
choice1 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
disabled=False, tooltips=['Statistiques de tout les pays par années',
'Statistiques d\' pays au cours du temps'])
def event1(obj):
with out1:
clear_output()
if choice1.value == "pays":
user_choice = widgets.Dropdown(options=list(data1["observationLanguage"].unique()), description="Choix:")
choosed_data = data1[data1["observationLanguage"] == user_choice.value]
y_sc = bq.LinearScale()
x_ord = bq.scales.DateScale()
line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data["count"][relations1].T,
stroke_width=1, display_legend=True, labels=labels1, scales={'x': x_ord, 'y': y_sc})
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
fig = bq.Figure(marks=[line], axes=[ax_x, ax_y], animation_duration=1000,
title=f"Différentes relations lexicales dans l'extraction {user_choice.value}")
def edit_graph(obj):
choosed_data = data1[data1["observationLanguage"] == user_choice.value]
line.y = choosed_data["count"][relations1].T
line.x = choosed_data["wiktionaryDumpVersion"]
fig.title = f"Différentes relations lexicales dans l'extraction {user_choice.value}"
if choice1.value == "glob":
user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
data1["wiktionaryDumpVersion"].unique()],
description="Choix:", value=max(data1["wiktionaryDumpVersion"].unique()))
x_ord = bq.OrdinalScale()
y_sc = bq.LinearScale()
choosed_data = data1[data1["wiktionaryDumpVersion"] == user_choice.value]
x = choosed_data["observationLanguage"].values
y = choosed_data["count"][relations1].T
bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=labels1,
color_mode='element',
display_legend=True, colors=HTML_COLORS)
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y], animation_duration=1000,
title=f"Nombre de relations lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}")
def edit_graph(obj):
choosed_data = data1[data1["wiktionaryDumpVersion"] == user_choice.value]
bar.x = choosed_data["observationLanguage"].values
bar.y = choosed_data["count"][relations1].T
fig.title = f"Nombre de relations lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}"
def add_pie_chart_in_tooltip(chart, d):
idx = d["data"]["index"]
bar.tooltip = widgets.HTML(pd.DataFrame(
data1[data1["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]["count"]).to_html())
bar.on_hover(add_pie_chart_in_tooltip)
display(user_choice, fig)
user_choice.observe(edit_graph, 'value')
choice1.observe(event1, 'value')
display(choice1, out1)
event1(None)
```
#### 2. dbnaryStatisticsCube
```
dataset: str = "dbnstats:dbnaryStatisticsCube"
dimensions: list[str] = ['dbnary:observationLanguage', 'dbnary:wiktionaryDumpVersion']
mesures: list[str] = ['dbnary:lexicalEntryCount', 'dbnary:lexicalSenseCount', 'dbnary:pageCount', 'dbnary:translationsCount']
dtypes: dict[str] = {"lexicalEntryCount": int, "translationsCount": int, "lexicalSenseCount": int, "pageCount": int}
data2: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index().sort_values(by=['wiktionaryDumpVersion', 'observationLanguage'])
categories2: list[str] = ["lexicalEntryCount", "translationsCount", "lexicalSenseCount", "pageCount"]
data2["wiktionaryDumpVersion"] = data2["wiktionaryDumpVersion"].map(transformation_date)
out2 = widgets.Output()
choice2 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tous les pays par années',
                                                          'Statistiques d\'un pays au cours du temps'])
def event2(obj):
with out2:
clear_output()
if choice2.value == "pays":
user_choice = widgets.Dropdown(options=list(data2["observationLanguage"].unique()), description="Choix:")
choosed_data = data2[data2["observationLanguage"] == user_choice.value]
y_sc = bq.LinearScale()
x_ord = bq.scales.DateScale()
line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data[categories2].T, stroke_width=1,
display_legend=True, labels=categories2, scales={'x': x_ord, 'y': y_sc})
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
fig = bq.Figure(marks=[line], axes=[ax_x, ax_y],
title=f"Nombre d'éléments dans l'extraction {user_choice.value}", animation_duration=1000)
def edit_graph(obj):
choosed_data = data2[data2["observationLanguage"] == user_choice.value]
line.y = choosed_data[categories2].T
line.x = choosed_data["wiktionaryDumpVersion"]
fig.title = f"Nombre d'éléments dans l'extraction {user_choice.value}"
if choice2.value == "glob":
user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
data2["wiktionaryDumpVersion"].unique()], description="Choix:",
value=max(data2["wiktionaryDumpVersion"].unique()))
x_ord = bq.OrdinalScale()
y_sc = bq.LinearScale()
choosed_data = data2[data2["wiktionaryDumpVersion"] == user_choice.value]
x = choosed_data["observationLanguage"].values
y = choosed_data[categories2].T
bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=categories2,
color_mode='element', display_legend=True, colors=HTML_COLORS)
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y], animation_duration=1000,
title=f"Nombre de relations lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}")
def edit_graph(obj):
choosed_data = data2[data2["wiktionaryDumpVersion"] == user_choice.value]
bar.x = choosed_data["observationLanguage"].values
bar.y = choosed_data[categories2].T
fig.title = f"Nombre de relations lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}"
def add_pie_chart_in_tooltip(chart, d):
idx = d["data"]["index"]
bar.tooltip = widgets.HTML(
pd.DataFrame(data2[data2["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]).to_html())
bar.on_hover(add_pie_chart_in_tooltip)
display(user_choice, fig)
user_choice.observe(edit_graph, 'value')
choice2.observe(event2, 'value')
display(choice2, out2)
event2(None)
```
#### 3. dbnaryTranslationsCube
```
dataset: str = "dbnstats:dbnaryTranslationsCube"
dimensions: list[str] = ['lime:language', 'dbnary:wiktionaryDumpVersion', 'dbnary:observationLanguage']
mesures: list[str] = ['dbnary:count']
dtypes: dict[str] = {'count': int}
data3: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index().sort_values(by=['wiktionaryDumpVersion', 'observationLanguage'])
relations3: np.ndarray = data3['language'].unique()
relations3 = relations3[relations3 != "number_of_languages"]
labels3: list[str] = [item.split('#')[-1] for item in relations3]
data3["wiktionaryDumpVersion"] = data3["wiktionaryDumpVersion"].map(transformation_date)
data3 = data3.pivot_table(columns='language', index=['wiktionaryDumpVersion', 'observationLanguage'],
aggfunc=max).reset_index().sort_values(by=['wiktionaryDumpVersion', 'observationLanguage'])
out3 = widgets.Output()
choice3 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tous les pays par années',
                                                          'Statistiques d\'un pays au cours du temps'])
def event3(obj):
with out3:
clear_output()
if choice3.value == "pays":
user_choice = widgets.Dropdown(options=list(data3["observationLanguage"].unique()), description="Choix:")
choosed_data = data3[data3["observationLanguage"] == user_choice.value]
y_sc = bq.LinearScale()
y_sc2 = bq.LinearScale()
x_ord = bq.scales.DateScale()
line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data["count"][relations3].T,
stroke_width=1, display_legend=True, labels=labels3, scales={'x': x_ord, 'y': y_sc})
line1 = bq.Lines(x=choosed_data["wiktionaryDumpVersion"],
y=choosed_data["count"]["number_of_languages"].values, scales={'x': x_ord, 'y': y_sc2},
stroke_width=1, display_legend=True, labels=["Number of languages"], colors=['green'],
line_style="dashed")
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
ax_y2 = bq.Axis(scale=y_sc2, orientation='vertical', grid_lines='solid', label='Nombre de langues',
label_offset='+50', side="right", label_color="green")
fig = bq.Figure(marks=[line, line1], axes=[ax_x, ax_y, ax_y2], animation_duration=1000,
title=f"Nombre de traductions dans l'extraction {user_choice.value}")
def edit_graph(obj):
choosed_data = data3[data3["observationLanguage"] == user_choice.value]
line.y = choosed_data["count"][relations3].T
line.x = choosed_data["wiktionaryDumpVersion"]
line1.x = choosed_data["wiktionaryDumpVersion"]
line1.y = choosed_data["count"]["number_of_languages"].values
fig.title = f"Nombre de traductions dans l'extraction {user_choice.value}"
if choice3.value == "glob":
user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
data3["wiktionaryDumpVersion"].unique()], description="Choix:",
value=max(data3["wiktionaryDumpVersion"].unique()))
x_ord = bq.OrdinalScale()
y_sc = bq.LinearScale()
y_sc2 = bq.LinearScale()
choosed_data = data3[data3["wiktionaryDumpVersion"] == user_choice.value]
x = choosed_data["observationLanguage"].values
y = choosed_data["count"][relations3].T
bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=labels3,
color_mode='element',
display_legend=True, colors=HTML_COLORS)
line = bq.Lines(x=x, y=choosed_data["count"]["number_of_languages"].values, scales={'x': x_ord, 'y': y_sc2},
stroke_width=1, display_legend=True, labels=["Number of languages"], colors=["green"])
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
ax_y2 = bq.Axis(scale=y_sc2, orientation='vertical', grid_lines='solid', label='Nombre de langues',
label_offset='+50', side="right", label_color="green")
fig = bq.Figure(marks=[bar, line], axes=[ax_x, ax_y, ax_y2], animation_duration=1000,
legend_location="top-left",
title=f"Nombre de traductions dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}")
def edit_graph(obj):
choosed_data = data3[data3["wiktionaryDumpVersion"] == user_choice.value].sort_values(
by="observationLanguage")
bar.x = choosed_data["observationLanguage"].values
bar.y = choosed_data["count"][relations3].T
line.x = bar.x
line.y = choosed_data["count"]["number_of_languages"].values
fig.title = f"Nombre de traductions lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}"
def add_pie_chart_in_tooltip(chart, d):
idx = d["data"]["index"]
bar.tooltip = widgets.HTML(pd.DataFrame(
data3[data3["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]["count"]).to_html())
bar.on_hover(add_pie_chart_in_tooltip)
display(user_choice, fig)
user_choice.observe(edit_graph, 'value')
choice3.observe(event3, 'value')
display(choice3, out3)
event3(None)
```
#### 4. enhancementConfidenceDataCube
```
dataset: str = "dbnstats:enhancementConfidenceDataCube"
dimensions: list[str] = ['dbnary:wiktionaryDumpVersion', 'dbnary:enhancementMethod', 'dbnary:observationLanguage']
mesures: list[str] = ['dbnary:precisionMeasure', 'dbnary:recallMeasure', 'dbnary:f1Measure']
dtypes: dict[str] = {"precisionMeasure": float, "recallMeasure": float, "f1Measure": float}
data4t: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index().sort_values(
by=['wiktionaryDumpVersion', 'observationLanguage'])
categories4: list[str] = ["precisionMeasure", "recallMeasure", "f1Measure"]
data4t["wiktionaryDumpVersion"] = data4t["wiktionaryDumpVersion"].map(transformation_date)
out4 = widgets.Output()
choice4 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tous les pays par années',
                                                          'Statistiques d\'un pays au cours du temps'])
choice4bis = widgets.ToggleButtons(options=[('Aléatoire', 'random'), ('Dbnary tversky', 'dbnary_tversky')],
description='Méthode d\'amélioration:',
disabled=False)
def event4(obj):
with out4:
clear_output()
data4 = data4t[data4t["enhancementMethod"] == choice4bis.value]
if choice4.value == "pays":
user_choice = widgets.Dropdown(options=list(data4["observationLanguage"].unique()), description="Choix:")
choosed_data = data4[data4["observationLanguage"] == user_choice.value]
y_sc = bq.LinearScale()
x_ord = bq.scales.DateScale()
line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data[categories4].T, stroke_width=1,
display_legend=True, labels=categories4, scales={'x': x_ord, 'y': y_sc})
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
fig = bq.Figure(marks=[line], axes=[ax_x, ax_y], animation_duration=1000,
title=f"Précision de la prédiction du contexte de traduction dans l'extraction du {user_choice.value}")
def edit_graph(obj):
choosed_data = data4[data4["observationLanguage"] == user_choice.value]
line.y = choosed_data[categories4].T
line.x = choosed_data["wiktionaryDumpVersion"]
fig.title = f"Précision de la prédiction du contexte de traduction dans l'extraction du {user_choice.value}"
if choice4.value == "glob":
user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
data4["wiktionaryDumpVersion"].unique()], description="Choix:",
value=max(data4["wiktionaryDumpVersion"].unique()))
x_ord = bq.OrdinalScale()
y_sc = bq.LinearScale()
choosed_data = data4[data4["wiktionaryDumpVersion"] == user_choice.value]
x = choosed_data["observationLanguage"].values
y = choosed_data[categories4].T
bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=categories4,
color_mode='element', display_legend=True, colors=HTML_COLORS)
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y], animation_duration=1000,
title=f"Précision de la prédiction du contexte de traduction dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}")
def edit_graph(obj):
choosed_data = data4[data4["wiktionaryDumpVersion"] == user_choice.value]
bar.x = choosed_data["observationLanguage"].values
bar.y = choosed_data[categories4].T
fig.title = f"Précision de la prédiction du contexte de traduction dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}"
def add_pie_chart_in_tooltip(chart, d):
idx = d["data"]["index"]
bar.tooltip = widgets.HTML(
pd.DataFrame(data4[data4["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]).to_html())
bar.on_hover(add_pie_chart_in_tooltip)
display(user_choice, fig)
user_choice.observe(edit_graph, 'value')
choice4.observe(event4, 'value')
choice4bis.observe(event4, 'value')
display(choice4, choice4bis, out4)
event4(None)
```
#### 5. translationGlossesCube
```
dataset: str = "dbnstats:translationGlossesCube"
dimensions: list[str] = ['dbnary:wiktionaryDumpVersion', 'dbnary:observationLanguage']
mesures: list[str] = ['dbnary:translationsWithNoGloss', 'dbnary:translationsWithSenseNumber', 'dbnary:translationsWithSenseNumberAndTextualGloss', 'dbnary:translationsWithTextualGloss']
dtypes: dict[str] = {"translationsWithSenseNumber": float, "translationsWithSenseNumberAndTextualGloss": float, "translationsWithTextualGloss": float, "translationsWithNoGloss": float}
data5: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index().sort_values(
by=['wiktionaryDumpVersion', 'observationLanguage'])
categories5: list[str] = ["translationsWithSenseNumber", "translationsWithSenseNumberAndTextualGloss",
"translationsWithTextualGloss", "translationsWithNoGloss"]
data5["wiktionaryDumpVersion"] = data5["wiktionaryDumpVersion"].map(transformation_date)
out5 = widgets.Output()
choice5 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tous les pays par années',
                                                          'Statistiques d\'un pays au cours du temps'])
def event5(obj):
with out5:
clear_output()
if choice5.value == "pays":
user_choice = widgets.Dropdown(options=list(data5["observationLanguage"].unique()), description="Choix:")
choosed_data = data5[data5["observationLanguage"] == user_choice.value]
y_sc = bq.LinearScale()
x_ord = bq.scales.DateScale()
line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data[categories5].T, stroke_width=1,
display_legend=True, labels=categories5, scales={'x': x_ord, 'y': y_sc})
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
fig = bq.Figure(marks=[line], axes=[ax_x, ax_y], title=f"{user_choice.value}", animation_duration=1000)
def edit_graph(obj):
choosed_data = data5[data5["observationLanguage"] == user_choice.value]
line.y = choosed_data[categories5].T
line.x = choosed_data["wiktionaryDumpVersion"]
fig.title = f"{user_choice.value}"
if choice5.value == "glob":
user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
data5["wiktionaryDumpVersion"].unique()], description="Choix:",
value=max(data5["wiktionaryDumpVersion"].unique()))
x_ord = bq.OrdinalScale()
y_sc = bq.LinearScale()
choosed_data = data5[data5["wiktionaryDumpVersion"] == user_choice.value]
x = choosed_data["observationLanguage"].values
y = choosed_data[categories5].T
bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=categories5,
color_mode='element', display_legend=True, colors=HTML_COLORS)
ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y],
title=f"{np.datetime_as_string(user_choice.value, unit='D')}", animation_duration=1000)
def edit_graph(obj):
choosed_data = data5[data5["wiktionaryDumpVersion"] == user_choice.value]
bar.x = choosed_data["observationLanguage"].values
bar.y = choosed_data[categories5].T
fig.title = f"{np.datetime_as_string(user_choice.value, unit='D')}"
def add_pie_chart_in_tooltip(chart, d):
idx = d["data"]["index"]
bar.tooltip = widgets.HTML(
pd.DataFrame(data5[data5["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]).to_html())
bar.on_hover(add_pie_chart_in_tooltip)
display(user_choice, fig)
user_choice.observe(edit_graph, 'value')
choice5.observe(event5, 'value')
display(choice5, out5)
event5(None)
```
<a href="https://colab.research.google.com/github/daanishrasheed/DS-Unit-2-Applied-Modeling/blob/master/DS_Sprint_Challenge_7.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
_Lambda School Data Science, Unit 2_
# Applied Modeling Sprint Challenge: Predict Chicago food inspections 🍔
For this Sprint Challenge, you'll use a dataset with information from inspections of restaurants and other food establishments in Chicago from January 2010 to March 2019.
[See this PDF](https://data.cityofchicago.org/api/assets/BAD5301B-681A-4202-9D25-51B2CAE672FF) for descriptions of the data elements included in this dataset.
According to [Chicago Department of Public Health — Food Protection Services](https://www.chicago.gov/city/en/depts/cdph/provdrs/healthy_restaurants/svcs/food-protection-services.html), "Chicago is home to 16,000 food establishments like restaurants, grocery stores, bakeries, wholesalers, lunchrooms, mobile food vendors and more. Our business is food safety and sanitation with one goal, to prevent the spread of food-borne disease. We do this by inspecting food businesses, responding to complaints and food recalls."
#### Your challenge: Predict whether inspections failed
The target is the `Fail` column.
- When the food establishment failed the inspection, the target is `1`.
- When the establishment passed, the target is `0`.
#### Run this cell to install packages in Colab:
```
%%capture
import sys
if 'google.colab' in sys.modules:
# Install packages in Colab
!pip install category_encoders==2.*
!pip install eli5
!pip install pandas-profiling==2.*
!pip install pdpbox
!pip install shap
```
#### Run this cell to load the data:
```
import pandas as pd
train_url = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5'
test_url = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a'
train = pd.read_csv(train_url)
test = pd.read_csv(test_url)
assert train.shape == (51916, 17)
assert test.shape == (17306, 17)
```
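As a quick sanity check, it can help to glance at the balance of the `Fail` target described above; this optional cell only inspects the loaded data.
```
# Share of failed (1) vs. passed (0) inspections in the training data
print(train['Fail'].value_counts(normalize=True))
```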
### Part 1: Preprocessing
You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.
_To earn a score of 3 for this part, find and explain leakage. The dataset has a feature that will give you an ROC AUC score > 0.90 if you process and use the feature. Find the leakage and explain why the feature shouldn't be used in a real-world model to predict the results of future inspections._
### Part 2: Modeling
**Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.
Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**
_To earn a score of 3 for this part, get an ROC AUC test score >= 0.70 (without using the feature with leakage)._
### Part 3: Visualization
Make visualizations for model interpretation. (You may use any libraries.) Choose two of these types:
- Confusion Matrix
- Permutation Importances
- Partial Dependence Plot, 1 feature isolation
- Partial Dependence Plot, 2 features interaction
- Shapley Values
_To earn a score of 3 for this part, make four of these visualization types._
## Part 1: Preprocessing
> You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.
```
train.head(35)
n = train["Violations"].str.split(" - ", n = 1, expand = True)
train.drop(columns =["Violations"], inplace = True)
train['Violations'] = n[0]
train['Violations'].value_counts()
s = train['Violations'].str.split("|", n = 1, expand = True)
train['Violations'] = s[0]
train.head(1)
n = test["Violations"].str.split(" - ", n = 1, expand = True)
test.drop(columns =["Violations"], inplace = True)
test['Violations'] = n[0]
test['Facility Type'].value_counts()
s = test['Violations'].str.split("|", n = 1, expand = True)
test['Violations'] = s[0]
train.head(1)
target = 'Fail'
features = ['Facility Type', 'Risk', 'Inspection Type', 'Violations']
```
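The brief above also asks us to find and explain leakage. One hedged way to hunt for it is to score each column on its own: fit a tiny one-feature model per column and compare cross-validated ROC AUC values, treating any single column that scores far above the rest (say, above 0.90) as a leakage suspect, since a feature that almost perfectly predicts the outcome was probably recorded at or after the inspection result. The sketch below is only an illustration of that idea; the encoder and classifier choices are assumptions, not part of the graded solution.
```
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier

# Score every candidate feature by itself against the Fail target
for col in train.columns.drop('Fail'):
    pipe = make_pipeline(
        ce.OrdinalEncoder(),                      # encode strings as integers
        SimpleImputer(strategy='most_frequent'),  # fill missing values
        DecisionTreeClassifier(max_depth=5, random_state=42)
    )
    score = cross_val_score(pipe, train[[col]], train['Fail'],
                            cv=3, scoring='roc_auc').mean()
    print(f'{col}: {score:.3f}')
```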
## Part 2: Modeling
> **Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.
>
> Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**
```
from sklearn.model_selection import train_test_split
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
stratify=train['Fail'], random_state=42)
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
transformers = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer()
)
X_train_transformed = transformers.fit_transform(X_train)
X_val_transformed = transformers.transform(X_val)
X_val_transformed = pd.DataFrame(X_val_transformed, columns=X_val.columns)
rf = RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
rf.fit(X_train_transformed, y_train)
print('Validation Accuracy', rf.score(X_val_transformed, y_val))
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from xgboost import XGBClassifier
processor = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median')
)
X_train_processed = processor.fit_transform(X_train)
X_val_processed = processor.transform(X_val)
eval_set = [(X_train_processed, y_train),
(X_val_processed, y_val)]
model = XGBClassifier(n_estimators=1000, n_jobs=-1)
model.fit(X_train_processed, y_train, eval_set=eval_set, eval_metric='auc',
early_stopping_rounds=10)
from sklearn.metrics import roc_auc_score
X_test_processed = processor.transform(X_test)
class_index = 1
y_pred_proba = model.predict_proba(X_test_processed)[:, class_index]
print(f'Test ROC AUC')
print(roc_auc_score(y_test, y_pred_proba)) # Ranges from 0-1, higher is better
```
## Part 3: Visualization
> Make visualizations for model interpretation. (You may use any libraries.) Choose two of these types:
>
> - Permutation Importances
> - Partial Dependence Plot, 1 feature isolation
> - Partial Dependence Plot, 2 features interaction
> - Shapley Values
```
%matplotlib inline
from pdpbox.pdp import pdp_isolate, pdp_plot
feature='Risk'
encoder = transformers.named_steps['ordinalencoder']
for item in encoder.mapping:
if item['col'] == feature:
feature_mapping = item['mapping']
feature_mapping = feature_mapping[feature_mapping.index.dropna()]
category_names = feature_mapping.index.tolist()
category_codes = feature_mapping.values.tolist()
isolated = pdp_isolate(
model=rf,
dataset=X_val_transformed,
model_features=X_val.columns,
feature=feature,
cust_grid_points=category_codes
)
fig, axes = pdp_plot(isolated, feature_name=feature,
plot_lines=True, frac_to_plot=0.01)
from pdpbox.pdp import pdp_interact, pdp_interact_plot
features = ['Risk', 'Inspection Type']
years_grid = [0, 5, 10, 15, 20, 25, 30]
interaction = pdp_interact(
model=rf,
dataset=X_val_transformed,
model_features=X_val.columns,
features=features,
cust_grid_points=[category_codes, years_grid]
)
pdp_interact_plot(interaction, plot_type='grid', feature_names=features);
from sklearn.metrics import accuracy_score
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
def plot_confusion_matrix(y_true, y_pred):
labels = unique_labels(y_true)
columns = [f'Predicted {label}' for label in labels]
index = [f'Actual {label}' for label in labels]
table = pd.DataFrame(confusion_matrix(y_true, y_pred),
columns=columns, index=index)
return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
plot_confusion_matrix(y_val, y_pred);
```
# How to Win in the Data Science Field
## A. Business Understanding
This project aims to answer the question: "How does one win in the Data Science field?"
To gain insight on this main inquiry, I focused on addressing the following:
- Are there major differences in salary among the different data science roles?
- What are the essential technical skills to do well in data science?
- Does educational background play a huge part?
- How much does continuous learning on online platforms help?
## B. Data Understanding
For this project I have chosen to use the 2019 Kaggle ML & DS Survey raw data. I think this is a good dataset choice for the following reasons:
- The Kaggle Community is the biggest data science and machine learning community, and therefore provides good representation of data science professionals.
- It features a lot of relevant variables, from salary, demographics, to characteristics and habits of data science professionals in the community.
### Data Access and Exploration
The first step is to import all the needed libraries.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import textwrap
%matplotlib inline
```
We then import the dataset to be used for the analysis.
```
# Import data
df = pd.read_csv('./multiple_choice_responses.csv')
df.head()
df.shape
```
There was a total of 19.7K data science professionals in the survey, and 246 fields corresponding to their responses to the survey. There are missing values, but we'll deal with them later depending on the analysis that will be implemented.
## C. Preparing the Data
### Cleaning the data
We do some necessary filtering to the data with the following rationale:
- Filtering among professionals / employed only because we are concerned about salary outcomes
- Focusing among US residents only to lessen the variation in pay due to region
- Focusing among professionals with salary >=30K USD only to most likely capture full-time employees
```
# Filter data among professionals only
df = df[~df.Q5.isin(['Student', 'Not employed']) & df.Q5.notnull()]
# Filter data among residents of the US only
df = df[df.Q3.isin(['United States of America'])]
# Filter data among annual salary of >=30K
df = df[~df.Q10.isin(['$0-999','1,000-1,999','2,000-2,999','3,000-3,999','4,000-4,999','5,000-7,499','7,500-9,999','10,000-19,999','20,000-29,999']) & df.Q10.notnull()]
# Recode some of the salary bins
df.loc[df['Q10'].isin(['300,000-500,000','> $500,000']), 'Q10'] = '>= $300,000'
# Shape of the dataframe
df.shape
```
From this filtering, we get a final sample of 2,013 US data science professionals earning an annual wage of >=30K USD.
### Missing Values
As this analysis is highly descriptive and I will not employ any statistical modelling, I will address the missing values by simply dropping them from the computed percentages.
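Concretely, the helper functions below drop the missing responses on the fly before computing percentages; a minimal sketch of that pattern (using the education question Q4 as an example) looks like this:
```
# Keep only the non-missing answers to a question, then compute percentages
answered = df[df['Q4'].notnull()]['Q4']
pct = (answered.value_counts() / len(answered) * 100).round(1)
print(pct)
```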
### Function Creation
I created a few helper functions for charts to be used all throughout the analysis.
Most of my charts are going to be bar plots and heatmaps. I created separate functions depending on the question type (single-response and multiple-response variables).
```
def barplots_single_answer(q_number, x_title, y_title, chart_title, order=None):
'''
INPUT:
q_number - question number for the variable of interest. It should be a single-answer question.
x_title - desired title of the x-axis
y_title - desired title of the y-axis
chart_title - desired main title
    order - desired sorting of the categories on the y-axis (will default to descending according to frequency of answers)
OUTPUT:
A barplot that shows the frequency in % for the variable of interest
This function prepares the data for the visualization and draws the bar plot.
'''
cat_values = round((df[pd.notnull(df[q_number])][q_number].value_counts()/len(df[pd.notnull(df[q_number])][q_number])) * 100,1)
cat_values = cat_values.reset_index().rename(columns = {'index':q_number, q_number:'pct'})
f, ax = plt.subplots(figsize=(8,8))
sns.barplot(x = 'pct', y = q_number, data=cat_values, color='dodgerblue', order=order)
ax.set_xlabel(x_title)
ax.set_ylabel(y_title)
plt.title(chart_title, fontsize = 14, fontweight ='bold')
print(cat_values)
def barplots_heatmap_single_answer(q_number, bar_chart_title, heatmap_title, order_rows = False):
'''
INPUT:
q_number - question number for the variable of interest. It should be a single-answer question.
bar_chart_title - desired title of the frequency bar chart
heatmap_title - desired title of the heatmap chart
order_rows - desired sorting of the rows (will default to descending according to frequency of answers)
OUTPUT:
Two charts: A barplot that shows the frequency in % for the variable of interest, and a heatmap
that visually correlates the variable of interest with salary range.
Table reference for the percentages
This function prepares the data for the visualization and provides the two visualizations specified.
'''
# Value count for the variable of interest
cat_values = df[pd.notnull(df[q_number])][q_number].value_counts()
# Set a threshold of 20 records for category to be included in plotting, otherwise it will distort the normalized heatmap
cat_values = cat_values[cat_values>=20]
cat_values = round((cat_values/len(df[pd.notnull(df[q_number])][q_number])) * 100,1)
if(order_rows == False):
cat_values = cat_values
else:
cat_values = cat_values.reindex(index = order_rows)
cat_values = cat_values.reset_index().rename(columns = {'index':q_number, q_number:'pct'})
# Sort order for the salary bins
order_col = ['30,000-39,999','40,000-49,999','50,000-59,999','60,000-69,999','70,000-79,999',
'80,000-89,999','90,000-99,999','100,000-124,999','125,000-149,999',
'150,000-199,999','200,000-249,999', '250,000-299,999','>= $300,000']
y_labels = cat_values[q_number]
# Crosstabs for the salary and variable of interest
crosstab = pd.crosstab(df[q_number],df['Q10'], normalize='index')
crosstab = crosstab.reindex(order_col, axis="columns")
if(order_rows == False):
crosstab = crosstab.reindex(y_labels, axis="rows")
else:
crosstab = crosstab.reindex(order_rows, axis="rows")
# Set-up subplots
fig = plt.figure(figsize=(14,6))
grid = plt.GridSpec(1, 10, wspace=10, hspace=1)
plt.subplot(grid[0, :3])
# Left plot (barplot)
ax1 = sns.barplot(x = 'pct', y = q_number, data=cat_values, color='dodgerblue', order=None)
plt.title(bar_chart_title, fontsize = 14, fontweight ='bold')
ax1.set_xlabel('Percentage %')
ax1.set_ylabel('')
# Text-wrapping of y-labels
f = lambda x: textwrap.fill(x.get_text(), 27)
ax1.set_yticklabels(map(f, ax1.get_yticklabels()))
# Right plot (heatmap)
plt.subplot(grid[0, 4:])
ax2 = sns.heatmap(crosstab, cmap="Blues", cbar=False)
plt.title(heatmap_title, fontsize = 14, fontweight ='bold')
ax2.set_xlabel('Yearly Salary')
ax2.set_ylabel('')
ax2.set_yticklabels(map(f, ax2.get_yticklabels()))
print(cat_values)
def barplots_heatmap_multi_answer(multi_question_list, bar_chart_title, heatmap_title, order_rows = False):
'''
INPUT:
multi_question_list - a list of fields containing the response for a multiple answer-type question
bar_chart_title - desired title of the frequency bar chart
heatmap_title - desired title of the heatmap chart
order_rows - desired sorting of the rows (will default to descending according to frequency of answers)
OUTPUT:
Two charts: A barplot that shows the frequency in % for the variable of interest, and a heatmap
that visually correlates the variable of interest with salary range.
Table reference for the percentages
This function prepares the data for the visualization and provides the two visualizations specified.
'''
multi_question = multi_question_list
df_store = []
for question in (multi_question):
df_temp = df[question].value_counts()
df_store.append(df_temp)
df_multi = pd.concat(df_store)
df_multi = pd.DataFrame(df_multi).reset_index().rename(columns = {'index':multi_question[0], 0:'pct'})
df_multi = df_multi[df_multi['pct']>=20]
df_multi['pct'] = round(df_multi['pct']/sum(df_multi['pct']) * 100,1)
if(order_rows == False):
df_multi = df_multi.sort_values('pct', ascending=False)
else:
df_multi = df_multi.reindex(index = order_rows)
# Sort order for the salary bins
order_col = ['30,000-39,999','40,000-49,999','50,000-59,999','60,000-69,999','70,000-79,999',
'80,000-89,999','90,000-99,999','100,000-124,999','125,000-149,999',
'150,000-199,999','200,000-249,999', '250,000-299,999','>= $300,000']
y_labels = df_multi[multi_question[0]]
# Crosstabs for the salary and variable of interest
df_store_xtab = []
for question in (multi_question):
df_temp_xtab = pd.crosstab(df[question],df['Q10'], normalize='index')
df_store_xtab.append(df_temp_xtab)
df_multi_xtab = pd.concat(df_store_xtab)
df_multi_xtab = df_multi_xtab.reindex(order_col, axis="columns")
if(order_rows == False):
df_multi_xtab = df_multi_xtab.reindex(y_labels, axis="rows")
else:
df_multi_xtab = df_multi_xtab.reindex(order_rows, axis="rows")
# Set-up subplots
#fig = plt.figure(figsize=(14,6))
fig = plt.figure(figsize=(14,8))
grid = plt.GridSpec(1, 10, wspace=10, hspace=1)
plt.subplot(grid[0, :3])
# Left plot (barplot)
ax1 = sns.barplot(x = 'pct', y = multi_question[0], data=df_multi, color='dodgerblue', order=None)
plt.title(bar_chart_title, fontsize = 14, fontweight ='bold')
ax1.set_xlabel('Percentage %')
ax1.set_ylabel('')
# Text-wrapping of y-labels
f = lambda x: textwrap.fill(x.get_text(), 27)
ax1.set_yticklabels(map(f, ax1.get_yticklabels()))
# Right plot (heatmap)
plt.subplot(grid[0, 4:])
ax2 = sns.heatmap(df_multi_xtab, cmap="Blues", cbar=False)
plt.title(heatmap_title, fontsize = 14, fontweight ='bold')
ax2.set_xlabel('Yearly Salary')
ax2.set_ylabel('')
ax2.set_yticklabels(map(f, ax2.get_yticklabels()))
print(df_multi)
```
## D. Analysis
### Question 1: Are there major differences in salary among the different data science roles?
We first look at the salary distribution of the sample. Most of the data science professionals have salaries that fall within the $100K-200K range.
#### Chart 1: Salary Distribution (Q10) - Bar Chart
```
barplots_single_answer('Q10', 'Percentage %', 'Salary Range', 'Annual Salary Distribution',
['30,000-39,999','40,000-49,999','50,000-59,999','60,000-69,999','70,000-79,999',
'80,000-89,999','90,000-99,999','100,000-124,999','125,000-149,999',
'150,000-199,999','200,000-249,999', '250,000-299,999','>= $300,000'])
```
#### Chart 2: Data Practitioners Distribution (Q5) - Bar Chart
```
barplots_heatmap_single_answer('Q5', 'Current Data Role (%)', 'Annual Salary by Current Data Role')
```
Interpretation:
- Data Scientists are heavy on the 100K-200K USD range which reflects our entire Kaggler sample. This makes sense because Data Scientist is the top profession at 34%.
- There is an obvious discrepancy between a data scientist and a data analyst salary, with the former showing a heavier concentration on the 100K-200K USD range, and the latter somewhere within 60K-125K. It seems that data scientists are paid much more than analysts.
- Other professions such as Statisticians and Database Engineers tend to have more variation in pay, while Data Engineers are more concentrated in the 120K-125K range.
### Question 2: What are the essential technical skills to do well in data science?
While the questionnaire is very detailed in terms of the technical skills asked among the Kagglers, I decided to focus on a few main items (so as not to bore the readers):
- "What programming languages do you use on a regular basis?"
- From the above question, I derive how many programming languages they regularly use
- Primary data analysis tools used
#### Chart 3: Programming Languages Used
```
barplots_heatmap_multi_answer(['Q18_Part_1', 'Q18_Part_2', 'Q18_Part_3', 'Q18_Part_4', 'Q18_Part_5', 'Q18_Part_6', 'Q18_Part_7', 'Q18_Part_8',
'Q18_Part_9', 'Q18_Part_10', 'Q18_Part_11', 'Q18_Part_12'],
'Programming Languages Used (%)',
'Annual Salary by Programming Languages Used',
order_rows = False)
```
Interpretation:
- Python is the most popular language; SQL and R are also popular
- Software engineering-oriented languages such as Java, C++, and C have more dense representation in the 150K-200K range.
- Other noteworthy languages that relate to higher pay are Matlab, Typescript, and Bash.
I also looked at the percentage of the sample who do not code at all:
```
# How many do not code at all?
df['Q18_Part_11'].value_counts()/len(df)*100
```
Only a small subset of the population does not code, which is not surprising given that these are Kagglers.
I also ran an analysis to check how many programming languages these data science professionals use:
```
lang_list = ['Q18_Part_1', 'Q18_Part_2', 'Q18_Part_3', 'Q18_Part_4', 'Q18_Part_5', 'Q18_Part_6', 'Q18_Part_7', 'Q18_Part_8',
'Q18_Part_9', 'Q18_Part_10','Q18_Part_12']
order_col = ['30,000-39,999','40,000-49,999','50,000-59,999','60,000-69,999','70,000-79,999',
'80,000-89,999','90,000-99,999','100,000-124,999','125,000-149,999',
'150,000-199,999','200,000-249,999', '250,000-299,999','>= $300,000']
df['Count_Languages'] = df[lang_list].apply(lambda x: x.count(), axis=1)
# Group by salary range, get the average count of programming language used
table_lang_salary = df[['Count_Languages','Q10']].groupby(['Q10']).mean()
table_lang_salary = table_lang_salary.reindex(order_col, axis="rows").reset_index()
# Average number of programming languages used
table_lang_salary['Count_Languages'].mean()
```
On average, they use 2-3 languages.
But how does this correlate with salary? To answer this question, I created this bar chart:
#### Chart 4: Number of Programming Languages Used
```
f, ax = plt.subplots(figsize=(5,8))
ax = sns.barplot(x='Count_Languages', y="Q10",
data=table_lang_salary, color='dodgerblue')
plt.title('Salary Range vs. \n How Many Programming Languages Used', fontsize = 14, fontweight ='bold')
ax.set_xlabel('Avg Languages Used')
ax.set_ylabel('Annual Salary Range')
```
Interpretation:
Plotting the number of languages used against salary range, we see that the number of languages used tends to increase as pay increases, up to the 125K-150K point. So yes, it may be worth learning more than one.
Apart from coding, I also looked at other tools that data science professionals use based on this question:
"What is the primary tool that you use at work or school to analyze data?"
#### Chart 5: Primary Data Analysis Tools
```
barplots_heatmap_single_answer('Q14', 'Primary Data Analysis Tool (%)', 'Annual Salary by Data Analysis Tool')
```
Interpretation:
- Local development environments are the most popular tools, with half of the sample using them.
- Cloud-based software users have a large salary leverage though - those who use it appear to have a higher earning potential, most likely at 150K-200K, and even a high concentration of professionals earning more than 300K USD.
- There is a large variation in pay among basic and advanced statistical software users.
### Question 3: Does educational background play a huge part?
#### Chart 6. Highest level of educational attainment (Q4) - Bar chart and salary heatmap side by side
```
barplots_heatmap_single_answer('Q4', 'Highest Educational Attainment (%)', 'Annual Salary by Educational Attainment',
order_rows=['Doctoral degree', 'Master’s degree', 'Professional degree', 'Bachelor’s degree',
'Some college/university study without earning a bachelor’s degree'])
```
Interpretation:
- Data science professionals tend to be a highly educated group, with 72% having either a Master’s Degree or a PhD.
- The salary heatmaps do not really show anything remarkable, except that Professional Degrees have a high concentration in the 150K-250K USD bracket. This group only constitutes 1.3% of the sample, hence I would say this is inconclusive.
### Question 4: How much does continuous learning on online platforms help?
To answer this question, I referred to these items in the survey:
- "On which platforms have you begun or completed data science courses?"
- "Who/what are your favorite media sources that report on data science topics?"
First, I looked at the online platforms and computed the percentage of those who learned through this medium (excluding formal university education):
```
# Compute for Percentage of Kagglers who learned through online platforms
platform_list = ['Q13_Part_1', 'Q13_Part_2', 'Q13_Part_3', 'Q13_Part_4', 'Q13_Part_5', 'Q13_Part_6',
'Q13_Part_7', 'Q13_Part_8', 'Q13_Part_9', 'Q13_Part_12']
df['Count_Platform'] = df[platform_list].apply(lambda x: x.count(), axis=1)
len(df[df['Count_Platform'] > 0]) / len(df['Count_Platform'])
```
Interpretation: A stunning majority, 82%, learn data science from these platforms.
On the specific online platforms:
#### Chart 7. Platforms used to learn data science (Q13) - Bar chart and salary heatmap side by side
```
barplots_heatmap_multi_answer(['Q13_Part_1', 'Q13_Part_2', 'Q13_Part_3', 'Q13_Part_4', 'Q13_Part_5', 'Q13_Part_6',
'Q13_Part_7', 'Q13_Part_8', 'Q13_Part_9', 'Q13_Part_11', 'Q13_Part_12'],
'Platforms Used to Learn Data Science(%)',
'Annual Salary by Platforms Used')
```
Interpretation:
- Coursera is by far the most popular, followed by Datacamp, Udemy, and Kaggle Courses.
- Interestingly, Fast.ai skewed heavily on the higher income levels 125K-150K.
- DataQuest, on the other hand, is much more spread over the lower and middle income levels, which suggests that beginners tend to use this site more.
Apart from online courses, I also looked at other online media sources based on this question:
"Who/what are your favorite media sources that report on data science topics?"
#### Chart 8. Favorite Media Sources (Q12) - Bar chart and salary heatmap side by side
```
barplots_heatmap_multi_answer(['Q12_Part_1', 'Q12_Part_2', 'Q12_Part_3', 'Q12_Part_4', 'Q12_Part_5', 'Q12_Part_6',
'Q12_Part_7', 'Q12_Part_8', 'Q12_Part_9', 'Q12_Part_10', 'Q12_Part_11', 'Q12_Part_12'],
'Favorite Data Science Media Sources (%)',
'Annual Salary by Media Sources',
order_rows = False)
```
Interpretation:
- Blogs are the most popular, with 21% choosing them as their favorite source of data science topics.
- I did not see much pattern from the salary heatmap — most are just bunched within the 100K-200K USD range.
- Curiously, Hacker News appears to have more followers on the higher end with 150K-200K salaries.
## Conclusion
### To win in the data science field (AND if you define winning as having a high pay):
- Code! Learning more languages will probably help. Apart from Python and R, consider adding other non-data-science languages such as C++, Java, and TypeScript to your toolkit.
- Cloud-based technologies are worth learning. Get ready to explore those AWS, GCP, and Azure platforms for big data.
- Continuously upskill and update through MOOCs and online courses, and through media such as blogs and technology news.
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Note: `Facet Grids and Trellis Plots` are available in version <b>2.0.12+</b><br>
Run `pip install plotly --upgrade` to update your Plotly version
```
import plotly
plotly.__version__
```
#### Facet by Column
A `facet grid` is a generalization of a scatterplot matrix where we can "facet" a row and/or column by another variable. Given some tabular data, stored in a `pandas.DataFrame`, we can plot one variable against another to form a regular scatter plot, _and_ we can pick a third faceting variable to form panels along the rows and/or columns to segment the data even further, forming a bunch of panels. We can also assign a coloring rule or a heatmap based on a color variable to color the plot.
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
fig = ff.create_facet_grid(
mpg,
x='displ',
y='cty',
facet_col='cyl',
)
py.iplot(fig, filename='facet by col')
```
#### Facet by Row
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
fig = ff.create_facet_grid(
mpg,
x='displ',
y='cty',
facet_row='cyl',
marker={'color': 'rgb(86, 7, 100)'},
)
py.iplot(fig, filename='facet by row')
```
#### Facet by Row and Column
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
fig = ff.create_facet_grid(
mpg,
x='displ',
y='cty',
facet_row='cyl',
facet_col='drv',
marker={'color': 'rgb(234, 239, 155)'},
)
py.iplot(fig, filename='facet by row and col')
```
#### Color by Categorical Variable
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')
fig = ff.create_facet_grid(
mtcars,
x='mpg',
y='wt',
facet_col='cyl',
color_name='cyl',
color_is_cat=True,
)
py.iplot(fig, filename='facet - color by categorical variable')
```
#### Custom Colormap
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
tips = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/tips.csv')
fig = ff.create_facet_grid(
tips,
x='total_bill',
y='tip',
color_name='sex',
show_boxes=False,
marker={'size': 10, 'opacity': 1.0},
colormap={'Male': 'rgb(165, 242, 242)', 'Female': 'rgb(253, 174, 216)'}
)
py.iplot(fig, filename='facet - custom colormap')
```
#### Label Variable Name:Value
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')
fig = ff.create_facet_grid(
mtcars,
x='mpg',
y='wt',
facet_col='cyl',
facet_col_labels='name',
facet_row_labels='name',
)
py.iplot(fig, filename='facet - label variable name')
```
#### Custom Labels
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')
fig = ff.create_facet_grid(
mtcars,
x='wt',
y='mpg',
facet_col='cyl',
facet_col_labels={4: '$2^2 = 4$', 6: '$\\frac{18}{3} = 6$', 8: '$2\cdot4 = 8$'},
marker={'color': 'rgb(240, 100, 2)'},
)
py.iplot(fig, filename='facet - custom labels')
```
#### Plot in 'ggplot2' style
To learn more about ggplot2, check out http://ggplot2.tidyverse.org/reference/facet_grid.html
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
tips = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/tips.csv')
fig = ff.create_facet_grid(
tips,
x='total_bill',
y='tip',
facet_row='sex',
facet_col='smoker',
marker={'symbol': 'circle-open', 'size': 10},
ggplot2=True
)
py.iplot(fig, filename='facet - ggplot2 style')
```
#### Plot with 'scattergl' traces
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
grid = ff.create_facet_grid(
mpg,
x='class',
y='displ',
trace_type='scattergl',
)
py.iplot(grid, filename='facet - scattergl')
```
#### Plot with Histogram Traces
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
tips = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/tips.csv')
fig = ff.create_facet_grid(
tips,
x='total_bill',
y='tip',
facet_row='sex',
facet_col='smoker',
trace_type='histogram',
)
py.iplot(fig, filename='facet - histogram traces')
```
#### Other Trace Types
Facet Grids support `scatter`, `scattergl`, `histogram`, `bar` and `box` trace types. More trace types coming in the future.
```
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
tips = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/tips.csv')
fig = ff.create_facet_grid(
tips,
y='tip',
facet_row='sex',
facet_col='smoker',
trace_type='box',
)
py.iplot(fig, filename='facet - box traces')
```
#### Reference
```
help(ff.create_facet_grid)
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'facet-and-trellis-plots.ipynb', 'python/facet-plots/', 'Facet and Trellis Plots',
'How to make Facet and Trellis Plots in Python with Plotly.',
title = 'Python Facet and Trellis Plots | plotly',
redirect_from ='python/trellis-plots/',
has_thumbnail='true', thumbnail='thumbnail/facet-trellis-thumbnail.jpg',
language='python',
display_as='statistical', order=10.2)
```
# Introduction
These interactive notebooks are based on the Python language.
To execute code, select a code cell and press `Ctrl + Enter`.
```
from platform import python_version
print("Используемая версия Python:", python_version())
```
The cells are meant to be executed in order.
```
l = [1, 2, 3]
l[0]
type(l)
help(l)
```
## Mathematical toolkit
These interactive notebooks use a mathematical toolkit based on vector-quaternion pairs.
A vector (`Vector`) is represented by three numbers, a quaternion (`Quaternion`) by four.
A vector-quaternion pair (`Transformation`) consists of a vector and a quaternion and describes a translation followed by a rotation.
$$ T =
\begin{bmatrix}
[v_x, v_y, v_z] \\
[q_w, q_x, q_y, q_z]
\end{bmatrix}
$$
The mathematical toolkit is located in the file [kinematics.py](../edit/kinematics.py)
### Vector
A vector is a triple of numbers and describes a translation:
$$ v = [v_x, v_y, v_z] $$
```
from kinematics import Vector
```
Creating a vector requires three numbers:
```
v1 = Vector(1, 2, 3)
v2 = Vector(-2, 4, -3)
```
Vectors can be added element-wise:
```
v1 + v2
```
They can also be multiplied by a scalar:
```
2.5 * v1
```
The zero vector is created via `Vector.zero()`:
```
Vector.zero()
```
### Quaternion
A quaternion is a quadruple of numbers and describes a rotation:
$$ q = [q_w, q_x, q_y, q_z] $$
```
from kinematics import Quaternion
from numpy import pi
```
A quaternion is created from an angle and an axis of rotation:
```
q1 = Quaternion.from_angle_axis(0.5 * pi, Vector(0, 0, 1))
q2 = Quaternion.from_angle_axis(0.5 * pi, Vector(1, 0, 0))
print(q1)
print(q2)
```
Multiplying quaternions corresponds to applying the rotations one after another, in this case a rotation of 120 degrees around the axis passing through the point `(1, 1, 1)`:
```
q1 * q2
Quaternion.from_angle_axis(2 / 3 * pi, Vector(1, 1, 1).normalized())
```
Rotating a vector is shortened to the `*` operator:
```
q = Quaternion.from_angle_axis(pi / 2, Vector(0, 0, 1))
q * Vector(1, 2, 3)
```
The quaternion of zero rotation is created with `Quaternion.identity()`:
```
Quaternion.identity() * Vector(1, 2, 3)
```
### Transform
```
from kinematics import Transform
```
A vector-quaternion pair is assembled from a vector and a quaternion:
```
t1 = Transform(v1, q1)
t2 = Transform(v2, q2)
```
The pair consists of a translation and a rotation:
```
t1.translation
t1.rotation
```
A pair with zero translation and rotation is created via `Transform.identity()`:
```
Transform.identity()
```
Adding two pairs describes the sequential application of translation - rotation - translation - rotation:
```
t1 + t2
```
Adding a pair and a vector applies the transformation stored in the pair to the vector:
```
t1 + Vector(1, 0, 0)
```
## Graphics
Let's enable the magic commands needed for working with graphics:
```
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
from IPython.display import HTML
import graphics
%matplotlib notebook
```
Coordinate frames are drawn with `graphics.axis`.
A chain is converted into separate arrays of points `X, Y, Z` with `graphics.chain_to_points`.
```
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.set_xlim([-3, 3]); ax.set_ylim([-3, 3]); ax.set_zlim([-3, 3]);
graphics.axis(ax, Transform.identity(), 3)
graphics.axis(ax, t1)
graphics.axis(ax, t1 + t2)
x, y, z = graphics.chain_to_points([Transform.identity(), t1, t1 + t2])
ax.plot(x, y, z)
fig.show()
```
## Animation
The animation will be stored in a variable, for example `ani`, which can then be displayed as a video clip via `HTML(ani.to_jshtml())`.
Before saving it as a clip, you can rotate the scene with the mouse in advance.
Note that redrawing each frame requires the kernel to be running.
To stop, press the power button in the upper right corner of the 3D scene.
```
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.set_xlim([-1, 1]); ax.set_ylim([-1, 1]); ax.set_zlim([0, 2 * pi])
l, = ax.plot([], [], [])
t = np.arange(1, 2 * pi, 0.1)
frames = 100
def animate(i):
offs = i / frames * 2 * pi
z = t
q = Quaternion.from_angle_axis(t + offs, Vector(0, 0, 1))
v = q * Vector(1, 0, 0)
x = v.x
y = v.y
l.set_data_3d(x, y, z)
ani = animation.FuncAnimation(
fig,
animate,
frames=frames,
interval=100
)
```
Do not forget to stop the model recomputation with the button in the upper right corner of the 3D scene.
```
HTML(ani.to_jshtml())
```
A clip produced this way can be saved as part of the whole notebook, and a local copy can be downloaded via `File -> Download as -> Notebook (.ipynb)`.
## Symbolic computation
The `sympy` package is used for symbolic computation.
```
import sympy as sp
x = sp.symbols("x")
x
```
`sympy` lets you describe computation trees:
```
v = sp.sin(x) ** 2 + sp.cos(x) ** 2
v
```
And simplify them:
```
sp.simplify(v)
u = sp.cos(x) ** 2 - sp.sin(x) ** 2
u
sp.simplify(u)
```
Expressions can easily be differentiated:
```
t = sp.symbols("t")
f = sp.sin(t + 2 * x ** 2)
f
```
The derivative with respect to $t$:
```
sp.diff(f, t)
```
The derivative with respect to $x$:
```
sp.diff(f, x)
```
To describe a quaternion in `sympy`, pass `sympy` (`sp`) as the last argument to `Quaternion.from_angle_axis`:
```
a, b, c = sp.symbols("a, b, c")
angle = sp.symbols("alpha")
q = Quaternion.from_angle_axis(angle, Vector(0, 0, 1), sp)
v = Vector(a, b, c)
rotated = q * v
sp.simplify(rotated.x)
sp.simplify(rotated.y)
sp.simplify(rotated.z)
```
You can also solve equations:
```
alpha, beta = sp.symbols("alpha, beta")
t0 = Transform(
Vector.zero(),
Quaternion.from_angle_axis(alpha, Vector(0, 0, 1), sp)
)
t1 = t0 + Transform(
Vector(beta, 0, 0),
Quaternion.identity()
)
target_x = t1.translation.x
target_x
target_y = t1.translation.y
target_y
x, y = sp.symbols("x, y")
solution = sp.solve(
[
sp.simplify(target_x) - x,
sp.simplify(target_y) - y
],
[
alpha,
beta
]
)
```
The first solution for $\alpha$:
```
solution[0][0]
```
The first solution for $\beta$:
```
solution[0][1]
```
Indeed, if we substitute the solution into, for example, $y$, we get the following:
```
sp.simplify(
t1.translation.y.replace(alpha, solution[0][0]).replace(beta, solution[0][1])
)
```
For $x$ it does not (yet) come out as nicely, so we have to simplify by hand:
```
sp.simplify(
t1.translation.x.replace(alpha, solution[0][0]).replace(beta, solution[0][1])
)
```
It may be worth using your own solution instead, for example:
$$ \alpha = \tan^{-1}(y, x) $$
$$ \beta = \sqrt{x^2 + y^2} $$
```
own_alpha = sp.atan2(y, x)
own_beta = sp.sqrt(x ** 2 + y ** 2)
sp.simplify(t1.translation.x.replace(alpha, own_alpha).replace(beta, own_beta))
sp.simplify(t1.translation.y.replace(alpha, own_alpha).replace(beta, own_beta))
```
# Dragon Real Estate - Price Prediction
```
#load the house dataset
import pandas as pd
housing=pd.read_csv("data.csv")
#sample of the first 5 rows
housing.head()
#housing information
housing.info()
#or find missing value
housing.isnull().sum()
print(housing["CHAS"].value_counts())
housing.describe()
%matplotlib inline
# data visualization
import matplotlib.pyplot as plt
housing.hist(bins=50,figsize=(20,15))
plt.show()
##train test splitting
import numpy as np
def split_train_test(data,test_ratio):
np.random.seed(42)
shuffled=np.random.permutation(len(data))
print(shuffled)
test_set_size=int(len(data) * test_ratio)
test_indices=shuffled[:test_set_size]
train_indices=shuffled[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
train_set,test_set=split_train_test(housing,0.2)
print(f"Rows in train set:{len(train_set)}\nRoows in test set:{len(test_set)}\n")
#train the data
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(housing,test_size=0.2,random_state=42)
print(f"Rows in train set:{len(train_set)}\nRoows in test set:{len(test_set)}\n")
from sklearn.model_selection import StratifiedShuffleSplit
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index,test_index in split.split(housing,housing["CHAS"]):
strat_train_set=housing.loc[train_index]
strat_test_set=housing.loc[test_index]
strat_test_set
strat_test_set.describe()
strat_test_set.info()
strat_test_set["CHAS"].value_counts()
strat_train_set["CHAS"].value_counts()
95/7
376/28
housing=strat_train_set.copy()
```
# looking for correlation
```
corr_matrix=housing.corr()
corr_matrix["MEDV"].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
attributes=["MEDV","RM","ZN","LSTAT"]
scatter_matrix(housing[attributes],figsize=(12,8))
housing.plot(kind="scatter",x="RM",y="MEDV",alpha=0.8)
```
# TRYING OUT ATTRIBUTE COMBINATIONS
```
housing["TAXRM"]=housing["TAX"]/housing["RM"]
housing.head()
corr_matrix=housing.corr()
corr_matrix["MEDV"].sort_values(ascending=False)
housing.plot(kind="scatter",x="TAXRM",y="MEDV",alpha=0.8)
housing=strat_train_set.drop("MEDV",axis=1)
housing_labels=strat_train_set["MEDV"].copy()
#if some missing attribute is present, what do we do?
#1. get rid of the missing data points
#2. get rid of the whole attribute
#3. set the value to some value (0, mean or median)
#1....
#a=housing.dropna(subset=["RM"])
#2....
#housing.drop("RM",axis=1)
#3.....
#median=housing["RM"].median()
#housing["RM"].fillna(median)
from sklearn.impute import SimpleImputer
imputer=SimpleImputer(strategy="median")
imputer.fit(housing)
imputer.statistics_
X=imputer.transform(housing)
housing_tr=pd.DataFrame(X,columns=housing.columns)
housing_tr.describe()
```
# feature scaling
```
#min-max scaling
#standardization
```
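The pipeline in the next section uses `StandardScaler`. For reference, a minimal sketch of both options on a single column (the choice of the `RM` column is only illustrative) could look like this:
```
from sklearn.preprocessing import MinMaxScaler, StandardScaler

# Illustrative single column; housing_tr comes from the imputation step above
rm = housing_tr[["RM"]]

# Min-max scaling: (value - min) / (max - min), squeezes values into [0, 1]
rm_minmax = MinMaxScaler().fit_transform(rm)

# Standardization: (value - mean) / std, gives zero mean and unit variance
rm_standard = StandardScaler().fit_transform(rm)

print(rm_minmax[:3].ravel(), rm_standard[:3].ravel())
```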
# creating pipeline
```
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
my_pipeline=Pipeline([
("imputer",SimpleImputer(strategy="median")),
#.....add as many as you want in your pipeline
("std_scaler",StandardScaler()),
])
housing_num_tr=my_pipeline.fit_transform(housing_tr)
housing_num_tr
housing_num_tr.shape
```
# selecting a desired model for dragon real estate
```
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
#model=LinearRegression()
#model=DecisionTreeRegressor()
model=RandomForestRegressor()
model.fit(housing_num_tr,housing_labels)
some_data=housing.iloc[:5]
some_labels=housing_labels.iloc[:5]
prepared_data=my_pipeline.transform(some_data)
model.predict(prepared_data)
list(some_labels)
```
# evaluating the model
```
from sklearn.metrics import mean_squared_error
housing_predictions=model.predict(housing_num_tr)
mse=mean_squared_error(housing_labels,housing_predictions)
rmse=np.sqrt(mse)
rmse
```
# using better evaluation technique- cross validation
```
from sklearn.model_selection import cross_val_score
scores=cross_val_score(model,housing_num_tr,housing_labels,scoring="neg_mean_squared_error",cv=10)
rmse_scores=np.sqrt(-scores)
rmse_scores
def print_scores(scores):
print("Scores:",scores)
print("Mean:",scores.mean())
print("Standard deviation:",scores.std())
print_scores(rmse_scores)
```
Quiz: convert this notebook into a Python file and run the pipeline using Visual Studio Code
# saving the model
```
from joblib import dump, load
dump(model, 'Dragon.joblib')
```
##testing the model on test data
```
X_test=strat_test_set.drop("MEDV",axis=1)
Y_test=strat_test_set["MEDV"].copy()
X_test_prepared=my_pipeline.transform(X_test)
final_predictions=model.predict(X_test_prepared)
final_mse=mean_squared_error(Y_test,final_predictions)
final_rmse=np.sqrt(final_mse)
print(final_predictions,list(Y_test))
final_rmse
prepared_data[0]
```
##using the model
```
from joblib import dump, load
import numpy as np
model=load('Dragon.joblib')
features=np.array([[-0.43942006, 3.12628155, -1.12165014, -0.27288841, -1.42262747,
-0.24141041, -1.31238772, 2.61111401, -1.0016859 , -0.5778192 ,
-0.97491834, 0.41164221, -0.86091034]])
model.predict(features)
```
```
%matplotlib inline
```
# Probability Calibration for 3-class classification
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
```
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
```
# VIPERS SHAM Project
This notebook is part of the VIPERS-SHAM project:
http://arxiv.org/abs/xxxxxxx
Copyright 2019 by Ben Granett, [email protected]
All rights reserved.
This file is released under the "MIT License Agreement". Please see the LICENSE
file that should have been included as part of this package.
```
%matplotlib inline
import os
from matplotlib import pyplot as plt
plt.style.use("small.style")
from matplotlib.ticker import FormatStrFormatter,ScalarFormatter, MultipleLocator
from matplotlib import colors,cm
import logging
logging.basicConfig(level=logging.INFO)
from scipy import interpolate, integrate
import numpy as np
import growthcalc
import load
import emulator
samples = ['sdss','L1','L2','L3','L4']
redshifts = {'sdss':.06, 'L1':0.6, 'L2':0.7, 'L3':0.8, 'L4':0.9}
rmin = 1
n_components = 2
thresh = 0.1
def chi2_svd(d, cmat, thresh=0.1):
""" """
u,s,v = np.linalg.svd(cmat)
cut = np.abs(s).max()*thresh
o = np.abs(s)>cut
s = s[o]
v = v[o]
d_ = np.dot(v, d)
chi2 = np.sum(d_**2/s)
return chi2
def limits(x, y, t=1):
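    """Return the index of the chi^2 minimum and the indices where chi^2
    first exceeds min+t on either side (t=1 approximates a 1-sigma interval);
    None marks a one-sided limit."""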
best = y.argmin()
x0 = x[best]
ybest = y[best]
thresh = ybest + t
yup = y[best:]
b = best + yup.searchsorted(thresh)
ydown = y[:best][::-1]
a = best - ydown.searchsorted(thresh)
if a < 0:
a = None
if b >= len(x):
b = None
return best, a, b
r_sdss,wp_sdss,cov_sdss = load.load_sdss()
sel = r_sdss > rmin
r_sdss = r_sdss[sel]
wp_sdss = wp_sdss[sel]
cov_sdss = cov_sdss[sel,:][:,sel]
data = [(r_sdss, wp_sdss, cov_sdss)]
for sample in samples[1:]:
r,wp = np.loadtxt('../data/vipers/wp_sM{sample}.txt'.format(sample=sample[1]), unpack=True)
cmat = np.loadtxt('../data/vipers/cov_{sample}.txt'.format(sample=sample))
sel = r > rmin
r = r[sel]
wp = wp[sel]
cmat = cmat[sel,:][:,sel]
data.append((r,wp,cmat))
shamdata = {}
for sample in ['sdss','L1','L2','L3','L4']:
sham = load.load_sham(sample=sample, template="../data/sham400/nz_{sample}/wp_snap{snapshot:7.5f}.txt")
    snapshots = sorted(sham.keys())
for key in snapshots:
r, wp = sham[key]
sel = r > rmin
r = r[sel]
wp = wp[sel]
if not sample in shamdata:
shamdata[sample] = []
shamdata[sample].append((key, r, wp))
a_samples = []
interpolators = []
for key in samples:
y = []
x = []
for a,r,w in shamdata[key]:
sel = r > rmin
r = r[sel]
y.append(w[sel])
x.append(a)
y = np.array(y)
x = np.array(x)
f = emulator.WpInterpolator(x, r, y, n_components)
interpolators.append(f)
a_samples.append(1./(1+redshifts[key]))
a_samples = np.array(a_samples)
G = growthcalc.Growth(amax=10)
plt.figure(figsize=(9,3))
markers = ('.','*','*','*','*')
left = plt.subplot(121)
right = plt.subplot(122)
left.set_xlabel("Snapshot redshift")
left.set_ylabel("$\chi^2$")
left.grid(True)
left.set_yscale('log')
left.yaxis.set_major_formatter(FormatStrFormatter('%g'))
left.xaxis.set_major_locator(MultipleLocator(0.2))
left.xaxis.set_minor_locator(MultipleLocator(0.1))
right.yaxis.set_minor_locator(MultipleLocator(0.1))
right.xaxis.set_minor_locator(MultipleLocator(0.1))
right.set_ylabel("Snapshot redshift")
right.set_xlabel("Sample redshift")
right.grid(True)
right.set_xlim(0,1.1)
right.set_ylim(0,1.1)
right2 = right.twinx()
right2.set_ylabel("$\sigma_8(z)$")
lab_sig8 = np.arange(0.3,1.01,0.05)
lab_z = G.fid_inv(lab_sig8)
zz = np.linspace(-0.3,1.5,100)
for gamma in [0.4, 0.55, 0.7, 0.85]:
z_w = G.fid_inv(G(zz, gamma=gamma))
l, = right.plot(zz, z_w, c='grey', lw=1, zorder=5)
right.text(1.1, 1.15, "$\gamma=%3.2f$"%0.4, color='k', ha='right',va='center', rotation=25,zorder=5,fontsize=12)
right.text(1.1, 1.1, "$%3.2f$"%0.55, color='k', ha='right',va='center', rotation=24,zorder=5,fontsize=12)
right.text(1.1, 0.99, "$%3.2f$"%0.7, color='k', ha='right',va='center', rotation=22,zorder=5,fontsize=12)
right.text(1.1, 0.81,"$%3.2f$"%0.85, color='k', ha='right',va='center', rotation=20,zorder=5,fontsize=12)
print(list(zip(lab_z, lab_sig8)))
right2.set_yticks(lab_z)
right2.set_yticklabels("%3.2f"%x for x in lab_sig8)
right2.set_ylim(0, 1.2)
right2.set_xlim(-0.3, 1.5)
right.set_xlim(0,1.1)
right.set_ylim(-0.3,1.5)
right.set_xticks([0.2,0.4,0.6,0.8,1.])
for i,sample in enumerate(samples):
f = interpolators[i]
chi2 = []
r,wp,cmat = data[i]
for z in zz:
wpsham = f(1./(1+z))
d = wp - wpsham
c = chi2_svd(d, cmat, thresh=thresh)
chi2.append(c)
chi2 = np.array(chi2)
like = np.exp(-0.5*(chi2-chi2.min()))
print "min chi2",sample,chi2.min()
lines = left.plot(zz,chi2)
chi2_ = []
zcent = []
for asham,rsham,wpsham in shamdata[sample]:
d = wp - wpsham
c = chi2_svd(d, cmat, thresh=thresh)
chi2_.append(c)
zcent.append(1./asham - 1)
chi2_ = np.array(chi2_)
print "min chi2",sample,chi2_.min()
left.scatter(zcent,chi2_, marker=markers[i], color=lines[0].get_color(),zorder=10)
j = chi2.argmin()
if sample=='sdss':
left.text(-0.05,1.5,"SDSS",color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
right.text(.08, -0.08, "SDSS", color=lines[0].get_color(),va='center',ha='left',fontsize=12)
elif sample=='L1':
left.text(zz[-1],chi2[-1]*1.1,'M1',color=lines[0].get_color(),va='bottom',ha='right',fontsize=12)
right.text(0.6,0.25,"M1", color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
elif sample=='L2':
left.text(zz[j]+0.08,chi2[j],'M2',color=lines[0].get_color(),va='bottom',ha='left',fontsize=12)
right.text(0.7,0.35,"M2", color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
elif sample=='L3':
left.text(zz[j], chi2[j]*0.9,'M3',color=lines[0].get_color(),va='top',ha='center',fontsize=12)
right.text(0.8,0.35,"M3", color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
elif sample=='L4':
left.text(zz[50],chi2[50]*1.1,'M4',color=lines[0].get_color(),va='bottom',ha='left',fontsize=12)
right.text(0.9,0.6,"M4", color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
a,b,c = limits(zz, chi2)
zobs = redshifts[sample]
if b is None: # upper limit
logging.warning("upper limit! %s %s %s",a,b,c)
pass
elif c is None: # lower limit
logging.warning("lower limit! %s %s %s",a,b,c)
plt.arrow(zobs, zz[b], 0, 1.2-zz[b], lw=2.5, head_width=.015, head_length=0.03, color=lines[0].get_color(), zorder=10)
else:
right.plot([zobs, zobs], [zz[b], zz[c]], lw=3,color=lines[0].get_color(), zorder=10)
right.scatter(zobs, zz[a], marker=markers[i], color=lines[0].get_color(),zorder=10)
right.set_yticks([-0.2,0,0.2,0.4,0.6,0.8,1.0,1.2,1.4])
left.set_ylim(0.04, 50)
right.set_ylim(-0.3,1.5)
right2.set_ylim(-0.3,1.5)
plt.subplots_adjust(left=0.07,right=.92, bottom=0.18)
plt.savefig("../figs/fig8.pdf")
```
## UTAH FORGE PROJECT'S MISSION
Enable cutting-edge research, drilling, and technology testing, as well as allow scientists to identify a replicable, commercial pathway to EGS. In addition to the site itself, the FORGE effort will include a robust instrumentation, data collection, and data dissemination component to capture and share data and activities occurring at FORGE in real time. The innovative research, coupled with an equally innovative collaboration and management platform, is truly a first-of-its-kind endeavor. More details here: https://utahforge.com/
#### The data used in this repository comes from the public data provided by Utah FORGE https://gdr.openei.org/submissions/1111
##### Some functions adapted from https://sainosmichelle.github.io/elements.html
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import lasio
import seaborn as sns
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
```
#### Read main logs in ft
```
main = lasio.read('./localUTAHFORGEDATA/58-32_main.las')
dfmain = main.df()
print(dfmain.index)
dfmain.head(5)
```
#### Read sonic logs in ft
```
sonic = lasio.read('./localUTAHFORGEDATA/58-32_sonic.las')
dfsonicall = sonic.df()
dfsonicall['VpVs']= ((1/dfsonicall['DTCO'])/(1/dfsonicall['DTSM']))
dfsonic = dfsonicall[['DT1R','DT2','DT2R','DT4P','DT4S','DTCO','DTRP','DTRS','DTSM','ITT','PR','SPHI','VpVs']]
print(dfsonic.index)
```
#### Merge main and sonic logs (not repeated curves) using pandas
```
all_logs = pd.concat([dfmain, dfsonic], axis=1, sort=False)
#all_logs.info()
fig, ax = plt.subplots(figsize=(25,8))
sns.heatmap(all_logs.isnull(), ax=ax, cmap="magma")
plt.grid()
plt.show()
```
#### Calculations based on the publication "Well-log based prediction of thermal conductivity of sedimentary successions: a case study from the North German Basin", Fuchs, S., and Foster, A. Geophysical Journal International. 2014. 196, pg 291-311. doi: 10.1093/gji/ggt382
```
#calculate Vsh from GR formula Vsh=(subdata.GR_EDTC-grmin)/(grmax-grmin)
all_logs['Vsh'] = (all_logs['GR'] - min(all_logs['GR']))/(max(all_logs['GR'])- min(all_logs['GR']))
#calculate NPHI matrix from NPHI porosity and DEN porosity neu_m=subdata.NPOR-subdata.DPHZ
all_logs['NPOR_m'] = (all_logs['NPOR']) - (all_logs['DPHZ'])
#calculate eq10
#Matrix-TC equation derived from regression analysis for clastic rock types
all_logs['eq10'] = (5.281-(2.961*all_logs['NPOR_m'])-(2.797*all_logs['Vsh']))/-272.15
#calculate eq11
#Bulk-TC equation derived from regression analysis for subsurface data
all_logs['eq11'] = (4.75-(4.19*all_logs['NPOR'])-(1.81*all_logs['Vsh']))/-272.15
#all_logs.info()
#read discrete data - conversion to ft - depth equal to lower depth interval
tops = pd.read_csv('s3://geotermaldata/S3UTAHFORGEDATA/58-32_tops.csv')
#Thermal Conductivity
TC_coredata = pd.read_csv ('s3://geotermaldata/S3UTAHFORGEDATA/58-32_thermal_conductivity_data.csv')
TC_coredata['Depth'] = (3.28084*TC_coredata['Lower Depth Interval (m)'])
TC_coredata['Matrix_TC']=TC_coredata['matrix thermal conductivity (W/m deg C)']
TC_coredata.set_index('Depth', inplace=True)
#XRD lab data
XRD_coredata = pd.read_csv ('s3://geotermaldata/S3UTAHFORGEDATA/58-32_xray_diffraction_data.csv')
XRD_coredata = XRD_coredata.replace('tr',0)
XRD_coredata['Depth'] = (3.28084*XRD_coredata['Lower Depth Range (m)'])
XRD_coredata.set_index('Depth', inplace=True)
#TC_coredata.tail(15)
XRD_coredata.head()
#basic plot to inspect data
def make_layout_tc (log_df, XRD, TC):
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
fig, axs = plt.subplots(nrows=1, ncols=6, sharey=True, squeeze=True, figsize=(15, 15), gridspec_kw={'wspace': 0.25})
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.975, top=0.7, wspace=0.2, hspace=0.2)
axs[0].set_ylabel('Depth (ft)')
axs[0].invert_yaxis()
axs[0].get_xaxis().set_visible(False)
# First track GR/SP/CALI logs to display
ax1 = axs[0].twiny()
ax1.plot(log_df.GR, log_df.index, '-', color='#2ea869', linewidth=0.5)
ax1.set_xlim(0,450)
ax1.set_xlabel('GR (API)', color='#2ea869')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.15))
ax2 = axs[0].twiny()
ax2.plot(log_df.SP, log_df.index, '-', color='#0a0a0a', linewidth=0.7)
ax2.set_xlim(-200,200)
ax2.set_xlabel('SP(mV)', color='#0a0a0a')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.075))
ax3 = axs[0].twiny()
ax3.plot(log_df.DCAL, log_df.index, '--', color='#9da4a1', linewidth=0.5)
ax3.set_xlim(-5,15)
ax3.set_xlabel('DCAL (in)', color='#9da4a1')
ax3.minorticks_on()
ax3.spines['top'].set_position(('axes', 1.0))
ax3.grid(True)
axs[0].get_xaxis().set_visible(False)
# Second track RHOB/NPHI/PEF logs to display
ax1 = axs[1].twiny()
ax1.plot(log_df.RHOZ, log_df.index, '-', color='#ea0606', linewidth=0.5)
ax1.set_xlim(1.5,3.0)
ax1.set_xlabel('RHOB (g/cm3)', color='#ea0606')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.15))
ax2 = axs[1].twiny()
ax2.plot(log_df.NPHI, log_df.index, '-', color='#1577e0', linewidth=0.5)
ax2.set_xlim(1,0)
ax2.set_xlabel('NPHI (v/v)', color='#1577e0')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.075))
ax3 = axs[1].twiny()
ax3.plot(log_df.PEFZ, log_df.index, '-', color='#1acb20', linewidth=0.5)
ax3.set_xlim(0,15)
ax3.set_xlabel('PEFZ (b/e)', color='#1acb20')
ax3.minorticks_on()
ax3.spines['top'].set_position(('axes', 1.0))
ax3.grid(True)
axs[1].get_xaxis().set_visible(False)
# Third track Resistivities
ax1 = axs[2].twiny()
ax1.plot(log_df.AT10, log_df.index, '-', color='#d6dbd7', linewidth=0.5)
ax1.set_xlim(0.2,20000)
ax1.set_xlabel('AT10 (ohm.m)', color='#d6dbd7')
ax1.set_xscale('log')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.15))
ax2 = axs[2].twiny()
ax2.plot(log_df.AT30, log_df.index, '-', color='#0a0a0a', linewidth=0.5)
ax2.set_xlim(0.2,20000)
ax2.set_xlabel('AT30 (ohm.m)', color='#0a0a0a')
ax2.set_xscale('log')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.075))
ax3 = axs[2].twiny()
ax3.plot(log_df.AT90, log_df.index, '-', color='#ea0606', linewidth=0.5)
ax3.set_xlim(0.2,20000)
ax3.set_xlabel('AT90 (ohm.m)', color='#ea0606')
ax3.set_xscale('log')
ax3.minorticks_on()
ax3.spines['top'].set_position(('axes', 1.0))
ax3.grid(True)
axs[2].get_xaxis().set_visible(False)
    # Fourth track Sonic
ax1 = axs[3].twiny()
ax1.plot(log_df.DTSM, log_df.index, '-', color='#9da4a1', linewidth=0.5)
ax1.set_xlim(200,40)
ax1.set_xlabel('DTS (us/ft)', color='#9da4a1')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.15))
ax2 = axs[3].twiny()
ax2.plot(log_df.DTCO, log_df.index, '-', color='#0a0a0a', linewidth=0.5)
ax2.set_xlim(200,40)
ax2.set_xlabel('DTC (us/ft)', color='#0a0a0a')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.075))
ax3 = axs[3].twiny()
ax3.plot(log_df.VpVs, log_df.index, '-', color='#e1093f', linewidth=0.5)
ax3.set_xlim(1,3)
ax3.set_xlabel('VpVs (unitless)', color='#e1093f')
ax3.minorticks_on()
ax3.spines['top'].set_position(('axes', 1.0))
ax3.grid(True)
axs[3].get_xaxis().set_visible(False)
# Fifth track XRD to display
ax1 = axs[4].twiny()
ax1.plot(XRD.Quartz, XRD.index, 'o', color='#eac406')
ax1.set_xlim(0,100)
ax1.set_xlabel('Quartz %', color='#eac406')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.15))
ax2 = axs[4].twiny()
ax2.plot(XRD['K-feldspar'], XRD.index, 'o', color='#05a9f0')
ax2.set_xlim(0,100)
ax2.set_xlabel('K-feldspar %', color='#05a9f0')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.075))
ax3 = axs[4].twiny()
ax3.plot(XRD['Illite'], XRD.index, 'o', color='#94898c')
ax3.set_xlim(0,100)
ax3.set_xlabel('Illite %', color='#94898c')
ax3.minorticks_on()
ax3.spines['top'].set_position(('axes', 1.0))
ax3.grid(True)
axs[4].get_xaxis().set_visible(False)
# Sixth track Temp/TC to display
ax1 = axs[5].twiny()
ax1.plot(TC.Matrix_TC, TC.index, 'o', color='#6e787c')
ax1.set_xlim(0,5)
ax1.set_xlabel('Matrix TC Measured W/mC', color='#6e787c')
ax1.minorticks_on()
ax1.spines['top'].set_position(('axes', 1.075))
ax2 = axs[5].twiny()
ax2.plot(log_df.CTEM, log_df.index, '-', color='#ed8712')
ax2.set_xlim(0,300)
ax2.set_xlabel('Temp degF', color='#ed8712')
ax2.minorticks_on()
ax2.spines['top'].set_position(('axes', 1.0))
ax2.grid(True)
axs[5].get_xaxis().set_visible(False)
fig.suptitle('Well Data for UTAH FORGE 58-32',weight='bold', fontsize=20, y=0.9);
plt.show()
make_layout_tc (all_logs, XRD_coredata, TC_coredata)
all_logs.to_csv('./localUTAHFORGEDATA/all_logs.csv')
```
##### Copyright 2018 The TF-Agents Authors.
### Get Started
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/tf_agents/colabs/1_dqn_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/agents/blob/master/tf_agents/colabs/1_dqn_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
```
# Note: If you haven't installed the following dependencies, run:
!apt-get install xvfb
!pip install 'gym==0.10.11'
!pip install imageio
!pip install PILLOW
!pip install pyglet
!pip install pyvirtualdisplay
!pip install tf-agents-nightly
!pip install tf-nightly
```
## Introduction
This example shows how to train a [DQN (Deep Q Networks)](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf) agent on the Cartpole environment using the TF-Agents library.

We will walk you through all the components in a Reinforcement Learning (RL) pipeline for training, evaluation and data collection.
## Setup
```
import base64
import imageio
import IPython
import matplotlib
import matplotlib.pyplot as plt
import PIL.Image
import pyvirtualdisplay
import tensorflow as tf
from tf_agents.agents.dqn import dqn_agent
from tf_agents.agents.dqn import q_network
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.environments import trajectory
from tf_agents.metrics import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
tf.compat.v1.enable_v2_behavior()
# Set up a virtual display for rendering OpenAI gym environments.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
```
## Hyperparameters
```
env_name = 'CartPole-v0' # @param
num_iterations = 20000 # @param
initial_collect_steps = 1000 # @param
collect_steps_per_iteration = 1 # @param
replay_buffer_capacity = 100000 # @param
fc_layer_params = (100,)
batch_size = 64 # @param
learning_rate = 1e-3 # @param
log_interval = 200 # @param
num_eval_episodes = 10 # @param
eval_interval = 1000 # @param
```
## Environment
Environments in RL represent the task or problem that we are trying to solve. Standard environments can be easily created in TF-Agents using `suites`. We have different `suites` for loading environments from sources such as the OpenAI Gym, Atari, DM Control, etc., given a string environment name.
Now let us load the CartPole environment from the OpenAI Gym suite.
```
env = suite_gym.load(env_name)
```
We can render this environment to see how it looks. A free-swinging pole is attached to a cart. The goal is to move the cart right or left in order to keep the pole pointing up.
```
#@test {"skip": true}
env.reset()
PIL.Image.fromarray(env.render())
```
The `time_step = environment.step(action)` statement takes `action` in the environment. The `TimeStep` tuple returned contains the environment's next observation and reward for that action. The `time_step_spec()` and `action_spec()` methods in the environment return the specifications (types, shapes, bounds) of the `time_step` and `action` respectively.
```
print('Observation Spec:')
print(env.time_step_spec().observation)
print('Action Spec:')
print(env.action_spec())
```
So, we see that observation is an array of 4 floats: the position and velocity of the cart, and the angular position and velocity of the pole. Since only two actions are possible (move left or move right), the `action_spec` is a scalar where 0 means "move left" and 1 means "move right."
```
time_step = env.reset()
print('Time step:')
print(time_step)
action = 1
next_time_step = env.step(action)
print('Next time step:')
print(next_time_step)
```
Usually we create two environments: one for training and one for evaluation. Most environments are written in pure python, but they can be easily converted to TensorFlow using the `TFPyEnvironment` wrapper. The original environment's API uses numpy arrays, the `TFPyEnvironment` converts these to/from `Tensors` for you to more easily interact with TensorFlow policies and agents.
```
train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
```
## Agent
The algorithm that we use to solve an RL problem is represented as an `Agent`. In addition to the DQN agent, TF-Agents provides standard implementations of a variety of `Agents` such as [REINFORCE](http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf), [DDPG](https://arxiv.org/pdf/1509.02971.pdf), [TD3](https://arxiv.org/pdf/1802.09477.pdf), [PPO](https://arxiv.org/abs/1707.06347) and [SAC](https://arxiv.org/abs/1801.01290).
The DQN agent can be used in any environment which has a discrete action space. To create a DQN Agent, we first need a `Q-Network` that can learn to predict `Q-Values` (expected return) for all actions given an observation from the environment.
We can easily create a `Q-Network` using the specs of the observations and actions. We can specify the layers in the network which, in this example, is the `fc_layer_params` argument set to a tuple of `ints` representing the sizes of each hidden layer (see the Hyperparameters section above).
```
q_net = q_network.QNetwork(
train_env.observation_spec(),
train_env.action_spec(),
fc_layer_params=fc_layer_params)
```
We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.
```
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_step_counter = tf.compat.v2.Variable(0)
tf_agent = dqn_agent.DqnAgent(
train_env.time_step_spec(),
train_env.action_spec(),
q_network=q_net,
optimizer=optimizer,
td_errors_loss_fn=dqn_agent.element_wise_squared_loss,
train_step_counter=train_step_counter)
tf_agent.initialize()
```
## Policies
In TF-Agents, policies represent the standard notion of policies in RL: given a `time_step` produce an action or a distribution over actions. The main method is `policy_step = policy.action(time_step)` where `policy_step` is a named tuple `PolicyStep(action, state, info)`. The `policy_step.action` is the `action` to be applied to the environment, `state` represents the state for stateful (RNN) policies and `info` may contain auxiliary information such as log probabilities of the actions.
Agents contain two policies: the main policy that is used for evaluation/deployment (agent.policy) and another policy that is used for data collection (agent.collect_policy).
```
eval_policy = tf_agent.policy
collect_policy = tf_agent.collect_policy
```
We can also independently create policies that are not part of an agent. For example, a random policy:
```
random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(),
train_env.action_spec())
```
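As a quick sanity check (a small sketch reusing only the objects created above), the random policy can be queried for an action like any other policy:
```
# Ask the random policy for one action on the training environment
time_step = train_env.reset()
action_step = random_policy.action(time_step)
print(action_step.action)
```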
## Metrics and Evaluation
The most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows.
```
#@test {"skip": true}
def compute_avg_return(environment, policy, num_episodes=10):
total_return = 0.0
for _ in range(num_episodes):
time_step = environment.reset()
episode_return = 0.0
while not time_step.is_last():
action_step = policy.action(time_step)
time_step = environment.step(action_step.action)
episode_return += time_step.reward
total_return += episode_return
avg_return = total_return / num_episodes
return avg_return.numpy()[0]
compute_avg_return(eval_env, random_policy, num_eval_episodes)
# Please also see the metrics module for standard implementations of different
# metrics.
```
## Replay Buffer
In order to keep track of the data collected from the environment, we will use the TFUniformReplayBuffer. This replay buffer is constructed using specs describing the tensors that are to be stored, which can be obtained from the agent using `tf_agent.collect_data_spec`.
```
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=train_env.batch_size,
max_length=replay_buffer_capacity)
```
For most agents, the `collect_data_spec` is a `Trajectory` named tuple containing the observation, action, reward etc.
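A quick way to see the exact fields for this DQN agent (a small inspection snippet) is to print the spec:
```
# Inspect the structure of the trajectories the agent expects to collect
print(tf_agent.collect_data_spec)
```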
## Data Collection
Now let us execute the random policy in the environment for a few steps and record the data (observations, actions, rewards etc) in the replay buffer.
```
#@test {"skip": true}
def collect_step(environment, policy):
time_step = environment.current_time_step()
action_step = policy.action(time_step)
next_time_step = environment.step(action_step.action)
traj = trajectory.from_transition(time_step, action_step, next_time_step)
# Add trajectory to the replay buffer
replay_buffer.add_batch(traj)
for _ in range(initial_collect_steps):
collect_step(train_env, random_policy)
# This loop is so common in RL, that we provide standard implementations of
# these. For more details see the drivers module.
```
In order to sample data from the replay buffer, we will create a `tf.data` pipeline which we can feed to the agent for training later. We can specify the `sample_batch_size` to configure the number of items sampled from the replay buffer. We can also optimize the data pipeline using parallel calls and prefetching.
In order to save space, we only store the current observation in each row of the replay buffer. But since the DQN Agent needs both the current and next observation to compute the loss, we always sample two adjacent rows for each item in the batch by setting `num_steps=2`.
```
# Dataset generates trajectories with shape [Bx2x...]
dataset = replay_buffer.as_dataset(
num_parallel_calls=3, sample_batch_size=batch_size, num_steps=2).prefetch(3)
iterator = iter(dataset)
```
## Training the agent
The training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.
The following will take ~5 minutes to run.
```
#@test {"skip": true}
%%time
# (Optional) Optimize by wrapping some of the code in a graph using TF function.
tf_agent.train = common.function(tf_agent.train)
# Reset the train step
tf_agent.train_step_counter.assign(0)
# Evaluate the agent's policy once before training.
avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)
returns = [avg_return]
for _ in range(num_iterations):
# Collect a few steps using collect_policy and save to the replay buffer.
for _ in range(collect_steps_per_iteration):
collect_step(train_env, tf_agent.collect_policy)
# Sample a batch of data from the buffer and update the agent's network.
experience, unused_info = next(iterator)
train_loss = tf_agent.train(experience)
step = tf_agent.train_step_counter.numpy()
if step % log_interval == 0:
print('step = {0}: loss = {1}'.format(step, train_loss.loss))
if step % eval_interval == 0:
avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)
print('step = {0}: Average Return = {1}'.format(step, avg_return))
returns.append(avg_return)
```
## Visualization
### Plots
We can plot return vs global steps to see the performance of our agent. In `CartPole-v0`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 200, the maximum possible return is also 200.
```
#@test {"skip": true}
steps = range(0, num_iterations + 1, eval_interval)
plt.plot(steps, returns)
plt.ylabel('Average Return')
plt.xlabel('Step')
plt.ylim(top=250)
```
### Videos
It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab.
```
def embed_mp4(filename):
"""Embeds an mp4 file in the notebook."""
video = open(filename,'rb').read()
b64 = base64.b64encode(video)
tag = '''
<video width="640" height="480" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4">
Your browser does not support the video tag.
</video>'''.format(b64.decode())
return IPython.display.HTML(tag)
```
The following code visualizes the agent's policy for a few episodes:
```
num_episodes = 3
video_filename = 'imageio.mp4'
with imageio.get_writer(video_filename, fps=60) as video:
for _ in range(num_episodes):
time_step = eval_env.reset()
video.append_data(eval_py_env.render())
while not time_step.is_last():
action_step = tf_agent.policy.action(time_step)
time_step = eval_env.step(action_step.action)
video.append_data(eval_py_env.render())
embed_mp4(video_filename)
```
# Solving 10 Queens using pygenetic
In this example we are going to walk through the usage of GAEngine to solve the N-Queens problem
The objective is to place queens on a single board such that all of them are in safe positions
<b>Each configuration of the board represents a potential candidate solution for the problem</b>
## 1. Chromosome Representation
<img src="nQueens-Chromosome.png" style="width:700px;">
For the given chess board, the chromosome is encoded as the row number in which each queen is present in each column of the chess board. It can also be encoded as the column number in which each queen is present in each row of the chess board (as done in this code)
This can be easily achieved by using the `RangeFactory` of `pygenetic`. <br/>
The `RangeFactory` takes the following parameters
* minValue = minimum value a gene can take = 0 <br/>
* maxValue = maximum value a gene can take = 9 <br/>
* duplicates = if duplicates are allowed = False <br/>
* noOfGenes = number of genes in the chromosome = 10
```
from pygenetic import ChromosomeFactory
factory = ChromosomeFactory.ChromosomeRangeFactory(minValue=0,maxValue=9,noOfGenes=10,duplicates=False)
```
You can also check that the factory works as expected by calling the `createChromosome` function and observing the chromosomes it produces
```
# Code to test if factory works as expected
for i in range(5):
print('Chromosome created: ', factory.createChromosome())
```
## 2. Fitness function
Fitness for a given chromosome is the number of non-intersecting queens for that chess board configuration. Hence the highest fitness for an N x N chess board is N, and we have a maximization GA problem with the aim of achieving fitness value N.
We can easily define such a fitness function in Python, taking a chromosome as a parameter
```
def fitness(board):
fitness = 0
for i in range(len(board)):
isSafe = True
for j in range(len(board)):
if i!=j:
# Shouldn't be present on same row/diagonal
if board[i] == board[j] or abs(board[i] - board[j])==abs(i-j):
isSafe = False
break
if(isSafe==True):
fitness += 1
return fitness
```
We then need to create a `GAEngine` instance from the `pygenetic` package and set the following
* `factory` = the range factory instance we had intially created
* `population_size = 500` would be a good number for this problem
* `cross_prob = 0.8`
* `mut_prob = 0.2`
* `fitness_type = ('equal', 10)` since our objective in this GA is to achieve the fitness value of 10
```
from pygenetic import GAEngine
ga = GAEngine.GAEngine(factory,population_size=500,fitness_type=('equal',10),mut_prob = 0.2,cross_prob = 0.8)
```
We can now add the fitness function we had defined to this `GAEngine` instance
```
ga.setFitnessHandler(fitness)
```
## 3. Determining other attributes of the GA
Many Standard Crossover, Mutation, Selection and Fitness functions are present in the `Utils` module of the `pygenetic` package.
```
from pygenetic import Utils
```
### Crossover
Traditional crossover methods such as 1-point and 2-point crossover cannot be used since they can create duplicate genes in the offspring. In the commonly used `distinct` crossover, the first half of the chromosome is kept the same while the second half is obtained by sequentially traversing the second chromosome and adding elements only if they are not already present.
<img src="nQueens-crossover.png" style="width:700px;">
This can be done using the `addCrossoverHandler` of the pygenetic module which takes as parameters
* crossover_function = the crossover function to be used
* weight = the weightage the crossover function needs to be given (mainly used when multiple crossovers are added)
```
ga.addCrossoverHandler(Utils.CrossoverHandlers.distinct, 4)
```
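For intuition only, a minimal sketch of the distinct-style crossover described above could look like the following (this is an illustrative implementation, not pygenetic's own code):
```
def distinct_crossover_sketch(parent1, parent2):
    # Keep the first half of parent1, then fill the rest with genes from
    # parent2 in order, skipping any gene that is already present.
    half = len(parent1) // 2
    child = list(parent1[:half])
    for gene in parent2:
        if gene not in child:
            child.append(gene)
    return child

print(distinct_crossover_sketch([0, 1, 2, 3, 4], [4, 3, 0, 1, 2]))
```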
### Mutation
The use of the `swap` mutation technique shown in the diagram also ensures that each element in the chromosome remains a unique number and that there are no duplicates. This makes it a suitable mutation function for this problem
<img src="nQueens-mutation.png" style="width:700px">
This can be done using the `addMutationHandler` of the pygenetic module which takes as parameters
* mutation_function = the mutation function to be used
* weight = the weightage the mutation function needs to be given (mainly used when multiple mutations are added)
```
ga.addMutationHandler(Utils.MutationHandlers.swap,2)
```
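Again for intuition, a swap mutation can be sketched as exchanging two randomly chosen positions, which preserves the no-duplicates property (illustrative only, not pygenetic's own implementation):
```
import random

def swap_mutation_sketch(chromosome):
    # Exchange the genes at two distinct random positions
    c = list(chromosome)
    i, j = random.sample(range(len(c)), 2)
    c[i], c[j] = c[j], c[i]
    return c

print(swap_mutation_sketch([0, 1, 2, 3, 4]))
```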
## Selection
The selection function `best` chooses the best (1 - cross_prob) fraction of the population. Hence, this function is one of the possible selection handlers which can be used in our genetic algorithm
```
ga.setSelectionHandler(Utils.SelectionHandlers.best)
```
## 4. Time to Evolve
This can be easily done using the `evolve` function of the GAEngine instance. It takes the `noOfIterations` as a parameter. Let's evolve it for 100 generations
```
ga.evolve(100)
```
We can get the best member by using the `best_fitness` attribute of the `GAEngine`.
It returns a tuple having
* chromosome having the best fitness
* best fitness value
```
best = ga.best_fitness
print(best)
```
We can decode the chromosome into a chess board accordingly
```
def print_board(chromosome):
for i in chromosome:
for x in range(i):
print("-",end=' ')
print('Q', end=' ')
for x in range(len(chromosome)-i-1):
print("-",end=' ')
print()
print('Best Board is')
print_board(ga.best_fitness[0])
```
## 5. Plotting the Statistics
- The functionality for plotting the best, worst, average fitness values across iterations is present in `plot_statistics` function of statistics.py module. The function takes a list of attributes to be plotted.
- These attributes can be `best-fitness`,`worst-fitness`,`avg-fitness`, `'diversity`, `mutation_rate`
- The diversity and mutation rate values over iterations can also be visualized
```
import matplotlib.pyplot as plt
fig = ga.statistics.plot_statistics(['best-fitness','worst-fitness','avg-fitness'])
plt.show()
fig = ga.statistics.plot_statistics(['diversity'])
plt.show()
fig = ga.statistics.plot_statistics(['mutation_rate'])
plt.show()
```

## Data-X: Titanic Survival Analysis
Data from: https://www.kaggle.com/c/titanic/data
**Authors:** Several public Kaggle Kernels, edits by Alexander Fred Ojala & Kevin Li
<img src="data/Titanic_Variable.png">
# Note
Install the xgboost package in your Python environment:
try:
```
$ conda install py-xgboost
```
```
'''
# You can also install the package by running the line below
# directly in your notebook
''';
#!conda install py-xgboost --y
```
## Import packages
```
# No warnings
import warnings
warnings.filterwarnings('ignore') # Filter out warnings
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB # Gaussian Naive Bayes
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier #stochastic gradient descent
from sklearn.tree import DecisionTreeClassifier
import xgboost as xgb
# Plot styling
sns.set(style='white', context='notebook', palette='deep')
plt.rcParams[ 'figure.figsize' ] = 9 , 5
```
### Define fancy plot to look at distributions
```
# Special distribution plot (will be used later)
def plot_distribution( df , var , target , **kwargs ):
row = kwargs.get( 'row' , None )
col = kwargs.get( 'col' , None )
facet = sns.FacetGrid( df , hue=target , aspect=4 , row = row , col = col )
facet.map( sns.kdeplot , var , shade= True )
facet.set( xlim=( 0 , df[ var ].max() ) )
facet.add_legend()
plt.tight_layout()
```
## References to material we won't cover in detail:
* **Gradient Boosting:** http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/
* **Naive Bayes:** http://scikit-learn.org/stable/modules/naive_bayes.html
* **Perceptron:** http://aass.oru.se/~lilien/ml/seminars/2007_02_01b-Janecek-Perceptron.pdf
## Input Data
```
train_df = pd.read_csv('data/train.csv')
test_df = pd.read_csv('data/test.csv')
combine = [train_df, test_df]
# NOTE! When we change train_df or test_df the objects in combine
# will also change
# (combine is only a pointer to the objects)
# combine is used to ensure whatever preprocessing is done
# on training data is also done on test data
```
# Exploratory Data Analysis (EDA)
We will analyze the data to see how we can work with it and what makes sense.
```
train_df
print(train_df.columns)
# preview the data
train_df.head(10)
# General data statistics
train_df.describe()
# Data Frame information (null, data type etc)
train_df.info()
```
### Comment on the Data
<div class='alert alert-info'>
`PassengerId` is a random number and thus does not contain any valuable information. `Survived, Passenger Class, Age Siblings Spouses, Parents Children` and `Fare` are numerical values -- so we don't need to transform them, but we might want to group them (i.e. create categorical variables). `Sex, Embarked` are categorical features that we need to map to integer values. `Name, Ticket` and `Cabin` might also contain valuable information.
</div>
# Preprocessing Data
```
# check dimensions of the train and test datasets
print("Shapes Before: (train) (test) = ", \
train_df.shape, test_df.shape)
# Drop columns 'Ticket', 'Cabin', need to do it for both test
# and training
train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]
print("Shapes After: (train) (test) =", train_df.shape, test_df.shape)
# Check if there are null values in the datasets
print(train_df.isnull().sum())
print()
print(test_df.isnull().sum())
```
# Data Preprocessing
```
train_df.head(5)
```
### Hypothesis
The Title of the person is a feature that can predict survival
```
# List example titles in Name column
train_df.Name[:5]
# from the Name column we will extract title of each passenger
# and save that in a column in the dataset called 'Title'
# if you want to match Titles or names with any other expression
# refer to this tutorial on regex in python:
# https://www.tutorialspoint.com/python/python_reg_expressions.htm
# Create new column called title
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.',\
expand=False)
# Double check that our titles makes sense (by comparing to sex)
pd.crosstab(train_df['Title'], train_df['Sex'])
# same for test set
pd.crosstab(test_df['Title'], test_df['Sex'])
# We see common titles like Miss, Mrs, Mr, Master are dominant, we will
# correct some Titles to standard forms and replace the rarest titles
# with single name 'Rare'
for dataset in combine:
dataset['Title'] = dataset['Title'].\
replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr',\
'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') #Mademoiselle
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') #Madame
# Now that we have more logical titles, and a few groups
# we can plot the survival chance for each title
train_df[['Title', 'Survived']].groupby(['Title']).mean()
# We can also plot it
sns.countplot(x='Survived', hue="Title", data=train_df, order=[1,0])
plt.xticks(range(2),['Made it','Deceased']);
# Title dummy mapping
for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Title)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head()
train_df = train_df.drop(['Name', 'Title', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name', 'Title'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
```
## Gender column
```
# Map Sex to binary categories
for dataset in combine:
dataset['Sex'] = dataset['Sex'] \
.map( {'female': 1, 'male': 0} ).astype(int)
train_df.head()
```
### Handle missing values for age
We will now guess values of age based on sex (male / female)
and socioeconomic class (1st,2nd,3rd) of the passenger.
The row indicates the sex, male = 0, female = 1
More refined estimate than only median / mean etc.
```
guess_ages = np.zeros((2,3),dtype=int) #initialize
guess_ages
# Fill the NA's for the Age columns
# with "qualified guesses"
for idx,dataset in enumerate(combine):
if idx==0:
print('Working on Training Data set\n')
else:
print('-'*35)
print('Working on Test Data set\n')
print('Guess values of age based on sex and pclass of the passenger...')
for i in range(0, 2):
for j in range(0,3):
guess_df = dataset[(dataset['Sex'] == i) \
&(dataset['Pclass'] == j+1)]['Age'].dropna()
# Extract the median age for this group
# (less sensitive) to outliers
age_guess = guess_df.median()
# Convert random age float to int
guess_ages[i,j] = int(age_guess)
print('Guess_Age table:\n',guess_ages)
print ('\nAssigning age values to NAN age values in the dataset...')
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) \
& (dataset.Pclass == j+1),'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
print()
print('Done!')
train_df.head()
# Split into age bands and look at survival rates
train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False)\
.mean().sort_values(by='AgeBand', ascending=True)
# Plot distributions of Age of passangers who survived
# or did not survive
plot_distribution( train_df , var = 'Age' , target = 'Survived' ,\
row = 'Sex' )
# Change Age column to
# map Age ranges (AgeBands) to integer values of categorical type
for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age']=4
train_df.head()
# Note we could just run
# dataset['Age'] = pd.cut(dataset['Age'], 5,labels=[0,1,2,3,4])
# remove AgeBand from before
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
```
# Create variable for Family Size
How did the number of people the person traveled with impact the chance of survival?
```
# SibSp = Number of Sibling / Spouses
# Parch = Parents / Children
for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
# Survival chance with FamilySize
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# Plot it, 1 is survived
sns.countplot(x='Survived', hue="FamilySize", data=train_df, order=[1,0]);
# Binary variable if the person was alone or not
for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
# We will only use the binary IsAlone feature for further analysis
train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head()
# We can also create new features based on intuitive combinations
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(8)
```
# Port the person embarked from
Let's see how that influences chance of survival
```
# To replace Nan value in 'Embarked', we will use the mode
# in 'Embarked'. This will give us the most frequent port
# the passengers embarked from
freq_port = train_df.Embarked.dropna().mode()[0]
freq_port
# Fill NaN 'Embarked' Values in the datasets
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# Let's plot it
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0]);
# Create categorical dummy variables for Embarked values
for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Embarked)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head()
# Drop Embarked
for dataset in combine:
dataset.drop('Embarked', axis=1, inplace=True)
```
## Handle continuous values in the Fare column
```
# Fill the NA values in the Fares column with the median
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head()
# q cut will find ranges equal to the quantile of the data
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & \
(dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & \
(dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
```
## Finished
```
train_df.head(7)
# All features are approximately on the same scale
# no need for feature scaling / normalization
test_df.head(7)
# Check correlation between features
# (uncorrelated features are generally more powerful predictors)
colormap = plt.cm.viridis
plt.figure(figsize=(12,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train_df.astype(float).corr().round(2)\
,linewidths=0.1,vmax=1.0, square=True, cmap=colormap, \
linecolor='white', annot=True);
```
# Next Up: Machine Learning!
Now we will model, predict, and choose an algorithm for the classification task.
We try several different classifiers to model and predict, then choose the best model from:
* Logistic Regression
* KNN
* SVM
* Naive Bayes
* Decision Tree
* Random Forest
* Perceptron
* XGBoost
## Setup Train and Validation Set
```
X = train_df.drop("Survived", axis=1) # Training & Validation data
Y = train_df["Survived"] # Response / Target Variable
# Since we don't have labels for the test data
# this won't be used. It's only for Kaggle Submissions
X_submission = test_df.drop("PassengerId", axis=1).copy()
print(X.shape, Y.shape)
# Split training and test set so that we test on 20% of the data
# Note that our algorithms will never have seen the validation
# data during training. This is to evaluate how good our estimators are.
np.random.seed(1337) # set random seed for reproducibility
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2)
print(X_train.shape, Y_train.shape)
print(X_val.shape, Y_val.shape)
```
## Scikit-Learn general ML workflow
1. Instantiate model object
2. Fit model to training data
3. Let the model predict output for unseen data
4. Compare predictions with the actual output to form an accuracy measure
# Logistic Regression
```
logreg = LogisticRegression() # instantiate
logreg.fit(X_train, Y_train) # fit
Y_pred = logreg.predict(X_val) # predict
acc_log = round(logreg.score(X_val, Y_val) * 100, 2) # evaluate
acc_log
# Support Vector Machines
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_val)
acc_svc = round(svc.score(X_val, Y_val) * 100, 2)
acc_svc
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_val)
acc_knn = round(knn.score(X_val, Y_val) * 100, 2)
acc_knn
# Perceptron
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_val)
acc_perceptron = round(perceptron.score(X_val, Y_val) * 100, 2)
acc_perceptron
# XGBoost
gradboost = xgb.XGBClassifier(n_estimators=1000)
gradboost.fit(X_train, Y_train)
Y_pred = gradboost.predict(X_val)
acc_xgb = round(gradboost.score(X_val, Y_val) * 100, 2)
acc_xgb
# Random Forest
random_forest = RandomForestClassifier(n_estimators=1000)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_val)
acc_random_forest = round(random_forest.score(X_val, Y_val) * 100, 2)
acc_random_forest
# Look at importance of features for random forest
def plot_model_var_imp( model , X , y ):
imp = pd.DataFrame(
model.feature_importances_ ,
columns = [ 'Importance' ] ,
index = X.columns
)
imp = imp.sort_values( [ 'Importance' ] , ascending = True )
imp[ : 10 ].plot( kind = 'barh' )
print ('Training accuracy Random Forest:',model.score( X , y ))
plot_model_var_imp(random_forest, X_train, Y_train)
# How to create a Kaggle submission:
Y_submission = random_forest.predict(X_submission)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_submission
})
submission.to_csv('titanic.csv', index=False)
```
# Legacy code (not used anymore)
```python
# Map title string values to numbers so that we can make predictions
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
# Handle missing values
train_df.head()
```
```python
# Drop the unnecessary Name column (we have the titles now)
train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
```
```python
# Create categorical dummy variables for Embarked values
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head()
```
Diodes
===
The incident flux and the current that is generated by a photodiode subjected to it are related by
$$
\begin{equation}
\begin{split}
I(A)=&\sum_{i,j}P_{i,j}(W)R_{j}(A/W)+D(A)\\
P_{i,j}(W)=&I_{i,j}(Hz)E_{j}(\text{keV})\\
R_{j}(A/W)=&\frac{e(C)}{E_{h}(\text{keV})}[1-e^{-\mu(E_{j})\rho d}]
\end{split}
\end{equation}
$$
where $P$ is the incident power, $R$ the spectral responsivity, $D$ the dark current, $E_i$ the energy of the incident photon, $E_j$ the energy of the detected photon, $E_{h}$ the energy needed to create an electron-hole pair, $I_{i,j}$ the detected flux of line $j$ due to line $i$, $\rho$ the diode density, $\mu$ the mass attenuation coefficient and $d$ the diode thickness.
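As a rough illustration of the responsivity term $R_j$ above, the sketch below evaluates $\frac{e}{E_h}[1-e^{-\mu(E_j)\rho d}]$ for a silicon-like diode. The pair-creation energy, density, thickness and mass attenuation coefficients are placeholder values chosen for illustration, not calibrated numbers.
```python
import numpy as np

# Illustrative, silicon-like parameters (placeholders, not calibrated values)
E_h_eV = 3.66   # energy to create one electron-hole pair (eV)
rho = 2.33      # diode density (g/cm^3)
d = 0.03        # active thickness (cm)

def spectral_responsivity(mu_mass):
    """R (A/W) = e/E_h * [1 - exp(-mu*rho*d)]; since 1 eV = e joules,
    e/E_h reduces to 1/E_h_eV when E_h is expressed in eV."""
    absorbed_fraction = 1.0 - np.exp(-mu_mass * rho * d)
    return absorbed_fraction / E_h_eV

# Hypothetical mass attenuation coefficients (cm^2/g) at a few energies
energies_keV = np.array([2.0, 5.0, 10.0])
mu_mass = np.array([500.0, 40.0, 6.0])
for E, R in zip(energies_keV, spectral_responsivity(mu_mass)):
    print("{:.1f} keV -> R ~ {:.3f} A/W".format(E, R))
```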
The relationship between the detected flux and the flux at the sample position is given by
$$
\begin{equation}
I_{i,j}(Hz)=I_{0}(Hz) w_i Y_{i,j} = I_{s}(Hz)\frac{w_i Y_{i,j}}{\sum_k w_k T_{s}(E_{k})}
\end{equation}
$$
with the following factors
* $I_0$: total flux before detection
* $I_s$: the total flux seen by the sample
* $T_s$: the "transmission" between source and sample (product of several transmission factors and optics efficiency)
* $w_k$: the fraction of primary photons with energy $E_{k}$
* $Y_{i,j}$: the "rate" of detected line $j$ due to source line $i$ (not including detector attenuation)
The line fractions at the sample position are
$$
\begin{equation}
\begin{split}
I_{i,s}=& I_0 w_i T_{s}(E_{i})\\
w_{i,s} =& \frac{I_{i,s}}{\sum_k I_{k,s}} = \frac{w_i T_{s}(E_{i})}{\sum_k w_k T_{s}(E_{k})}
\end{split}
\end{equation}
$$
The relationship between the flux reaching the sample and the current measured by a pn-diode can be summarized as
$$
\begin{equation}
\begin{split}
I(A)=&I_{s}(Hz)C_s(C)+D(A)\\
C_s(C) =& \frac{\sum_{i,j} w_i Y_{i,j}C_j}{\sum_k w_k T_{s}(E_{k})}\\
C_j(C) =& E_{j}(\text{keV})\frac{e(C)}{E_{h}(\text{keV})}[1-e^{-\mu(E_{j})\rho d}]\\
\end{split}
\end{equation}
$$
where $C_s$ is the charge generated per photon reaching the sample and $C_j$ the charge generated per photon reaching the diode. A simplified relationship based on a lookup table can also be used
$$
\begin{equation}
C_s(C) = \sum_i w_i \mathrm{LUT}(E_i)
\end{equation}
$$
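A minimal sketch of such a lookup-table evaluation, assuming the table has already been filled at a few calibration energies (the numbers below are made up) and using linear interpolation in energy:
```python
import numpy as np

# Hypothetical calibration table: energy (keV) -> C_s expressed in elementary charges
lut_energies = np.array([3.0, 5.0, 7.0, 9.0])
lut_Cs = np.array([120.0, 210.0, 310.0, 420.0])   # made-up values

def Cs_from_lut(line_energies, line_fractions):
    """C_s = sum_i w_i * LUT(E_i), with LUT evaluated by linear interpolation."""
    lut_values = np.interp(line_energies, lut_energies, lut_Cs)
    return np.sum(np.asarray(line_fractions) * lut_values)

# Example: a source spectrum with two lines
print(Cs_from_lut([5.0, 7.0], [0.8, 0.2]))
```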
Finally, in order to allow a fast read-out, the current is converted to a frequency by an oscillator
$$
\begin{equation}
I(\text{Hz})=\frac{F_{\text{max}}(Hz)}{V_{\text{max}}(V)}
\frac{V_{\text{max}}(V)}{I_{\text{max}}(A)}I(A)+F_{0}(Hz)
\end{equation}
$$
where $F_{\text{max}}$ is the maximal frequency that can be detected, $F_{0}$ a fixed offset, $V_{\text{max}}$ the maximal output voltage of the ammeter (and input voltage of the oscillator) and $\frac{V_{\text{max}}(V)}{I_{\text{max}}(A)}$ the "gain" of the ammeter. Sometimes $I_{\text{max}}(A)$ is referred to as the diode "gain".
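A small sketch of this current-to-frequency conversion and its inverse, using made-up oscillator and ammeter settings:
```python
F_max = 1e6      # maximal oscillator frequency (Hz) -- made-up value
F_0 = 300.0      # frequency offset (Hz)             -- made-up value
V_max = 10.0     # maximal ammeter output voltage (V)
I_max = 1e-5     # current giving V_max (A); V_max / I_max is the ammeter "gain"

def current_to_frequency(I_amp):
    return (F_max / V_max) * (V_max / I_max) * I_amp + F_0

def frequency_to_current(F_hz):
    return (F_hz - F_0) * I_max / F_max

I = 2e-6
F = current_to_frequency(I)
print(F, frequency_to_current(F))   # round-trips back to 2e-6 A
```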
Absolute diode
--------------
An absolute diode has a spectral responsivity $R(A/W)$ which behaves as theoretically expected
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from spectrocrunch.detectors import diode
det = diode.factory("sxm_ptb")
print(det)
det.model = False
energy = det.menergy
R = det.spectral_responsivity(energy)
plt.plot(energy,R,marker='o',linestyle="",label='measured')
det.model = True
energy = np.linspace(1,10,100)
R = det.spectral_responsivity(energy)
plt.plot(energy,R,label='model')
plt.legend()
plt.xlabel('Energy (keV)')
plt.ylabel('Spectral responsivity (A/W)')
plt.show()
```
Calibrated diode
----------------
The spectral responsivity $R(A/W)$ of a calibrated diode is determined by the difference in response with an absolute diode
```
det = diode.factory("sxm_idet",npop=4)
print(det)
det.model = False
energy = det.menergy
R = det.spectral_responsivity(energy)
plt.plot(energy,R,marker='o',linestyle="",label='measured')
det.model = True
energy = np.linspace(1,12,100)
R = det.spectral_responsivity(energy)
plt.plot(energy,R,label='model')
det.model = False
R = det.spectral_responsivity(energy)
plt.plot(energy,R,label='used')
plt.legend(loc='best')
plt.xlabel('Energy (keV)')
plt.ylabel('Spectral responsivity (A/W)')
plt.show()
```
Direct detection
------------------
When $Y_{i,j}$ only contains transmission factors (i.e. not fluorescence/scattering from a secondary target) the diode measures $I_s$ directly. The relationship between flux $I_s(Hz)$ and diode response $I(Hz)$ is determined by the spectral responsivity which should be known (absolute diode) or calibrated (with respect to an absolute diode):
```
from spectrocrunch.optics import xray as xrayoptics
atmpath = xrayoptics.Filter(material="vacuum", thickness=1)
det = diode.factory("sxm_idet",optics=[atmpath])
print(det)
Is = 3e9
energy = np.linspace(2,10,100)
for gain in [1e7]:
for model in [False,True]:
det.gain = gain
det.model = model
I = det.fluxtocps(energy[:, np.newaxis], Is).to("Hz").magnitude
plt.plot(energy,I,label="{:.0e} V/A{}".\
format(gain," (model)" if model else ""))
plt.gca().axhline(y=det.oscillator.Fmax.to("Hz").magnitude,label="Fmax",\
color='k',linestyle='--')
plt.title("Flux = {:.0e} Hz".format(Is))
plt.legend(loc='best')
plt.xlabel('Energy (keV)')
plt.ylabel('Response (Hz)')
plt.show()
```
Indirect detection
------------------
The conversion factors $Y_{i,j}$ can be calculated from the cross-sections of the secondary target.
### Without optics
When the indirect diode measures the flux downstream from the optics (or there are no optics at all), the relationship between flux and measured count-rate is known (because $T_s$ is known):
```
atmpath = xrayoptics.Filter(material="vacuum", thickness=1)
iodet = diode.factory("sxm_iodet1",optics=[atmpath])
iodet.gain = 1e9
print(iodet.geometry)
Is = 5e9
energy = np.linspace(1,10,50)
I = iodet.fluxtocps(energy[:, np.newaxis], Is).to("Hz").magnitude
plt.plot(energy,I)
plt.gca().axhline(y=iodet.oscillator.Fmax.to("Hz").magnitude,label="Fmax",\
color='k',linestyle='--')
plt.title("Flux = {:.0e} Hz".format(Is))
plt.xlabel('Energy (keV)')
plt.ylabel('Response (Hz)')
plt.show()
```
### With optics
In case the indirect diode is upstream from the optics, transmission $T_s$ needs to be calibrated with a direct diode. This is done by measuring a changing flux at fixed energy, e.g. by scanning a series of attenuators. The flux is calculated from the direct diode and used to calibrate the response of the indirect diode:
```
iodet = diode.factory("sxm_iodet1",optics=[atmpath, "KB"])
iodet.gain = 1e8
idet = diode.factory("sxm_idet")
idet.gain = 1e6
energy = 7
idetresp = np.linspace(3e4,5e4,100)
fluxmeas = idet.cpstoflux(energy,np.random.poisson(idetresp))
iodetresp = np.random.poisson(np.linspace(2e5,3e5,100))
fitinfo = iodet.calibrate(iodetresp,fluxmeas,energy,caliboption="optics")
print(iodet.geometry)
plt.plot(fluxmeas,iodetresp,marker='o',linestyle="")
plt.plot(fluxmeas,iodet.fluxtocps(energy,fluxmeas))
label = "\n".join(["{} = {}".format(k,v) for k,v in fitinfo.items()])
plt.annotate(label,xy=(0.5,0.1),xytext=(0.5,0.1),\
xycoords="axes fraction",textcoords="axes fraction")
plt.title("Gain = {:~.0e}".format(iodet.gain))
plt.xlabel('Flux (Hz)')
plt.ylabel('Response (Hz)')
plt.show()
```
Note that the slope is $C_s(C)$ (the charge generated per photon reaching the sample, expressed here in units of elementary charge) and the intercept $D(A)$ (the dark current of the diode).
### Manual calibration
Calibration can also be done manually for a single flux-response pair. The response is expected to be $I(Hz)$ but it can also be $I(A)$. If a linear interpolation in energy is sufficient, the calibration can also be simplified, in which case it simply stores a lookup table for $C_s(C)$.
```
#Specify quantities manually with units:
#from spectrocrunch.patch.pint import ureg
#current = ureg.Quantity(1e-8,"A")
for simple in [True, False]:
iodet = diode.factory("sxm_iodet1",optics=[atmpath, "KB"],simplecalibration=simple)
iodet.gain = 1e8
# Calibrate with Hz-Hz pair
cps = 100000
flux = 1e9
energy = 6
iodet.calibrate(cps,flux,energy,caliboption="optics")
current = iodet.fluxtocurrent(energy,flux)
# Calibrate with A-Hz pair
energy = 10
current *= 0.5
iodet.calibrate(current,flux,energy,caliboption="optics")
label = "C$_s$ table" if simple else "Calibrated T$_s$"
print(label)
print(iodet)
print("")
energy = np.linspace(6,10,10)
response = [iodet.fluxtocps(en,flux).magnitude for en in energy]
plt.plot(energy,response,label=label)
plt.legend()
plt.title("Gain = {:~.0e}, Flux = {:.0e}".format(iodet.Rout,flux))
plt.xlabel('Energy (keV)')
plt.ylabel('Response (Hz)')
plt.show()
```
# Accessing C Struct Data
This notebook illustrates the use of `@cfunc` to connect to data defined in C.
## Via CFFI
Numba can map simple C structure types (i.e. with scalar members only) into NumPy structured `dtype`s.
Let's start with the following C declarations:
```
from cffi import FFI
src = """
/* Define the C struct */
typedef struct my_struct {
int i1;
float f2;
double d3;
float af4[7];
} my_struct;
/* Define a callback function */
typedef double (*my_func)(my_struct*, size_t);
"""
ffi = FFI()
ffi.cdef(src)
```
We can create `my_struct` data by doing:
```
# Make an array of 3 my_struct
mydata = ffi.new('my_struct[3]')
ptr = ffi.cast('my_struct*', mydata)
for i in range(3):
ptr[i].i1 = 123 + i
ptr[i].f2 = 231 + i
ptr[i].d3 = 321 + i
for j in range(7):
ptr[i].af4[j] = i * 10 + j
```
Using `numba.core.typing.cffi_utils.map_type` we can convert the `cffi` type into a Numba `Record` type.
```
from numba.core.typing import cffi_utils
cffi_utils.map_type(ffi.typeof('my_struct'), use_record_dtype=True)
```
The function type can be mapped in a signature:
```
sig = cffi_utils.map_type(ffi.typeof('my_func'), use_record_dtype=True)
sig
```
and `@cfunc` can take that signature directly:
```
from numba import cfunc, carray
@cfunc(sig)
def foo(ptr, n):
base = carray(ptr, n) # view pointer as an array of my_struct
tmp = 0
for i in range(n):
tmp += base[i].i1 * base[i].f2 / base[i].d3 + base[i].af4.sum()
return tmp
```
Testing the cfunc via the `.ctypes` callable:
```
addr = int(ffi.cast('size_t', ptr))
print("address of data:", hex(addr))
result = foo.ctypes(addr, 3)
result
```
## Manually creating a Numba `Record` type
Sometimes it is useful to create a `numba.types.Record` type directly. The easiest way is to use the `Record.make_c_struct()` method. Using this method, the field offsets are calculated from the natural size and alignment of prior fields.
In the example below, we will manually create the *my_struct* structure from above.
```
from numba import types
my_struct = types.Record.make_c_struct([
# Provides a sequence of 2-tuples i.e. (name:str, type:Type)
('i1', types.int32),
('f2', types.float32),
('d3', types.float64),
('af4', types.NestedArray(dtype=types.float32, shape=(7,)))
])
my_struct
```
Here's another example to demonstrate the offset calculation:
```
padded = types.Record.make_c_struct([
('i1', types.int32),
('pad0', types.int8), # padding bytes to move the offsets
('f2', types.float32),
('pad1', types.int8), # padding bytes to move the offsets
('d3', types.float64),
])
padded
```
Notice how the byte at `pad0` and `pad1` moves the offset of `f2` and `d3`.
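For comparison, a similar padding effect can be reproduced with a plain NumPy structured dtype using C-style alignment, where NumPy computes the field offsets itself. This is only an analogy to illustrate natural alignment -- `Record.make_c_struct` performs its own offset calculation:
```python
import numpy as np

# C-aligned structured dtype mirroring the padded record above
aligned = np.dtype([('i1', np.int32),
                    ('pad0', np.int8),
                    ('f2', np.float32),
                    ('pad1', np.int8),
                    ('d3', np.float64)], align=True)

for name in aligned.names:
    field_dtype, offset = aligned.fields[name]
    print(name, "offset:", offset, "size:", field_dtype.itemsize)
print("total itemsize:", aligned.itemsize)
```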
A function signature can also be created manually:
```
new_sig = types.float64(types.CPointer(my_struct), types.uintp)
print('signature:', new_sig)
# Our new signature matches the previous auto-generated one.
print('signature matches:', new_sig == sig)
```
# Final Project
## Daniel Blessing
## Can we use historical data from professional league of legends games to try and predict the results of future contests?
## Load Data
```
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier # ensemble models we're trying out
from sklearn.model_selection import train_test_split # train test split for CV
from sklearn.metrics import accuracy_score, f1_score # two evaluation metrics for binary classification
from sklearn.pipeline import Pipeline # robust pipeline
from sklearn.impute import *
from sklearn.preprocessing import *
from sklearn.compose import * # for composing preprocessing steps (ColumnTransformer)
from sklearn.base import BaseEstimator #for randomized CV
from sklearn.model_selection import RandomizedSearchCV
# data processing
import numpy as np
import pandas as pd
# load data from local else download it from github
filename = 'Dev.csv'
remote_location = 'https://raw.githubusercontent.com/Drblessing/predcting_LoL/master/Lol.csv'
try:
# Local version
df = pd.read_csv(filename,index_col = 0)
except (FileNotFoundError, pd.errors.ParserError):
# Grab the remote file and save it
df = pd.read_csv(remote_location,index_col = 0)
df.to_csv(filename)
# create X,y datasets and train_test split them
y = df['bResult']
df = df.drop(columns = ['bResult'])
X = df
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, shuffle=True,random_state = 42)
# Legacy Feature Engineering
'''pregame_data = Lol[['bResult','blueTopChamp','blueJungleChamp','blueMiddleChamp','blueADCChamp','blueSupportChamp',
'blueBans','redTopChamp','redJungleChamp','redMiddleChamp','redADCChamp','redSupportChamp','redBans']]
# process list of bans into individual columns
pregame_data_b = pregame_data.assign(names=pregame_data.blueBans.str.split(","))
pregame_data_r = pregame_data.assign(names=pregame_data.redBans.str.split(","))
blue_bans = pregame_data_b.names.apply(pd.Series)
red_bans = pregame_data_r.names.apply(pd.Series)
blue_names = {0: "b1", 1: "b2",2:"b3",3:"b4",4:"b5"}
red_names = {0:"r1",1:"r2",2:"r3",3:"r4",4:"r5"}
blue_bans = blue_bans.rename(columns=blue_names)
red_bans = red_bans.rename(columns=red_names)
pregame_data = pd.concat([pregame_data, blue_bans,red_bans], axis=1)
# drop legacy columns
pregame_data = pregame_data.drop(columns = ['blueBans','redBans'])
# define y and drop it
y = pregame_data['bResult']
pregame_data = pregame_data.drop(columns = ['bResult'])
# fix blue bans strings
pregame_data.b1 = pregame_data.b1.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.b2 = pregame_data.b2.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.b3 = pregame_data.b3.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.b4 = pregame_data.b4.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.b5 = pregame_data.b5.str.replace('[','').str.replace(']','').str.replace("'",'')
# fix red bans strings
pregame_data.r1 = pregame_data.r1.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.r2 = pregame_data.r2.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.r3 = pregame_data.r3.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.r4 = pregame_data.r4.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.r5 = pregame_data.r5.str.replace('[','').str.replace(']','').str.replace("'",'')''';
```
## Visualizations
```
# visualizations
import matplotlib.pyplot as plt
from collections import Counter
x = ['Blue win','Red win']
heights = [0,0]
heights[0] = sum(y)
heights[1] = len(y) - sum(y)
plt.bar(x,heights);
plt.ylabel('Number of Games won');
plt.xlabel('Team');
plt.title('Number of wins by team color in competitive LoL 2015-2017');
# check general accuracy of naive model
bw = sum(y)/len(y)
print(f'Percentage of games won by blue team: {bw*100:.2f} %')
# load champs
champs = Counter(X['blueADCChamp'])
l = champs.keys()
v = champs.values()
# get rid of one off champs
l = [l_ for l_ in l if champs[l_] > 1]
v = [v_ for v_ in v if v_ > 1]
plt.pie(v,labels=l);
plt.title('Distribution of ADC champs for competitive Lol 2015-2017')
```
## Model Building
```
# define categorical variables, all of our data
categorical_columns = (X.dtypes == object)
# impute missing values and hot encode categories
cat_pipe = Pipeline([('imputer', SimpleImputer(strategy = 'constant', fill_value='Unknown', add_indicator=True)),
('ohe', OneHotEncoder(handle_unknown='ignore'))])
# process categorical variables
preprocessing = ColumnTransformer([('categorical', cat_pipe, categorical_columns)], remainder='passthrough')
# Helper class for RandomizedSearchCV
class DummyEstimator(BaseEstimator):
"Pass through class, methods are present but do nothing."
def fit(self): pass
def score(self): pass
# create pipeline
pipe = Pipeline(steps = [('preprocessing', preprocessing),
('clf', DummyEstimator())])
search_space = [
{'clf': [ExtraTreesClassifier(n_jobs=-1,random_state=42)],
'clf__criterion': ['gini', 'entropy'],
'clf__min_samples_leaf': np.linspace(1, 30, 5, dtype=int),
'clf__bootstrap': [True, False],
'clf__class_weight': [None, 'balanced', 'balanced_subsample'],
'clf__n_estimators': np.linspace(50, 500, 8, dtype=int)},
{'clf': [RandomForestClassifier(n_jobs=-1,random_state=42)],
'clf__criterion': ['gini', 'entropy'],
'clf__min_samples_leaf': np.linspace(1, 10, 4, dtype=int),
'clf__bootstrap': [True, False],
'clf__class_weight': [None, 'balanced', 'balanced_subsample'],
'clf__n_estimators': np.linspace(50, 300, 5, dtype=int)}]
gs = RandomizedSearchCV(pipe,
search_space,
scoring='accuracy', # accuracy for game prediction
n_iter=30,
cv=5,
n_jobs=-1)
gs.fit(X, y);
gs.best_score_, gs.best_params_
# Results:
'''
(0.5510498687664042,
{'clf__n_estimators': 178,
'clf__min_samples_leaf': 30,
'clf__criterion': 'gini',
'clf__class_weight': None,
'clf__bootstrap': True,
'clf': ExtraTreesClassifier(bootstrap=True, min_samples_leaf=30, n_estimators=178,
n_jobs=-1, random_state=42)})'''
```
## Evaluation Metric
```
pipe = Pipeline([('preprocessing', preprocessing),
('clf', ExtraTreesClassifier(
bootstrap = True,
min_samples_leaf = 15,
n_estimators = 114,
n_jobs = -1,
criterion = 'gini',
class_weight = None,
random_state=42))])
pipe.fit(X_train,y_train);
y_pred = pipe.predict(X_val)
accuracy = accuracy_score(y_val,y_pred)
f1 = f1_score(y_val,y_pred)
print(f"accuracy: {accuracy:,.6f}")
print(f"f1: {f1:,.6f}")
```
## Results
```
print(f'Model accuracy: {accuracy*100:.2f} %')
print(f'Naive accuracy: {bw*100:.2f} %')
print(f'Prediction improvement from model: {abs(bw-accuracy)*100:.2f} %')
```
[source](../../api/alibi_detect.ad.adversarialae.rst)
# Adversarial Auto-Encoder
## Overview
The adversarial detector follows the method explained in the [Adversarial Detection and Correction by Matching Prediction Distributions](https://arxiv.org/abs/2002.09364) paper. Usually, autoencoders are trained to find a transformation $T$ that reconstructs the input instance $x$ as accurately as possible with loss functions that are suited to capture the similarities between $x$ and $x'$ such as the mean squared reconstruction error. The novelty of the adversarial autoencoder (AE) detector relies on the use of a classification model-dependent loss function based on a distance metric in the output space of the model to train the autoencoder network. Given a classification model $M$ we optimise the weights of the autoencoder such that the [KL-divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between the model predictions on $x$ and on $x'$ is minimised. Without the presence of a reconstruction loss term $x'$ simply tries to make sure that the prediction probabilities $M(x')$ and $M(x)$ match without caring about the proximity of $x'$ to $x$. As a result, $x'$ is allowed to live in different areas of the input feature space than $x$ with different decision boundary shapes with respect to the model $M$. The carefully crafted adversarial perturbation which is effective around $x$ does not transfer to the new location of $x'$ in the feature space, and the attack is therefore neutralised. Training of the autoencoder is unsupervised since we only need access to the model prediction probabilities and the normal training instances. We do not require any knowledge about the underlying adversarial attack and the classifier weights are frozen during training.
The detector can be used as follows:
* An adversarial score $S$ is computed. $S$ equals the K-L divergence between the model predictions on $x$ and $x'$.
* If $S$ is above a threshold (explicitly defined or inferred from training data), the instance is flagged as adversarial.
* For adversarial instances, the model $M$ uses the reconstructed instance $x'$ to make a prediction. If the adversarial score is below the threshold, the model makes a prediction on the original instance $x$.
This procedure is illustrated in the diagram below:

The method is very flexible and can also be used to detect common data corruptions and perturbations which negatively impact the model performance. The algorithm works well on tabular and image data.
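As a schematic of the detect-and-correct rule described above (this is not the library implementation), the sketch below assumes `model` and `ae` are callables returning prediction probabilities and reconstructions respectively, and that a `threshold` has already been chosen:
```python
import numpy as np

def kl_divergence(p, q, eps=1e-12):
    # K-L divergence between rows of two probability arrays
    p, q = np.clip(p, eps, 1.0), np.clip(q, eps, 1.0)
    return np.sum(p * np.log(p / q), axis=-1)

def detect_and_correct(x, model, ae, threshold):
    """Schematic of the adversarial detection and correction decision rule."""
    x_recon = ae(x)                         # reconstruction x'
    p_orig = model(x)                       # model predictions on x
    p_recon = model(x_recon)                # model predictions on x'
    score = kl_divergence(p_orig, p_recon)  # adversarial score S
    is_adversarial = score > threshold
    # use the predictions on x' for flagged instances, on x otherwise
    corrected = np.where(is_adversarial[:, None], p_recon, p_orig)
    return corrected, score, is_adversarial
```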
## Usage
### Initialize
Parameters:
* `threshold`: threshold value above which the instance is flagged as an adversarial instance.
* `encoder_net`: `tf.keras.Sequential` instance containing the encoder network. Example:
```python
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(32, 32, 3)),
Conv2D(32, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Conv2D(64, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Conv2D(256, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Flatten(),
Dense(40)
]
)
```
* `decoder_net`: `tf.keras.Sequential` instance containing the decoder network. Example:
```python
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(40,)),
Dense(4 * 4 * 128, activation=tf.nn.relu),
Reshape(target_shape=(4, 4, 128)),
Conv2DTranspose(256, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Conv2DTranspose(64, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Conv2DTranspose(3, 4, strides=2, padding='same',
activation=None, kernel_regularizer=l1(1e-5))
]
)
```
* `ae`: instead of using a separate encoder and decoder, the AE can also be passed as a `tf.keras.Model`.
* `model`: the classifier as a `tf.keras.Model`. Example:
```python
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(output_dim, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
* `hidden_layer_kld`: dictionary with as keys the number of the hidden layer(s) in the classification model which are extracted and used during training of the adversarial AE, and as values the output dimension for the hidden layer. Extending the training methodology to the hidden layers is optional and can further improve the adversarial correction mechanism.
* `model_hl`: instead of passing a dictionary to `hidden_layer_kld`, a list with tf.keras models for the hidden layer K-L divergence computation can be passed directly.
* `w_model_hl`: Weights assigned to the loss of each model in `model_hl`. Also used to weight the K-L divergence contribution for each model in `model_hl` when computing the adversarial score.
* `temperature`: Temperature used for model prediction scaling. Temperature <1 sharpens the prediction probability distribution which can be beneficial for prediction distributions with high entropy.
* `data_type`: can specify data type added to metadata. E.g. *'tabular'* or *'image'*.
Initialized adversarial detector example:
```python
from alibi_detect.ad import AdversarialAE
ad = AdversarialAE(
encoder_net=encoder_net,
decoder_net=decoder_net,
model=model,
temperature=0.5
)
```
### Fit
We then need to train the adversarial detector. The following parameters can be specified:
* `X`: training batch as a numpy array.
* `loss_fn`: loss function used for training. Defaults to the custom adversarial loss.
* `w_model`: weight on the loss term minimizing the K-L divergence between model prediction probabilities on the original and reconstructed instance. Defaults to 1.
* `w_recon`: weight on the mean squared error reconstruction loss term. Defaults to 0.
* `optimizer`: optimizer used for training. Defaults to [Adam](https://arxiv.org/abs/1412.6980) with learning rate 1e-3.
* `epochs`: number of training epochs.
* `batch_size`: batch size used during training.
* `verbose`: boolean whether to print training progress.
* `log_metric`: additional metrics whose progress will be displayed if verbose equals True.
* `preprocess_fn`: optional data preprocessing function applied per batch during training.
```python
ad.fit(X_train, epochs=50)
```
The threshold for the adversarial score can be set via ```infer_threshold```. We need to pass a batch of instances $X$ and specify what percentage of those we consider to be normal via `threshold_perc`. Even if we only have normal instances in the batch, it might be best to set the threshold value a bit lower (e.g. $95$%) since the model could have misclassified training instances, leading to a higher score if the reconstruction picked up features from the correct class, or some instances might look adversarial in the first place.
```python
ad.infer_threshold(X_train, threshold_perc=95, batch_size=64)
```
### Detect
We detect adversarial instances by simply calling `predict` on a batch of instances `X`. We can also return the instance level adversarial score by setting `return_instance_score` to True.
The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys:
* `is_adversarial`: boolean whether instances are above the threshold and therefore adversarial instances. The array is of shape *(batch size,)*.
* `instance_score`: contains instance level scores if `return_instance_score` equals True.
```python
preds_detect = ad.predict(X, batch_size=64, return_instance_score=True)
```
### Correct
We can immediately apply the procedure sketched out in the above diagram via ```correct```. The method also returns a dictionary with `meta` and `data` keys. On top of the information returned by ```detect```, 3 additional fields are returned under `data`:
* `corrected`: model predictions by following the adversarial detection and correction procedure.
* `no_defense`: model predictions without the adversarial correction.
* `defense`: model predictions where each instance is corrected by the defense, regardless of the adversarial score.
```python
preds_correct = ad.correct(X, batch_size=64, return_instance_score=True)
```
## Examples
### Image
[Adversarial detection on CIFAR10](../../examples/ad_ae_cifar10.nblink)
# **Testing for Stuctural Breaks in Time Series Data with a Chow Test**
## **I. Introduction**
I've written a bit on forecasting future stock prices and distributions of future stock prices. I'm proud of the models I built for those articles, but they will eventually be no more predictive than a monkey throwing darts at a board. Perhaps they'll perform worse.
This will happen because the underlying system, of which we are modeling an aspect, will change. For an extreme example, a company whose stock we are trying to model goes out of business. The time series just ends. For a more subtle example, let's look at the relationship between oil prices and dollar exchange rates.
I took historical real USD exchange rates measured against a broad basket of currencies and oil prices (WTI) going from January 1986 to February 2019 and indexed them to January 2000. I then took the natural logarithm of each, because this would give us the growth rate if we differenced the data and is a common transformation with time series data (and for dealing with skewed variables in non-time series analysis).
As you can see, they appear inversely related over time. When one goes up, the other goes down. This makes sense because when people outside the US want to buy oil, they often need to use USD for the transaction. Oil prices rise and they need to exchange more of their domestic currency to buy the same amount. This in turn strengthens the dollar and the exchange rate goes down as demand for USD increases and supply of foreign currencies increase. (An exchange rate of 1 means it takes 1 USD to buy 1 unit of foreign currency. If it is 2, it takes 2 USD to buy 1 unit of foreign currency. If it is 0.5, 1 USD buys 2 units of the foreign currency).
But, does the inverse relationship remain constant over time? Are there periods where a movement in one corresponds to a larger movement in the other relative to other times? This type of change in the relationship between oil prices and USD exchange rates could occur for a variety of reasons. For example, a major currency crisis across a region driving up demand for safe USD, while reducing demand for oil as the economy weakens. Perhaps a bunch of currencies disappear and one major one forms as the countries join a monetary union, like the EU.
```
# for linear algebra and mathematical functions
import numpy as np
# for dataframe manipulation
import pandas as pd
# for data visualization
import matplotlib.pyplot as plt
# for setting plot size
import matplotlib as mpl
# for changing the plot size in the Jupyter Notebook output
%matplotlib inline
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# reads in data on historical oil prices and dollar exchange rates
full_data = pd.read_csv('Oil Data.csv')
# generates a variable for the growth rate of the Real Trade Weighted U.S. Dollar Index:
# Broad, Goods indexed to January 2000
index_value = float(full_data.loc[full_data.Date == '01-2000']['TWEXBPA'].values)
full_data['broad_r'] = list(full_data.TWEXBPA / index_value)
full_data['ebroad_r'] = np.log(full_data.broad_r)
# generates a variable for the growth rate of the Real Trade Weighted U.S. Dollar Index:
# Major Currencies, Goods indexed to January 2000
index_value = float(full_data.loc[full_data.Date == '01-2000']['TWEXMPA'].values)
full_data['major_r'] = list(full_data.TWEXMPA / index_value)
full_data['emajor_r'] = np.log(full_data.major_r)
# generates a variable for the growth rate of the Real Trade Weighted U.S. Dollar Index:
# Other Important Trading Partners, Goods indexed to January 2000
index_value = float(full_data.loc[full_data.Date == '01-2000']['TWEXOPA'].values)
full_data['oitp_r'] = list(full_data.TWEXOPA / index_value)
full_data['eoitp_r'] = np.log(full_data.oitp_r)
# generates a variable for the growth rate of Crude Oil Prices: West Texas Intermediate
# (WTI) - Cushing, Oklahoma indexed to January 2000
index_value = float(full_data.loc[full_data.Date == '01-2000']['MCOILWTICO'].values)
# adjusts for inflation prior to indexing to January 2000
full_data['po_r'] = full_data.MCOILWTICO / (full_data.Fred_CPIAUCNS / 100) / index_value
full_data['epo_r'] = np.log(full_data.po_r)
# creates a column for month
full_data.Date = pd.to_datetime(full_data.Date)
full_data['month'] = full_data.Date.map(lambda x: x.month)
# creates a list of all the variables of interest
variables_to_keep = ['epo_r', 'Date', 'month', 'ebroad_r', 'emajor_r', 'eoitp_r']
# creates a new dataframe containing only the variables of interest
my_data = full_data[variables_to_keep]
# creates dummy variables for each month, dropping January to avoid multicollinearity
my_data = pd.concat([my_data, pd.get_dummies(my_data.month, drop_first = True)], axis = 1)
# sets the Date as the index
my_data.index = pd.DatetimeIndex(my_data.Date)
# drops these columns for a tidy data set
my_data = my_data.drop(['month', 'Date'], axis = 1)
# the code below plots the real oil price growth rate with the USD vs Broad Currency Basket
# exchange growth rate
# Create some mock data
time = my_data.index
epo_r = my_data.epo_r
ebroad_r = my_data.ebroad_r
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel('Date (Monthly)')
ax1.set_ylabel('Natural Log of Oil Prices', color = color)
ax1.plot(time, epo_r, color=color)
ax1.tick_params(axis = 'y', labelcolor = color)
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = 'tab:blue'
ax2.set_ylabel('Natural Log of USD Exchange Rate vs. Broad Currency Basket',
color = color) # we already handled the x-label with ax1
ax2.plot(time, ebroad_r, color = color)
ax2.tick_params(axis = 'y', labelcolor = color)
plt.title('Natural Log of Oil Prices and USD Exchange Rates indexed to January 2000')
fig.tight_layout() # otherwise the right y-label is slightly clipped
plt.show()
```
## **II. Detecting a Suspected Break at a Known Date: The Chow Test**
The Chow Test tests if the true coefficients in two different regression models are equal. The null hypothesis is they are equal and the alternative hypothesis is they are not. Another way of saying this is that the null hypothesis is the model before the possible break point is the same as the model after the possible break point. The alternative hypothesis is the model fitting each periods are different.
It formally tests this by performing an F-test on the Chow statistic,

$$
C = \frac{\left(RSS_{pooled} - (RSS_1 + RSS_2)\right)/(k+1)}{(RSS_1 + RSS_2)/(N_1 + N_2 - 2(k+1))},
$$

where $k$ is the number of independent variables ($k+1$ therefore counts the constant as well), $N_1$ and $N_2$ are the numbers of observations in subsamples 1 and 2, and $RSS_{pooled}$, $RSS_1$ and $RSS_2$ are the residual sums of squares of the pooled model and of the models fit to each subsample.
Each of the models (pooled, 1, 2) must have normally distributed errors with mean 0, as well as independent and identically distributed errors, to satisfy the Gauss-Markov assumptions.
I use the Chow test to test for a structural break at the introduction of the Euro in January 1999. This seems a reasonable possible structural break, because a handful of major currencies, and some minor ones, disappeared and a new very important currency was created. The creation of the Euro certainly qualifies as a major shock to currency markets and perhaps to the oil vs. dollar relationship.
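The `chowtest` package imported below is my own. As a rough sketch of what the statistic looks like in code (using plain NumPy least squares and SciPy's F-distribution; not necessarily how the package implements it):
```python
import numpy as np
from scipy import stats

def chow_statistic(X1, y1, X2, y2):
    """Chow statistic and p-value for two subsamples (X's are 2-D regressor arrays)."""
    def rss(X, y):
        Xc = np.column_stack([np.ones(len(X)), X])      # add a constant column
        beta, *_ = np.linalg.lstsq(Xc, y, rcond=None)
        resid = y - Xc @ beta
        return float(resid @ resid), Xc.shape[1]        # k counts regressors plus constant
    rss1, k = rss(X1, y1)
    rss2, _ = rss(X2, y2)
    rss_pooled, _ = rss(np.vstack([X1, X2]), np.concatenate([y1, y2]))
    n1, n2 = len(y1), len(y2)
    num = (rss_pooled - (rss1 + rss2)) / k
    den = (rss1 + rss2) / (n1 + n2 - 2 * k)
    stat = num / den
    p_value = stats.f.sf(stat, k, n1 + n2 - 2 * k)
    return stat, p_value
```
The `chowtest` package used in the rest of this notebook also provides the `linear_residuals` helper used for the residual diagnostics below.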
```
#imports the chowtest package as ct, which is written by yours truly
import chowtest as ct
```
Translated into plain English, the independently and identically distributed residual requirement means constant mean and variance without serial correlation in the residuals. To test for this, I tested for auto-correlation and heteroskedasticity in my residuals. I did the same tests on their growth rates (the difference in natural logs). I also plotted the residuals and estimated their mean.
The auto-correlation function plots strongly suggest that the residuals from the simple OLS model have strong auto-correlation, while the residuals from the OLS of the growth rates are not auto-correlated.
```
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# to reduce typing, I saved ebroad_r as X and epo_r as y
X = pd.DataFrame(my_data[['ebroad_r']])
y = pd.DataFrame(my_data.epo_r)
# to reduce typing, I saved the differenced ebroad_r as X_diff and epo_r as y_diff
X_diff = X.diff().dropna()
y_diff = y.diff().dropna()
# saves the residuals from the undifferenced X and y OLS model
un_diffed_resids = ct.linear_residuals(X, y).residuals
# saves the residuals from the differenced X and y OLS model
diffed_resids = ct.linear_residuals(X_diff, y_diff).residuals
# plots the ACF for the residuals of the OLS regression of epo_r on ebroad_r
pd.plotting.autocorrelation_plot(un_diffed_resids)
plt.show()
# plots the ACF for the residuals of the OLS regression of the differenced epo_r on
# differenced ebroad_r
pd.plotting.autocorrelation_plot(diffed_resids)
plt.show()
```
The Breusch-Pagan Test shows that heteroskedasticity is present in the OLS model. It is also present in the model of growth rates, but is much less severe.
```
from statsmodels.stats.diagnostic import het_breuschpagan
# tests for heteroskedasticity in the full-sample residuals
print('F-statistic for the Breusch-Pagan Test the OLS model: ' +
str(het_breuschpagan(un_diffed_resids, X)[2]))
print('p-value for the Breusch-Pagan F-Test the OLS model: ' +
str(het_breuschpagan(un_diffed_resids, X)[3]))
# tests for heteroskedasticity in the full-sample residuals
print('F-statistic for the Breusch-Pagan Test the growth rate OLS model: ' +
str(het_breuschpagan(diffed_resids, X_diff)[2]))
print('p-value for the Breusch-Pagan F-Test the growth rate OLS model: ' +
str(het_breuschpagan(diffed_resids, X_diff)[3]))
```
The histograms of residuals show a bell-curve shape to the residuals of the OLS model looking at growth rates. The histogram of residuals for the regular OLS model show a possibly double-humped shape.
```
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# plots the histogram of residuals
plt.hist(un_diffed_resids)
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# plots the histogram of residuals
plt.hist(diffed_resids)
```
The normality tests for the residuals from each model are both failures.
```
# imports the normality test from scipy.stats
from scipy.stats import normaltest
# performs the normality test on the residuals from the non-differenced OLS model
print(normaltest(un_diffed_resids))
# performs the normality test on the residuals from the differenced OLS model
print(normaltest(diffed_resids))
```
Despite failing the normality tests, the mean of the residuals of both models are essentially 0. The model of growth rates has residuals that are independently distributed and bell-shaped based on the ACF plot, even though there is evidence of heteroskedasticity at the 0.05 significance level. For these reasons, I will proceed with my analysis using the growth rate model and assume my Chow Test result will be robust to the non-normality of residuals.
```
print('Mean of OLS residuals: ' + str(np.mean(un_diffed_resids)))
print('Mean of OLS model of growth rate residuals: ' + str(np.mean(diffed_resids)))
```
I come to the same conclusions for the models estimated on the periods before and after the split date, and proceed with the Chow Test.
```
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# creates split dates for our sample period
stop = '1999-01-01'
start = '1999-02-01'
# plots the ACF for the residuals of the OLS regression of the differenced epo_r on
# differenced ebroad_r
pd.plotting.autocorrelation_plot(ct.linear_residuals(X_diff.loc[:stop],
y_diff.loc[:stop]).residuals)
plt.show()
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# tests for heteroskedasticity in the full-sample residuals
print('F-statistic for the Breusch-Pagan Test the growth rate OLS model: ' +
str(het_breuschpagan(ct.linear_residuals(X_diff.loc[:stop],
y_diff.loc[:stop]).residuals,
X_diff.loc[:stop])[2]))
print('p-value for the Breusch-Pagan F-Test the growth rate OLS model: ' +
str(het_breuschpagan(ct.linear_residuals(X_diff.loc[:stop],
y_diff.loc[:stop]).residuals,
X_diff.loc[:stop])[3]))
print('Mean of OLS model of growth rate residuals pre-Euro: ' +
str(np.mean(ct.linear_residuals(X_diff.loc[:stop],
y_diff.loc[:stop]).residuals)))
# plots the histogram of residuals
plt.hist(ct.linear_residuals(X_diff.loc[:stop], y_diff.loc[:stop]).residuals)
plt.show()
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# plots the ACF for the residuals of the OLS regression of the differenced epo_r on
# differenced ebroad_r
pd.plotting.autocorrelation_plot(ct.linear_residuals(X_diff[start:],
y_diff[start:]).residuals)
plt.show()
# tests for heteroskedasticity in the full-sample residuals
print('F-statistic for the Breusch-Pagan Test the growth rate OLS model: ' +
str(het_breuschpagan(ct.linear_residuals(X_diff.loc[start:],
y_diff.loc[start:]).residuals,
X_diff.loc[start:])[2]))
print('p-value for the Breusch-Pagan F-Test the growth rate OLS model: ' +
str(het_breuschpagan(ct.linear_residuals(X_diff.loc[start:],
y_diff.loc[start:]).residuals,
X_diff.loc[start:])[3]))
print('Mean of OLS model of growth rate residuals pre-Euro: ' +
str(np.mean(ct.linear_residuals(X_diff.loc[start:],
y_diff.loc[start:]).residuals)))
# plots the histogram of residuals
plt.hist(ct.linear_residuals(X_diff.loc[start:], y_diff.loc[start:]).residuals)
```
The result of the Chow Test is a Chow Test statistic of about 4.24 tested against an F-distribution with 2 and 394 degrees of freedom. The p-value is about 0.0009, meaning if the models before and after the split date are actually the same and we did an infinite number of trials, 0.09% of our results would show this level of difference in the models due to sampling error. It is safe to say that the model of real oil price and dollar exchange growth rates is different pre-Euro and post-Euro introduction.
```
# performs the Chow Test
ct.ChowTest(X.diff().dropna(), y.diff().dropna(), stop, start)
```
# Misc tests used for evaluating how well RSSI translates to distance
Note - this notebook still needs to be cleaned. We include it here so this work won't be lost
```
%matplotlib inline
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
onemeter_file_path = '../data/rssi_distance/expt4/expt_07_11_'
data = pd.read_csv(onemeter_file_path+'1m.dat', skiprows=1, skipfooter=1, parse_dates=True, sep=' ', header=None, names=['MAC', 'RSSI', 'TIME'])
macs = ['D9:CB:6E:1F:48:82,', 'F2:50:9D:7E:C8:0C,']
signal_strength_1m = []
for mac in macs:
signal_strength_1m.append(pd.rolling_mean(data[data['MAC'] == mac]['RSSI'], 5, min_periods=4))
# Get a threshold value of signal strength for inside and outside the case based on an accuracy level
def getThresholds(accuracy, arr_vals):
result = []
for i in xrange(len(arr_vals)):
sample_num = int((1-accuracy)*len(arr_vals[i]))
result.append(sorted(arr_vals[i])[sample_num])
return result
for badge in signal_strength_1m:
badge.plot(kind='hist', alpha=0.5)
plt.xlabel('Signal Strength')
plt.ylabel('Count')
plt.title('Distribution of signal values for 1 meter distance')
plt.legend(macs, loc='upper right')
[out_90, in_90] = getThresholds(0.9, [signal_strength_1m[0], signal_strength_1m[1]])
plt.axvline(x=out_90, linewidth=2.0, color='r')
plt.axvline(x=in_90, linewidth=2.0, color='r')
# D9 is outside the case and F2 is inside ... mention this on the graph
def getDistance(rssi, n, a):
return 10**((a-rssi)/(10*n))
```
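The `getDistance` helper above inverts the standard log-distance path-loss model, RSSI = A - 10*n*log10(d), where A is the expected RSSI at 1 m and n is the path-loss exponent. A quick sanity check with illustrative constants (not the calibrated values from these experiments):
```python
# Quick sanity check of the log-distance path-loss inversion used above.
# A and n are illustrative constants, not the calibrated values from the experiments.
A = -60.0   # expected RSSI at 1 m (dB)
n = 2.4     # path-loss exponent
for rssi in [-60, -66, -72]:
    d = 10 ** ((A - rssi) / (10 * n))   # same formula as getDistance(rssi, n, A)
    print(rssi, "dB ->", round(d, 2), "m")
```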
### Compare the obtained constants against the experiment conducted with multiple badges
Combine the signal distributions from the multiple-badge experiment as follows:
* Focus only on the 2 badges facing front in the top two levels (4 badges in total)
* Combine the data from the 3 badges outside the case and treat the one inside the case separately
* Plot the distribution for the two groups of badges from the experiment and the function for distance using both these constants
```
multibadge_file_path = '../data/rssi_distance/expt6/expt_07_11_'
raw_data = []
for i in xrange(2,9,2):
raw_data.append(pd.read_csv(multibadge_file_path+str(i)+'f.dat', header=None, sep=' ', skiprows=1, skipfooter=1, parse_dates=True, names=['MAC', 'RSSI', 'TIME']))
macs = [['F2:50:9D:7E:C8:0C,'], ['D9:CB:6E:1F:48:82,', 'CD:A3:F0:C5:68:73,', 'D2:67:85:48:D5:EF,']]
for i,distance in enumerate(raw_data):
vals = []
for mac in macs:
temp = distance[distance['MAC'].isin(mac)]['RSSI']
vals.append(pd.rolling_mean(temp, 5, min_periods=4))
[inside, outside] = getThresholds(0.9, [vals[0], vals[1]])
plt.figure(figsize=(10,7))
for j,val in enumerate(vals):
val.plot(kind='hist', alpha=0.5)
plt.xlabel('Signal Strength')
plt.ylabel('Count')
plt.title('Signal Strength Distribution For ' + str(2*(i+1)) + ' ft')
plt.axvline(x=outside, linewidth=2.0, color='r', label='outside')
plt.axvline(x=inside, linewidth=2.0, color='purple', label='inside')
plt.legend(['outside', 'inside'], loc='upper left')
signal_level = range(-70,-50)
[outside, inside] = getThresholds(0.9, [signal_strength_1m[0], signal_strength_1m[1]])
distances = [[getDistance(level, 2.4, A)*3.33 for level in signal_level] for A in [outside, inside]]
for i in xrange(len(distances)):
plt.plot(signal_level, distances[i], linewidth=2.0)
plt.xlabel('Signal strength (dB)')
plt.ylabel('Distance (feet)')
plt.title("Variation of distance with RSSI value for different 'n'")
plt.legend(['outside', 'inside'], loc='upper right')
labels = ['inside', 'outside']
for i,mac in enumerate(macs):
vals = []
for distance in raw_data:
temp = distance[distance['MAC'].isin(mac)]['RSSI']
vals.append(pd.rolling_mean(temp, 5, min_periods=4))
thresholds = getThresholds(0.9, vals)
plt.figure(figsize=(10,7))
for j,val in enumerate(vals):
val.plot(kind='hist', alpha=0.5)
plt.axvline(x=thresholds[j], linewidth=2.0, color='red')
plt.xlabel('Signal Strength')
plt.ylabel('Count')
plt.title('Signal Strength Distribution For Groups of badges across 2ft-8ft : ' + labels[i])
```
## Analysis for experiment with all badges inside the case and the receiver outside the case
```
cased_badges_file_path = '../data/rssi_distance/expt7/expt_07_29_'
raw_data = []
for i in xrange(2,11,2):
raw_data.append(pd.read_csv(cased_badges_file_path+str(i)+'f.dat', header=None, sep=' ', skiprows=1, skipfooter=1, parse_dates=True, names=['MAC', 'RSSI', 'TIME']))
vals = []
for i,distance in enumerate(raw_data):
temp = distance['RSSI']
vals.append(pd.rolling_mean(temp, 5, min_periods=4))
thresholds = getThresholds(0.9, vals)
plt.figure(figsize=(10,7))
for j,val in enumerate(vals):
val.plot(kind='hist', alpha=0.5)
plt.axvline(x=thresholds[j], linewidth=2.0, color='red', label=str(j))
plt.xlabel('Signal Strength')
plt.ylabel('Count')
plt.title('Signal Strength Distribution For 4 Badges In a Case (Receiver Outside) across 2ft-10ft : ')
plt.legend(['t1', 't2', 't3', 't4', 't5','2ft', '4ft', '6ft', '8ft', '10ft'], loc='upper left')
```
## Analysis For Badges and Receiver Inside the Case
```
receiver_inside_file_path = '../data/rssi_distance/expt8/expt_07_29_'
raw_data = []
for i in xrange(2,11,2):
raw_data.append(pd.read_csv(receiver_inside_file_path+str(i)+'f.dat', header=None, sep=' ', skiprows=1, skipfooter=1, parse_dates=True, names=['MAC', 'RSSI', 'TIME']))
vals = []
for i,distance in enumerate(raw_data):
temp = distance['RSSI']
vals.append(pd.rolling_mean(temp, 5, min_periods=4))
thresholds = getThresholds(0.9, vals)
plt.figure(figsize=(10,7))
for j,val in enumerate(vals):
val.plot(kind='hist', alpha=0.5)
plt.axvline(x=thresholds[j], linewidth=2.0, color='red', label=str(j))
plt.xlabel('Signal Strength')
plt.ylabel('Count')
plt.title('Signal Strength Distribution For 4 Badges In a Case (Receiver Inside) across 2ft-10ft : ')
plt.legend(['t1', 't2', 't3', 't4', 't5','2ft', '4ft', '6ft', '8ft', '10ft'], loc='upper left')
```
# Displacement controlled normal contact
***
In this notebook we will make a contact model which solves a normal contact problem with a specified displacement.
For normal contact problems with specified loads see the 'recreating the hertz solution numerically' example.
Here again we will use the hertz solution as an easy way to verify that we are getting sensible results.
First let's import everything we will need (no one actually writes these first; it's just convention to put them at the top of the file)
```
%matplotlib inline
import slippy.surface as s # surface generation and manipulation
import slippy.contact as c # contact modelling
import numpy as np # numerical functions
import matplotlib.pyplot as plt # plotting
```
## Making the surfaces
In order to solve the problem the geometry must be set. As we are solving the hertz problem we will use an analytically defined round surface and an analytically defined flat surface. For your own analyses the geometry data can come from experimental data or be generated randomly. For more information on this see examples about the surface class and the analytical surfaces.
Importantly at least one of the surfaces must be discrete to be used in a contact model.
```
flat_surface = s.FlatSurface(shift=(0, 0))
round_surface = s.RoundSurface((1, 1, 1), extent=(0.006, 0.006),
shape=(255, 255), generate=True)
```
## Setting the materials
The material for each surface must also be set. This material controls the deformation behaviour of the surface; the friction and wear behaviour must be set separately. Please see the tutorials on adding sub-models to the step for more information.
```
# set materials
steel = c.Elastic('Steel', {'E': 200e9, 'v': 0.3})
aluminum = c.Elastic('Aluminum', {'E': 70e9, 'v': 0.33})
flat_surface.material = aluminum
round_surface.material = steel
```
## Making a contact model
Now we have surfaces and materials, but we need a contact model to hold this information and control the solving of model steps. This will become more important when we have multiple steps, but for now think of the contact model as a container that all of the information is put in.
```
# create model
my_model = c.ContactModel('model-1', round_surface, flat_surface)
```
## Making a model step and adding it to the model
A step defines what happens to the surfaces during a set period. Here we will add a step that sets the interference between the surfaces from the point of first touching. The resulting loads on the surfaces and the deflections at each point on the surface will be found.
This can then be combined with other sub-models to specify friction or wear behaviour etc.; however, in this example we will simply compare the results back to the hertz solution. In order to do this we will use the analytical hertz solver to generate a sensible interference.
```
# Find the analytical result
analy_result = c.hertz_full([1, 1], [np.inf, np.inf], [200e9, 70e9], [0.3, 0.33], 100)
# Make the model step
my_step = c.StaticStep(step_name='This is the step name',
interference=analy_result['total_deflection'])
# Add the step to the model
my_model.add_step(my_step)
```
## Model steps
The steps of the model are stored in the steps property of the model; this is an ordered dictionary with the keys being the same as the step names. To retrieve a step you can index this dictionary with the step name. However, if you try to add two steps with the same name the first will be overwritten.
```
my_model.steps
```
## Solving the model
The entire model can then be solved using the solve method of the contact model. This will run through all the steps in order and return the model state at the end of the last step. Other information can be saved using output requests, but as we only have one step there is no need for this.
Before running, by default the model will data-check itself; this action checks that each step and sub-model can be solved with the information from the current state. It doesn't check for numerical stability or accuracy. This check can be skipped if necessary.
```
final_state = my_model.solve()
```
## Checking the model against the analytical result
Now let's check the model results against the analytical result from the hertz solution. Although this particular step directly sets the interference, most steps in slippy are solved iteratively, so it is a good idea to check that the set parameter converged to the desired value:
```
print('Solution converged at: ', final_state['interference'], ' interference')
print('Set interference was:', analy_result['total_deflection'])
```
Lets check the maximum pressure, contact area and total load all match up with the analytical values:
```
print('Analytical total load: ', 100)
print('Numerical total load: ',
round_surface.grid_spacing**2*sum(final_state['loads_z'].flatten()))
print('Analytical max pressure: ', analy_result['max_pressure'])
print('Numerical max pressure: ', max(final_state['loads_z'].flatten()))
print('Analytical contact area: ', analy_result['contact_area'])
print('Numerical contact area: ',
round_surface.grid_spacing**2* sum(final_state['contact_nodes'].flatten()))
```
## Checking the form of the result
We can also check that the individual surface loads line up with the analytical solution:
```
fig, axes = plt.subplots(1, 3, figsize=(20, 4))
X,Y = round_surface.get_points_from_extent()
X,Y = X-X[-1,-1]/2 , Y-Y[-1,-1]/2
Z_n = final_state['loads_z']
axes[0].imshow(Z_n)
axes[0].set_title('Numerical Result')
R = np.sqrt(X**2+Y**2)
Z_a = analy_result['pressure_f'](R)
axes[1].imshow(Z_a)
axes[1].set_title('Analytical Result')
axes[2].imshow(np.abs(Z_a-Z_n))
for im in axes[2].get_images():
im.set_clim(0, analy_result['max_pressure'])
_ = axes[2].set_title('Absolute Error')
```
## Other items in the state dict
Other items can be listed from the state dict by the following code:
```
print(list(final_state.keys()))
```
* 'just_touching_gap' The gap between the surfaces at the point when they first touch
* 'surface_1_points' The points of the first surface which are in the solution domain
* 'surface_2_points' The points of the second surface which are in the solution domain
* 'time' The current modelled time which is 0 at the start of the model. This is used for submodels which introduce time-dependent behaviour
* 'time_step' The current time step, again used for submodels
* 'new_step' True if this is the first substep in a model step
* 'off_set' The tangential displacement between the surfaces at the end of the step
* 'loads_z' The loads acting on each point of the surface
* 'total_displacement_z' The total displacement of each point of the surface pair
* 'surface_1_displacement_z' The displacement of each point on surface 1
* 'surface_2_displacement_z' The displacement of each point on surface 2
* 'contact_nodes' A boolean array showing which nodes are in contact; to find the percentage in contact simply take the mean of this array (see the short example after this list)
* 'total_normal_load' The total load pressing the surfaces together
* 'interference' The distance the surfaces have pushed into each other from the point of first touching
* 'converged' True if the step converged to a solution
* 'gap' The gap between the surfaces when loaded, including deformation
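For example, the fraction of nodes in contact can be read straight from the boolean 'contact_nodes' array found above:
```python
import numpy as np

# fraction of modelled nodes that ended up in contact
contact_fraction = np.mean(final_state['contact_nodes'])
print(f"{contact_fraction:.2%} of the modelled nodes are in contact")
```
The cell below plots the deformed gap stored under 'gap'.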
```
# getting the deformed gap between the surfaces
def_gap = (final_state['gap'])
plt.imshow(def_gap)
```
# Saving outputs
You can also save outputs from the model by using an output request, but this is not needed for single step models