path (string, 7–265 chars) | concatenated_notebook (string, 46–17M chars)
---|---
chapters/chapter_4/4_2_mlp_surnames/4_2_Classifying_Surnames_with_an_MLP.ipynb | ###Markdown
Classifying Surnames with a Multilayer Perceptron Imports
###Code
from argparse import Namespace
from collections import Counter
import json
import os
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
###Output
_____no_output_____
###Markdown
Data Vectorization classes The Vocabulary
###Code
class Vocabulary(object):
"""Class to process text and extract vocabulary for mapping"""
def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
"""
Args:
token_to_idx (dict): a pre-existing map of tokens to indices
add_unk (bool): a flag that indicates whether to add the UNK token
unk_token (str): the UNK token to add into the Vocabulary
"""
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
def to_serializable(self):
""" returns a dictionary that can be serialized """
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
@classmethod
def from_serializable(cls, contents):
""" instantiates the Vocabulary from a serialized dictionary """
return cls(**contents)
def add_token(self, token):
"""Update mapping dicts based on the token.
Args:
token (str): the item to add into the Vocabulary
Returns:
index (int): the integer corresponding to the token
"""
try:
index = self._token_to_idx[token]
except KeyError:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
"""Add a list of tokens into the Vocabulary
Args:
tokens (list): a list of string tokens
Returns:
indices (list): a list of indices corresponding to the tokens
"""
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
"""Retrieve the index associated with the token
or the UNK index if token isn't present.
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
Notes:
`unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
def lookup_index(self, index):
"""Return the token associated with the index
Args:
index (int): the index to look up
Returns:
token (str): the token corresponding to the index
Raises:
KeyError: if the index is not in the Vocabulary
"""
if index not in self._idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
###Output
_____no_output_____
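###Markdown
A quick sanity check (not part of the original notebook): a hand-built `Vocabulary` with made-up tokens, showing how `add_many` assigns indices and how `lookup_token` falls back to the UNK index for unseen tokens.
###Code
# Hypothetical usage sketch for the Vocabulary class defined above
vocab = Vocabulary(add_unk=True, unk_token="<UNK>")
vocab.add_many(list("abc"))
print(vocab)                                 # <Vocabulary(size=4)>  (3 letters + <UNK>)
print(vocab.lookup_token("a"))               # index assigned to 'a'
print(vocab.lookup_token("z"))               # unseen token -> unk_index
print(vocab.lookup_index(vocab.unk_index))   # '<UNK>'
###Output
_____no_output_____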
###Markdown
The Vectorizer
###Code
class SurnameVectorizer(object):
""" The Vectorizer which coordinates the Vocabularies and puts them to use"""
def __init__(self, surname_vocab, nationality_vocab):
"""
Args:
surname_vocab (Vocabulary): maps characters to integers
nationality_vocab (Vocabulary): maps nationalities to integers
"""
self.surname_vocab = surname_vocab
self.nationality_vocab = nationality_vocab
def vectorize(self, surname):
"""
Args:
surname (str): the surname
Returns:
one_hot (np.ndarray): a collapsed one-hot encoding
"""
vocab = self.surname_vocab
one_hot = np.zeros(len(vocab), dtype=np.float32)
for token in surname:
one_hot[vocab.lookup_token(token)] = 1
return one_hot
@classmethod
def from_dataframe(cls, surname_df):
"""Instantiate the vectorizer from the dataset dataframe
Args:
surname_df (pandas.DataFrame): the surnames dataset
Returns:
an instance of the SurnameVectorizer
"""
surname_vocab = Vocabulary(unk_token="@")
nationality_vocab = Vocabulary(add_unk=False)
for index, row in surname_df.iterrows():
for letter in row.surname:
surname_vocab.add_token(letter)
nationality_vocab.add_token(row.nationality)
return cls(surname_vocab, nationality_vocab)
@classmethod
def from_serializable(cls, contents):
surname_vocab = Vocabulary.from_serializable(contents['surname_vocab'])
nationality_vocab = Vocabulary.from_serializable(contents['nationality_vocab'])
return cls(surname_vocab=surname_vocab, nationality_vocab=nationality_vocab)
def to_serializable(self):
return {'surname_vocab': self.surname_vocab.to_serializable(),
'nationality_vocab': self.nationality_vocab.to_serializable()}
###Output
_____no_output_____
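###Markdown
A small illustration (not from the original notebook) of the collapsed one-hot encoding: every character that appears in the surname flips its position to 1, regardless of order or repetition. The toy vocabularies below are hypothetical.
###Code
# Hypothetical sketch: vectorize() with hand-built vocabularies
toy_surname_vocab = Vocabulary(unk_token="@")
toy_surname_vocab.add_many(list("abcder"))
toy_nationality_vocab = Vocabulary(add_unk=False)
toy_nationality_vocab.add_many(["English", "Arabic"])
toy_vectorizer = SurnameVectorizer(toy_surname_vocab, toy_nationality_vocab)
vec = toy_vectorizer.vectorize("abba")
print(vec.shape)   # (7,) -> 6 letters + the '@' UNK token
print(vec)         # positions for 'a' and 'b' are 1, everything else is 0
###Output
_____no_output_____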
###Markdown
The Dataset
###Code
class SurnameDataset(Dataset):
def __init__(self, surname_df, vectorizer):
"""
Args:
surname_df (pandas.DataFrame): the dataset
vectorizer (SurnameVectorizer): vectorizer instantiated from dataset
"""
self.surname_df = surname_df
self._vectorizer = vectorizer
self.train_df = self.surname_df[self.surname_df.split=='train']
self.train_size = len(self.train_df)
self.val_df = self.surname_df[self.surname_df.split=='val']
self.validation_size = len(self.val_df)
self.test_df = self.surname_df[self.surname_df.split=='test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'val': (self.val_df, self.validation_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
# Class weights
class_counts = surname_df.nationality.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.nationality_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequencies = [count for _, count in sorted_counts]
self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
@classmethod
def load_dataset_and_make_vectorizer(cls, surname_csv):
"""Load dataset and make a new vectorizer from scratch
Args:
surname_csv (str): location of the dataset
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
train_surname_df = surname_df[surname_df.split=='train']
return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df))
@classmethod
def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath):
"""Load dataset and the corresponding vectorizer.
Used in the case that the vectorizer has been cached for re-use
Args:
surname_csv (str): location of the dataset
vectorizer_filepath (str): location of the saved vectorizer
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
return cls(surname_df, vectorizer)
@staticmethod
def load_vectorizer_only(vectorizer_filepath):
"""a static method for loading the vectorizer from file
Args:
vectorizer_filepath (str): the location of the serialized vectorizer
Returns:
an instance of SurnameVectorizer
"""
with open(vectorizer_filepath) as fp:
return SurnameVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
"""saves the vectorizer to disk using json
Args:
vectorizer_filepath (str): the location to save the vectorizer
"""
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def get_vectorizer(self):
""" returns the vectorizer """
return self._vectorizer
def set_split(self, split="train"):
""" selects the splits in the dataset using a column in the dataframe """
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
def __getitem__(self, index):
"""the primary entry point method for PyTorch datasets
Args:
index (int): the index to the data point
Returns:
a dictionary holding the data point's:
features (x_surname)
label (y_nationality)
"""
row = self._target_df.iloc[index]
surname_vector = \
self._vectorizer.vectorize(row.surname)
nationality_index = \
self._vectorizer.nationality_vocab.lookup_token(row.nationality)
return {'x_surname': surname_vector,
'y_nationality': nationality_index}
def get_num_batches(self, batch_size):
"""Given a batch size, return the number of batches in the dataset
Args:
batch_size (int)
Returns:
number of batches in the dataset
"""
return len(self) // batch_size
def generate_batches(dataset, batch_size, shuffle=True,
drop_last=True, device="cpu"):
"""
A generator function which wraps the PyTorch DataLoader. It will
ensure each tensor is on the right device.
"""
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
###Output
_____no_output_____
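###Markdown
A minimal sketch (not part of the original notebook) of how the Dataset and `generate_batches` fit together, using a tiny in-memory dataframe whose column names mirror the real CSV. Every value below is made up for illustration.
###Code
# Hypothetical end-to-end check: dataframe -> dataset -> batches
toy_df = pd.DataFrame({
    'surname': ['Smith', 'Rossi', 'Nakamura', 'Dubois'],
    'nationality': ['English', 'Italian', 'Japanese', 'French'],
    'split': ['train', 'train', 'train', 'train'],
})
toy_dataset = SurnameDataset(toy_df, SurnameVectorizer.from_dataframe(toy_df))
for batch_dict in generate_batches(toy_dataset, batch_size=2, device="cpu"):
    print(batch_dict['x_surname'].shape)   # (2, len(surname_vocab)) collapsed one-hots
    print(batch_dict['y_nationality'])     # class indices for the batch
    break
###Output
_____no_output_____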
###Markdown
The Model: SurnameClassifier
###Code
class SurnameClassifier(nn.Module):
""" A 2-layer Multilayer Perceptron for classifying surnames """
def __init__(self, input_dim, hidden_dim, output_dim):
"""
Args:
input_dim (int): the size of the input vectors
hidden_dim (int): the output size of the first Linear layer
output_dim (int): the output size of the second Linear layer
"""
super(SurnameClassifier, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x_in, apply_softmax=False):
"""The forward pass of the classifier
Args:
x_in (torch.Tensor): an input data tensor.
x_in.shape should be (batch, input_dim)
apply_softmax (bool): a flag for the softmax activation
should be false if used with the Cross Entropy losses
Returns:
the resulting tensor. tensor.shape should be (batch, output_dim)
"""
intermediate_vector = F.relu(self.fc1(x_in))
prediction_vector = self.fc2(intermediate_vector)
if apply_softmax:
prediction_vector = F.softmax(prediction_vector, dim=1)
return prediction_vector
###Output
_____no_output_____
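###Markdown
A quick shape check (not in the original notebook): pushing a random batch through the MLP, once for raw scores and once with `apply_softmax=True`. The dimensions are arbitrary.
###Code
# Hypothetical shape check for SurnameClassifier
toy_clf = SurnameClassifier(input_dim=80, hidden_dim=16, output_dim=18)
x = torch.randn(4, 80)                    # a fake batch of 4 input vectors
logits = toy_clf(x)                       # raw scores, suitable for CrossEntropyLoss
probs = toy_clf(x, apply_softmax=True)    # normalized probabilities
print(logits.shape, probs.shape)          # torch.Size([4, 18]) for both
print(probs.sum(dim=1))                   # each row sums to ~1
###Output
_____no_output_____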
###Markdown
Training Routine Helper functions
###Code
def make_train_state(args):
return {'stop_early': False,
'early_stopping_step': 0,
'early_stopping_best_val': 1e8,
'learning_rate': args.learning_rate,
'epoch_index': 0,
'train_loss': [],
'train_acc': [],
'val_loss': [],
'val_acc': [],
'test_loss': -1,
'test_acc': -1,
'model_filename': args.model_state_file}
def update_train_state(args, model, train_state):
"""Handle the training state updates.
Components:
- Early Stopping: Prevent overfitting.
- Model Checkpoint: Model is saved if the model is better
:param args: main arguments
:param model: model to train
:param train_state: a dictionary representing the training state values
:returns:
a new train_state
"""
# Save one model at least
if train_state['epoch_index'] == 0:
torch.save(model.state_dict(), train_state['model_filename'])
train_state['stop_early'] = False
# Save model if performance improved
elif train_state['epoch_index'] >= 1:
loss_tm1, loss_t = train_state['val_loss'][-2:]
# If loss worsened
if loss_t >= train_state['early_stopping_best_val']:
# Update step
train_state['early_stopping_step'] += 1
# Loss decreased
else:
# Save the best model
if loss_t < train_state['early_stopping_best_val']:
torch.save(model.state_dict(), train_state['model_filename'])
# Track the new best validation loss so later regressions are detected
train_state['early_stopping_best_val'] = loss_t
# Reset early stopping step
train_state['early_stopping_step'] = 0
# Stop early ?
train_state['stop_early'] = \
train_state['early_stopping_step'] >= args.early_stopping_criteria
return train_state
def compute_accuracy(y_pred, y_target):
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
###Output
_____no_output_____
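###Markdown
A tiny check of `compute_accuracy` (not in the original notebook): it takes the argmax over the class dimension and returns the percentage of predictions that match the targets.
###Code
# Hypothetical check for compute_accuracy
toy_pred = torch.tensor([[2.0, 0.1, -1.0],
                         [0.0, 3.0,  0.5],
                         [0.2, 0.1,  4.0],
                         [1.0, 0.9,  0.8]])
toy_target = torch.tensor([0, 1, 2, 2])
print(compute_accuracy(toy_pred, toy_target))   # 75.0 -> three of the four argmaxes match
###Output
_____no_output_____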
###Markdown
general utilities
###Code
def set_seed_everywhere(seed, cuda):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
###Output
_____no_output_____
###Markdown
Settings and some prep work
###Code
args = Namespace(
# Data and path information
surname_csv="data/surnames/surnames_with_splits.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="model_storage/ch4/surname_mlp",
# Model hyper parameters
hidden_dim=300,
# Training hyper parameters
seed=1337,
num_epochs=100,
early_stopping_criteria=5,
learning_rate=0.001,
batch_size=64,
# Runtime options
cuda=False,
reload_from_files=False,
expand_filepaths_to_save_dir=True,
)
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir,
args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir,
args.model_state_file)
print("Expanded filepaths: ")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
###Output
Expanded filepaths:
model_storage/ch4/surname_mlp/vectorizer.json
model_storage/ch4/surname_mlp/model.pth
Using CUDA: False
###Markdown
Initializations
###Code
if args.reload_from_files:
# training from a checkpoint
print("Reloading!")
dataset = SurnameDataset.load_dataset_and_load_vectorizer(args.surname_csv,
args.vectorizer_file)
else:
# create dataset and vectorizer
print("Creating fresh!")
dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
classifier = SurnameClassifier(input_dim=len(vectorizer.surname_vocab),
hidden_dim=args.hidden_dim,
output_dim=len(vectorizer.nationality_vocab))
###Output
Creating fresh!
###Markdown
Training loop
###Code
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min', factor=0.5,
patience=1)
train_state = make_train_state(args)
epoch_bar = tqdm_notebook(desc='training routine',
total=args.num_epochs,
position=0)
dataset.set_split('train')
train_bar = tqdm_notebook(desc='split=train',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split('val')
val_bar = tqdm_notebook(desc='split=val',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state['epoch_index'] = epoch_index
# Iterate over training dataset
# setup: batch generator, set loss and acc to 0, set train mode on
dataset.set_split('train')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.train()
for batch_index, batch_dict in enumerate(batch_generator):
# the training routine is these 5 steps:
# --------------------------------------
# step 1. zero the gradients
optimizer.zero_grad()
# step 2. compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# step 4. use loss to produce gradients
loss.backward()
# step 5. use optimizer to take gradient step
optimizer.step()
# -----------------------------------------
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
# update bar
train_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state['train_loss'].append(running_loss)
train_state['train_acc'].append(running_acc)
# Iterate over val dataset
# setup: batch generator, set loss and acc to 0; set eval mode on
dataset.set_split('val')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.to("cpu").item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
val_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state['val_loss'].append(running_loss)
train_state['val_acc'].append(running_acc)
train_state = update_train_state(args=args, model=classifier,
train_state=train_state)
scheduler.step(train_state['val_loss'][-1])
if train_state['stop_early']:
break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
except KeyboardInterrupt:
print("Exiting loop")
# compute the loss & accuracy on the test set using the best available model
classifier.load_state_dict(torch.load(train_state['model_filename']))
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
dataset.set_split('test')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_state['test_loss'] = running_loss
train_state['test_acc'] = running_acc
print("Test loss: {};".format(train_state['test_loss']))
print("Test Accuracy: {}".format(train_state['test_acc']))
###Output
Test loss: 1.7435305690765381;
Test Accuracy: 47.875
###Markdown
Inference
###Code
def predict_nationality(surname, classifier, vectorizer):
"""Predict the nationality from a new surname
Args:
surname (str): the surname to classify
classifier (SurnameClassifier): an instance of the classifier
vectorizer (SurnameVectorizer): the corresponding vectorizer
Returns:
a dictionary with the most likely nationality and its probability
"""
vectorized_surname = vectorizer.vectorize(surname)
vectorized_surname = torch.tensor(vectorized_surname).view(1, -1)
result = classifier(vectorized_surname, apply_softmax=True)
probability_values, indices = result.max(dim=1)
index = indices.item()
predicted_nationality = vectorizer.nationality_vocab.lookup_index(index)
probability_value = probability_values.item()
return {'nationality': predicted_nationality, 'probability': probability_value}
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
prediction = predict_nationality(new_surname, classifier, vectorizer)
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: McMahan
McMahan -> Irish (p=0.55)
###Markdown
Top-K Inference
###Code
vectorizer.nationality_vocab.lookup_index(8)
def predict_topk_nationality(name, classifier, vectorizer, k=5):
vectorized_name = vectorizer.vectorize(name)
vectorized_name = torch.tensor(vectorized_name).view(1, -1)
prediction_vector = classifier(vectorized_name, apply_softmax=True)
probability_values, indices = torch.topk(prediction_vector, k=k)
# returned size is 1,k
probability_values = probability_values.detach().numpy()[0]
indices = indices.detach().numpy()[0]
results = []
for prob_value, index in zip(probability_values, indices):
nationality = vectorizer.nationality_vocab.lookup_index(index)
results.append({'nationality': nationality,
'probability': prob_value})
return results
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
k = int(input("How many of the top predictions to see? "))
if k > len(vectorizer.nationality_vocab):
print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)")
k = len(vectorizer.nationality_vocab)
predictions = predict_topk_nationality(new_surname, classifier, vectorizer, k=k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: McMahan
How many of the top predictions to see? 5
Top 5 predictions:
===================
McMahan -> Irish (p=0.55)
McMahan -> Scottish (p=0.21)
McMahan -> Czech (p=0.05)
McMahan -> German (p=0.04)
McMahan -> English (p=0.03)
###Markdown
Classifying Surnames with a Multilayer Perceptron Imports
###Code
from argparse import Namespace
from collections import Counter
import json
import os
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
###Output
_____no_output_____
###Markdown
Data Vectorization classes The Vocabulary
###Code
class Vocabulary(object):
"""Class to process text and extract vocabulary for mapping"""
def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
"""
Args:
token_to_idx (dict): a pre-existing map of tokens to indices
add_unk (bool): a flag that indicates whether to add the UNK token
unk_token (str): the UNK token to add into the Vocabulary
"""
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
def to_serializable(self):
""" returns a dictionary that can be serialized """
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
@classmethod
def from_serializable(cls, contents):
""" instantiates the Vocabulary from a serialized dictionary """
return cls(**contents)
def add_token(self, token):
"""Update mapping dicts based on the token.
Args:
token (str): the item to add into the Vocabulary
Returns:
index (int): the integer corresponding to the token
"""
try:
index = self._token_to_idx[token]
except KeyError:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
"""Add a list of tokens into the Vocabulary
Args:
tokens (list): a list of string tokens
Returns:
indices (list): a list of indices corresponding to the tokens
"""
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
"""Retrieve the index associated with the token
or the UNK index if token isn't present.
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
Notes:
`unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
def lookup_index(self, index):
"""Return the token associated with the index
Args:
index (int): the index to look up
Returns:
token (str): the token corresponding to the index
Raises:
KeyError: if the index is not in the Vocabulary
"""
if index not in self._idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
###Output
_____no_output_____
###Markdown
The Vectorizer
###Code
class SurnameVectorizer(object):
""" The Vectorizer which coordinates the Vocabularies and puts them to use"""
def __init__(self, surname_vocab, nationality_vocab):
"""
Args:
surname_vocab (Vocabulary): maps characters to integers
nationality_vocab (Vocabulary): maps nationalities to integers
"""
self.surname_vocab = surname_vocab
self.nationality_vocab = nationality_vocab
def vectorize(self, surname):
"""
Args:
surname (str): the surname
Returns:
one_hot (np.ndarray): a collapsed one-hot encoding
"""
vocab = self.surname_vocab
one_hot = np.zeros(len(vocab), dtype=np.float32)
for token in surname:
one_hot[vocab.lookup_token(token)] = 1
return one_hot
@classmethod
def from_dataframe(cls, surname_df):
"""Instantiate the vectorizer from the dataset dataframe
Args:
surname_df (pandas.DataFrame): the surnames dataset
Returns:
an instance of the SurnameVectorizer
"""
surname_vocab = Vocabulary(unk_token="@")
nationality_vocab = Vocabulary(add_unk=False)
for index, row in surname_df.iterrows():
for letter in row.surname:
surname_vocab.add_token(letter)
nationality_vocab.add_token(row.nationality)
return cls(surname_vocab, nationality_vocab)
@classmethod
def from_serializable(cls, contents):
surname_vocab = Vocabulary.from_serializable(contents['surname_vocab'])
nationality_vocab = Vocabulary.from_serializable(contents['nationality_vocab'])
return cls(surname_vocab=surname_vocab, nationality_vocab=nationality_vocab)
def to_serializable(self):
return {'surname_vocab': self.surname_vocab.to_serializable(),
'nationality_vocab': self.nationality_vocab.to_serializable()}
###Output
_____no_output_____
###Markdown
The Dataset
###Code
class SurnameDataset(Dataset):
def __init__(self, surname_df, vectorizer):
"""
Args:
surname_df (pandas.DataFrame): the dataset
vectorizer (SurnameVectorizer): vectorizer instantiated from dataset
"""
self.surname_df = surname_df
self._vectorizer = vectorizer
self.train_df = self.surname_df[self.surname_df.split=='train']
self.train_size = len(self.train_df)
self.val_df = self.surname_df[self.surname_df.split=='val']
self.validation_size = len(self.val_df)
self.test_df = self.surname_df[self.surname_df.split=='test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'val': (self.val_df, self.validation_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
# Class weights
class_counts = surname_df.nationality.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.nationality_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequencies = [count for _, count in sorted_counts]
self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
@classmethod
def load_dataset_and_make_vectorizer(cls, surname_csv):
"""Load dataset and make a new vectorizer from scratch
Args:
surname_csv (str): location of the dataset
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
train_surname_df = surname_df[surname_df.split=='train']
return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df))
@classmethod
def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath):
"""Load dataset and the corresponding vectorizer.
Used in the case that the vectorizer has been cached for re-use
Args:
surname_csv (str): location of the dataset
vectorizer_filepath (str): location of the saved vectorizer
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
return cls(surname_df, vectorizer)
@staticmethod
def load_vectorizer_only(vectorizer_filepath):
"""a static method for loading the vectorizer from file
Args:
vectorizer_filepath (str): the location of the serialized vectorizer
Returns:
an instance of SurnameVectorizer
"""
with open(vectorizer_filepath) as fp:
return SurnameVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
"""saves the vectorizer to disk using json
Args:
vectorizer_filepath (str): the location to save the vectorizer
"""
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def get_vectorizer(self):
""" returns the vectorizer """
return self._vectorizer
def set_split(self, split="train"):
""" selects the splits in the dataset using a column in the dataframe """
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
def __getitem__(self, index):
"""the primary entry point method for PyTorch datasets
Args:
index (int): the index to the data point
Returns:
a dictionary holding the data point's:
features (x_surname)
label (y_nationality)
"""
row = self._target_df.iloc[index]
surname_vector = \
self._vectorizer.vectorize(row.surname)
nationality_index = \
self._vectorizer.nationality_vocab.lookup_token(row.nationality)
return {'x_surname': surname_vector,
'y_nationality': nationality_index}
def get_num_batches(self, batch_size):
"""Given a batch size, return the number of batches in the dataset
Args:
batch_size (int)
Returns:
number of batches in the dataset
"""
return len(self) // batch_size
def generate_batches(dataset, batch_size, shuffle=True,
drop_last=True, device="cpu"):
"""
A generator function which wraps the PyTorch DataLoader. It will
ensure each tensor is on the right device.
"""
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
###Output
_____no_output_____
###Markdown
The Model: SurnameClassifier
###Code
class SurnameClassifier(nn.Module):
""" A 2-layer Multilayer Perceptron for classifying surnames """
def __init__(self, input_dim, hidden_dim, output_dim):
"""
Args:
input_dim (int): the size of the input vectors
hidden_dim (int): the output size of the first Linear layer
output_dim (int): the output size of the second Linear layer
"""
super(SurnameClassifier, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x_in, apply_softmax=False):
"""The forward pass of the classifier
Args:
x_in (torch.Tensor): an input data tensor.
x_in.shape should be (batch, input_dim)
apply_softmax (bool): a flag for the softmax activation
should be false if used with the Cross Entropy losses
Returns:
the resulting tensor. tensor.shape should be (batch, output_dim)
"""
intermediate_vector = F.relu(self.fc1(x_in))
prediction_vector = self.fc2(intermediate_vector)
if apply_softmax:
prediction_vector = F.softmax(prediction_vector, dim=1)
return prediction_vector
###Output
_____no_output_____
###Markdown
Training Routine Helper functions
###Code
def make_train_state(args):
return {'stop_early': False,
'early_stopping_step': 0,
'early_stopping_best_val': 1e8,
'learning_rate': args.learning_rate,
'epoch_index': 0,
'train_loss': [],
'train_acc': [],
'val_loss': [],
'val_acc': [],
'test_loss': -1,
'test_acc': -1,
'model_filename': args.model_state_file}
def update_train_state(args, model, train_state):
"""Handle the training state updates.
Components:
- Early Stopping: Prevent overfitting.
- Model Checkpoint: Model is saved if the model is better
:param args: main arguments
:param model: model to train
:param train_state: a dictionary representing the training state values
:returns:
a new train_state
"""
# Save one model at least
if train_state['epoch_index'] == 0:
torch.save(model.state_dict(), train_state['model_filename'])
train_state['stop_early'] = False
# Save model if performance improved
elif train_state['epoch_index'] >= 1:
loss_tm1, loss_t = train_state['val_loss'][-2:]
# If loss worsened
if loss_t >= train_state['early_stopping_best_val']:
# Update step
train_state['early_stopping_step'] += 1
# Loss decreased
else:
# Save the best model
if loss_t < train_state['early_stopping_best_val']:
torch.save(model.state_dict(), train_state['model_filename'])
# Track the new best validation loss so later regressions are detected
train_state['early_stopping_best_val'] = loss_t
# Reset early stopping step
train_state['early_stopping_step'] = 0
# Stop early ?
train_state['stop_early'] = \
train_state['early_stopping_step'] >= args.early_stopping_criteria
return train_state
def compute_accuracy(y_pred, y_target):
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
###Output
_____no_output_____
###Markdown
general utilities
###Code
def set_seed_everywhere(seed, cuda):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
###Output
_____no_output_____
###Markdown
Settings and some prep work
###Code
args = Namespace(
# Data and path information
surname_csv="data/surnames/surnames_with_splits.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="model_storage/ch4/surname_mlp",
# Model hyper parameters
hidden_dim=300,
# Training hyper parameters
seed=1337,
num_epochs=100,
early_stopping_criteria=5,
learning_rate=0.001,
batch_size=64,
# Runtime options
cuda=False,
reload_from_files=False,
expand_filepaths_to_save_dir=True,
)
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir,
args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir,
args.model_state_file)
print("Expanded filepaths: ")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
###Output
Expanded filepaths:
model_storage/ch4/surname_mlp\vectorizer.json
model_storage/ch4/surname_mlp\model.pth
Using CUDA: False
###Markdown
Initializations
###Code
if args.reload_from_files:
# training from a checkpoint
print("Reloading!")
dataset = SurnameDataset.load_dataset_and_load_vectorizer(args.surname_csv,
args.vectorizer_file)
else:
# create dataset and vectorizer
print("Creating fresh!")
dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
classifier = SurnameClassifier(input_dim=len(vectorizer.surname_vocab),
hidden_dim=args.hidden_dim,
output_dim=len(vectorizer.nationality_vocab))
###Output
Creating fresh!
###Markdown
Training loop
###Code
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min', factor=0.5,
patience=1)
train_state = make_train_state(args)
epoch_bar = tqdm_notebook(desc='training routine',
total=args.num_epochs,
position=0)
dataset.set_split('train')
train_bar = tqdm_notebook(desc='split=train',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split('val')
val_bar = tqdm_notebook(desc='split=val',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state['epoch_index'] = epoch_index
# Iterate over training dataset
# setup: batch generator, set loss and acc to 0, set train mode on
dataset.set_split('train')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.train()
for batch_index, batch_dict in enumerate(batch_generator):
# the training routine is these 5 steps:
# --------------------------------------
# step 1. zero the gradients
optimizer.zero_grad()
# step 2. compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# step 4. use loss to produce gradients
loss.backward()
# step 5. use optimizer to take gradient step
optimizer.step()
# -----------------------------------------
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
# update bar
train_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state['train_loss'].append(running_loss)
train_state['train_acc'].append(running_acc)
# Iterate over val dataset
# setup: batch generator, set loss and acc to 0; set eval mode on
dataset.set_split('val')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.to("cpu").item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
val_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state['val_loss'].append(running_loss)
train_state['val_acc'].append(running_acc)
train_state = update_train_state(args=args, model=classifier,
train_state=train_state)
scheduler.step(train_state['val_loss'][-1])
if train_state['stop_early']:
break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
except KeyboardInterrupt:
print("Exiting loop")
# compute the loss & accuracy on the test set using the best available model
classifier.load_state_dict(torch.load(train_state['model_filename']))
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
dataset.set_split('test')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_state['test_loss'] = running_loss
train_state['test_acc'] = running_acc
print("Test loss: {};".format(train_state['test_loss']))
print("Test Accuracy: {}".format(train_state['test_acc']))
###Output
Test loss: 1.8188700580596926;
Test Accuracy: 46.74999999999999
###Markdown
Inference
###Code
def predict_nationality(surname, classifier, vectorizer):
"""Predict the nationality from a new surname
Args:
surname (str): the surname to classify
classifier (SurnameClassifier): an instance of the classifier
vectorizer (SurnameVectorizer): the corresponding vectorizer
Returns:
a dictionary with the most likely nationality and its probability
"""
vectorized_surname = vectorizer.vectorize(surname)
vectorized_surname = torch.tensor(vectorized_surname).view(1, -1)
result = classifier(vectorized_surname, apply_softmax=True)
probability_values, indices = result.max(dim=1)
index = indices.item()
predicted_nationality = vectorizer.nationality_vocab.lookup_index(index)
probability_value = probability_values.item()
return {'nationality': predicted_nationality, 'probability': probability_value}
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
prediction = predict_nationality(new_surname, classifier, vectorizer)
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: pablo
pablo -> Spanish (p=0.28)
###Markdown
Top-K Inference
###Code
vectorizer.nationality_vocab.lookup_index(8)
def predict_topk_nationality(name, classifier, vectorizer, k=5):
vectorized_name = vectorizer.vectorize(name)
vectorized_name = torch.tensor(vectorized_name).view(1, -1)
prediction_vector = classifier(vectorized_name, apply_softmax=True)
probability_values, indices = torch.topk(prediction_vector, k=k)
# returned size is 1,k
probability_values = probability_values.detach().numpy()[0]
indices = indices.detach().numpy()[0]
results = []
for prob_value, index in zip(probability_values, indices):
nationality = vectorizer.nationality_vocab.lookup_index(index)
results.append({'nationality': nationality,
'probability': prob_value})
return results
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
k = int(input("How many of the top predictions to see? "))
if k > len(vectorizer.nationality_vocab):
print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)")
k = len(vectorizer.nationality_vocab)
predictions = predict_topk_nationality(new_surname, classifier, vectorizer, k=k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: pablo
How many of the top predictions to see? 5
Top 5 predictions:
===================
pablo -> Spanish (p=0.28)
pablo -> Italian (p=0.22)
pablo -> French (p=0.10)
pablo -> Portuguese (p=0.09)
pablo -> Greek (p=0.09)
###Markdown
Classifying Surnames with a Multilayer Perceptron Imports
###Code
from argparse import Namespace
from collections import Counter
import json
import os
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
###Output
_____no_output_____
###Markdown
Data Vectorization classes The Vocabulary
###Code
class Vocabulary(object):
"""Class to process text and extract vocabulary for mapping"""
def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
"""
Args:
token_to_idx (dict): a pre-existing map of tokens to indices
add_unk (bool): a flag that indicates whether to add the UNK token
unk_token (str): the UNK token to add into the Vocabulary
"""
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
def to_serializable(self):
""" returns a dictionary that can be serialized """
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
@classmethod
def from_serializable(cls, contents):
""" instantiates the Vocabulary from a serialized dictionary """
return cls(**contents)
def add_token(self, token):
"""Update mapping dicts based on the token.
Args:
token (str): the item to add into the Vocabulary
Returns:
index (int): the integer corresponding to the token
"""
try:
index = self._token_to_idx[token]
except KeyError:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
"""Add a list of tokens into the Vocabulary
Args:
tokens (list): a list of string tokens
Returns:
indices (list): a list of indices corresponding to the tokens
"""
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
"""Retrieve the index associated with the token
or the UNK index if token isn't present.
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
Notes:
`unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
def lookup_index(self, index):
"""Return the token associated with the index
Args:
index (int): the index to look up
Returns:
token (str): the token corresponding to the index
Raises:
KeyError: if the index is not in the Vocabulary
"""
if index not in self._idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
###Output
_____no_output_____
###Markdown
The Vectorizer
###Code
class SurnameVectorizer(object):
""" The Vectorizer which coordinates the Vocabularies and puts them to use"""
def __init__(self, surname_vocab, nationality_vocab):
"""
Args:
surname_vocab (Vocabulary): maps characters to integers
nationality_vocab (Vocabulary): maps nationalities to integers
"""
self.surname_vocab = surname_vocab
self.nationality_vocab = nationality_vocab
def vectorize(self, surname):
"""
Args:
surname (str): the surname
Returns:
one_hot (np.ndarray): a collapsed one-hot encoding
"""
vocab = self.surname_vocab
one_hot = np.zeros(len(vocab), dtype=np.float32)
for token in surname:
one_hot[vocab.lookup_token(token)] = 1
return one_hot
@classmethod
def from_dataframe(cls, surname_df):
"""Instantiate the vectorizer from the dataset dataframe
Args:
surname_df (pandas.DataFrame): the surnames dataset
Returns:
an instance of the SurnameVectorizer
"""
surname_vocab = Vocabulary(unk_token="@")
nationality_vocab = Vocabulary(add_unk=False)
for index, row in surname_df.iterrows():
for letter in row.surname:
surname_vocab.add_token(letter)
nationality_vocab.add_token(row.nationality)
return cls(surname_vocab, nationality_vocab)
@classmethod
def from_serializable(cls, contents):
surname_vocab = Vocabulary.from_serializable(contents['surname_vocab'])
nationality_vocab = Vocabulary.from_serializable(contents['nationality_vocab'])
return cls(surname_vocab=surname_vocab, nationality_vocab=nationality_vocab)
def to_serializable(self):
return {'surname_vocab': self.surname_vocab.to_serializable(),
'nationality_vocab': self.nationality_vocab.to_serializable()}
###Output
_____no_output_____
###Markdown
The Dataset
###Code
class SurnameDataset(Dataset):
def __init__(self, surname_df, vectorizer):
"""
Args:
surname_df (pandas.DataFrame): the dataset
vectorizer (SurnameVectorizer): vectorizer instantiated from dataset
"""
self.surname_df = surname_df
self._vectorizer = vectorizer
self.train_df = self.surname_df[self.surname_df.split=='train']
self.train_size = len(self.train_df)
self.val_df = self.surname_df[self.surname_df.split=='val']
self.validation_size = len(self.val_df)
self.test_df = self.surname_df[self.surname_df.split=='test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'val': (self.val_df, self.validation_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
# Class weights
class_counts = surname_df.nationality.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.nationality_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequencies = [count for _, count in sorted_counts]
self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
@classmethod
def load_dataset_and_make_vectorizer(cls, surname_csv):
"""Load dataset and make a new vectorizer from scratch
Args:
surname_csv (str): location of the dataset
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
train_surname_df = surname_df[surname_df.split=='train']
return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df))
@classmethod
def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath):
"""Load dataset and the corresponding vectorizer.
Used in the case that the vectorizer has been cached for re-use
Args:
surname_csv (str): location of the dataset
vectorizer_filepath (str): location of the saved vectorizer
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
return cls(surname_df, vectorizer)
@staticmethod
def load_vectorizer_only(vectorizer_filepath):
"""a static method for loading the vectorizer from file
Args:
vectorizer_filepath (str): the location of the serialized vectorizer
Returns:
an instance of SurnameVectorizer
"""
with open(vectorizer_filepath) as fp:
return SurnameVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
"""saves the vectorizer to disk using json
Args:
vectorizer_filepath (str): the location to save the vectorizer
"""
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def get_vectorizer(self):
""" returns the vectorizer """
return self._vectorizer
def set_split(self, split="train"):
""" selects the splits in the dataset using a column in the dataframe """
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
def __getitem__(self, index):
"""the primary entry point method for PyTorch datasets
Args:
index (int): the index to the data point
Returns:
a dictionary holding the data point's:
features (x_surname)
label (y_nationality)
"""
row = self._target_df.iloc[index]
surname_vector = \
self._vectorizer.vectorize(row.surname)
nationality_index = \
self._vectorizer.nationality_vocab.lookup_token(row.nationality)
return {'x_surname': surname_vector,
'y_nationality': nationality_index}
def get_num_batches(self, batch_size):
"""Given a batch size, return the number of batches in the dataset
Args:
batch_size (int)
Returns:
number of batches in the dataset
"""
return len(self) // batch_size
def generate_batches(dataset, batch_size, shuffle=True,
drop_last=True, device="cpu"):
"""
A generator function which wraps the PyTorch DataLoader. It will
ensure each tensor is on the right device.
"""
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
###Output
_____no_output_____
###Markdown
The Model: SurnameClassifier
###Code
class SurnameClassifier(nn.Module):
""" A 2-layer Multilayer Perceptron for classifying surnames """
def __init__(self, input_dim, hidden_dim, output_dim):
"""
Args:
input_dim (int): the size of the input vectors
hidden_dim (int): the output size of the first Linear layer
output_dim (int): the output size of the second Linear layer
"""
super(SurnameClassifier, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x_in, apply_softmax=False):
"""The forward pass of the classifier
Args:
x_in (torch.Tensor): an input data tensor.
x_in.shape should be (batch, input_dim)
apply_softmax (bool): a flag for the softmax activation
should be false if used with the Cross Entropy losses
Returns:
the resulting tensor. tensor.shape should be (batch, output_dim)
"""
intermediate_vector = F.relu(self.fc1(x_in))
prediction_vector = self.fc2(intermediate_vector)
if apply_softmax:
prediction_vector = F.softmax(prediction_vector, dim=1)
return prediction_vector
###Output
_____no_output_____
###Markdown
Training Routine Helper functions
###Code
def make_train_state(args):
return {'stop_early': False,
'early_stopping_step': 0,
'early_stopping_best_val': 1e8,
'learning_rate': args.learning_rate,
'epoch_index': 0,
'train_loss': [],
'train_acc': [],
'val_loss': [],
'val_acc': [],
'test_loss': -1,
'test_acc': -1,
'model_filename': args.model_state_file}
def update_train_state(args, model, train_state):
"""Handle the training state updates.
Components:
- Early Stopping: Prevent overfitting.
- Model Checkpoint: Model is saved if the model is better
:param args: main arguments
:param model: model to train
:param train_state: a dictionary representing the training state values
:returns:
a new train_state
"""
# Save one model at least
if train_state['epoch_index'] == 0:
torch.save(model.state_dict(), train_state['model_filename'])
train_state['stop_early'] = False
# Save model if performance improved
elif train_state['epoch_index'] >= 1:
loss_tm1, loss_t = train_state['val_loss'][-2:]
# If loss worsened
if loss_t >= train_state['early_stopping_best_val']:
# Update step
train_state['early_stopping_step'] += 1
# Loss decreased
else:
# Save the best model
if loss_t < train_state['early_stopping_best_val']:
torch.save(model.state_dict(), train_state['model_filename'])
# Track the new best validation loss so later regressions are detected
train_state['early_stopping_best_val'] = loss_t
# Reset early stopping step
train_state['early_stopping_step'] = 0
# Stop early ?
train_state['stop_early'] = \
train_state['early_stopping_step'] >= args.early_stopping_criteria
return train_state
def compute_accuracy(y_pred, y_target):
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
###Output
_____no_output_____
###Markdown
general utilities
###Code
def set_seed_everywhere(seed, cuda):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
###Output
_____no_output_____
###Markdown
Settings and some prep work
###Code
args = Namespace(
# Data and path information
surname_csv = "D:\\DeepLearn\\NLPBook\\data\\surnames\\surnames_with_splits.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="model_storage/ch4/surname_mlp",
# Model hyper parameters
hidden_dim=300,
# Training hyper parameters
seed=1337,
num_epochs=100,
early_stopping_criteria=5,
learning_rate=0.001,
batch_size=64,
# Runtime options
cuda=False,
reload_from_files=False,
expand_filepaths_to_save_dir=True,
)
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir,
args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir,
args.model_state_file)
print("Expanded filepaths: ")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
###Output
Expanded filepaths:
model_storage/ch4/surname_mlp\vectorizer.json
model_storage/ch4/surname_mlp\model.pth
Using CUDA: False
###Markdown
Initializations
###Code
if args.reload_from_files:
# training from a checkpoint
print("Reloading!")
dataset = SurnameDataset.load_dataset_and_load_vectorizer(args.surname_csv,
args.vectorizer_file)
else:
# create dataset and vectorizer
print("Creating fresh!")
dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
classifier = SurnameClassifier(input_dim=len(vectorizer.surname_vocab),
hidden_dim=args.hidden_dim,
output_dim=len(vectorizer.nationality_vocab))
###Output
Creating fresh!
###Markdown
Training loop
###Code
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min', factor=0.5,
patience=1)
train_state = make_train_state(args)
epoch_bar = tqdm_notebook(desc='training routine',
total=args.num_epochs,
position=0)
dataset.set_split('train')
train_bar = tqdm_notebook(desc='split=train',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split('val')
val_bar = tqdm_notebook(desc='split=val',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state['epoch_index'] = epoch_index
# Iterate over training dataset
# setup: batch generator, set loss and acc to 0, set train mode on
dataset.set_split('train')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.train()
for batch_index, batch_dict in enumerate(batch_generator):
# the training routine is these 5 steps:
# --------------------------------------
# step 1. zero the gradients
optimizer.zero_grad()
# step 2. compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# step 4. use loss to produce gradients
loss.backward()
# step 5. use optimizer to take gradient step
optimizer.step()
# -----------------------------------------
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
# update bar
train_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state['train_loss'].append(running_loss)
train_state['train_acc'].append(running_acc)
# Iterate over val dataset
# setup: batch generator, set loss and acc to 0; set eval mode on
dataset.set_split('val')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
            # compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.to("cpu").item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
val_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state['val_loss'].append(running_loss)
train_state['val_acc'].append(running_acc)
train_state = update_train_state(args=args, model=classifier,
train_state=train_state)
scheduler.step(train_state['val_loss'][-1])
if train_state['stop_early']:
break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
except KeyboardInterrupt:
print("Exiting loop")
# compute the loss & accuracy on the test set using the best available model
classifier.load_state_dict(torch.load(train_state['model_filename']))
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
dataset.set_split('test')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_state['test_loss'] = running_loss
train_state['test_acc'] = running_acc
print("Test loss: {};".format(train_state['test_loss']))
print("Test Accuracy: {}".format(train_state['test_acc']))
###Output
Test loss: 1.7435305690765381;
Test Accuracy: 47.875
###Markdown
Inference
###Code
def predict_nationality(surname, classifier, vectorizer):
"""Predict the nationality from a new surname
Args:
        surname (str): the surname to classify
        classifier (SurnameClassifier): an instance of the classifier
vectorizer (SurnameVectorizer): the corresponding vectorizer
Returns:
a dictionary with the most likely nationality and its probability
"""
vectorized_surname = vectorizer.vectorize(surname)
vectorized_surname = torch.tensor(vectorized_surname).view(1, -1)
result = classifier(vectorized_surname, apply_softmax=True)
probability_values, indices = result.max(dim=1)
index = indices.item()
predicted_nationality = vectorizer.nationality_vocab.lookup_index(index)
probability_value = probability_values.item()
return {'nationality': predicted_nationality, 'probability': probability_value}
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
prediction = predict_nationality(new_surname, classifier, vectorizer)
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: nathan
nathan -> Russian (p=0.14)
###Markdown
Top-K Inference
###Code
vectorizer.nationality_vocab.lookup_index(8)
def predict_topk_nationality(name, classifier, vectorizer, k=5):
vectorized_name = vectorizer.vectorize(name)
vectorized_name = torch.tensor(vectorized_name).view(1, -1)
prediction_vector = classifier(vectorized_name, apply_softmax=True)
probability_values, indices = torch.topk(prediction_vector, k=k)
# returned size is 1,k
probability_values = probability_values.detach().numpy()[0]
indices = indices.detach().numpy()[0]
results = []
for prob_value, index in zip(probability_values, indices):
nationality = vectorizer.nationality_vocab.lookup_index(index)
results.append({'nationality': nationality,
'probability': prob_value})
return results
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
k = int(input("How many of the top predictions to see? "))
if k > len(vectorizer.nationality_vocab):
print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)")
k = len(vectorizer.nationality_vocab)
predictions = predict_topk_nationality(new_surname, classifier, vectorizer, k=k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: nathan
How many of the top predictions to see? 4
Top 4 predictions:
===================
nathan -> Russian (p=0.14)
nathan -> English (p=0.14)
nathan -> Irish (p=0.12)
nathan -> Arabic (p=0.12)
###Markdown
Classifying Surnames with a Multilayer Perceptron Imports
###Code
from argparse import Namespace
from collections import Counter
import json
import os
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
from google.colab import drive
drive.mount('/content/drive')
%cd drive/MyDrive/CSC-project/PyTorchNLPBook/
!pip install -r requirements.txt
###Output
_____no_output_____
###Markdown
Data Vectorization classes The Vocabulary
###Code
class Vocabulary(object):
"""Class to process text and extract vocabulary for mapping"""
def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
"""
Args:
token_to_idx (dict): a pre-existing map of tokens to indices
add_unk (bool): a flag that indicates whether to add the UNK token
unk_token (str): the UNK token to add into the Vocabulary
"""
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
def to_serializable(self):
""" returns a dictionary that can be serialized """
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
@classmethod
def from_serializable(cls, contents):
""" instantiates the Vocabulary from a serialized dictionary """
return cls(**contents)
def add_token(self, token):
"""Update mapping dicts based on the token.
Args:
token (str): the item to add into the Vocabulary
Returns:
index (int): the integer corresponding to the token
"""
try:
index = self._token_to_idx[token]
except KeyError:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
"""Add a list of tokens into the Vocabulary
Args:
tokens (list): a list of string tokens
Returns:
indices (list): a list of indices corresponding to the tokens
"""
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
"""Retrieve the index associated with the token
or the UNK index if token isn't present.
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
Notes:
`unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
def lookup_index(self, index):
"""Return the token associated with the index
Args:
index (int): the index to look up
Returns:
token (str): the token corresponding to the index
Raises:
KeyError: if the index is not in the Vocabulary
"""
if index not in self._idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
###Output
_____no_output_____
###Markdown
The Vectorizer
###Code
class SurnameVectorizer(object):
""" The Vectorizer which coordinates the Vocabularies and puts them to use"""
def __init__(self, surname_vocab, nationality_vocab):
"""
Args:
surname_vocab (Vocabulary): maps characters to integers
nationality_vocab (Vocabulary): maps nationalities to integers
"""
self.surname_vocab = surname_vocab
self.nationality_vocab = nationality_vocab
def vectorize(self, surname):
"""
Args:
surname (str): the surname
Returns:
one_hot (np.ndarray): a collapsed one-hot encoding
"""
vocab = self.surname_vocab
one_hot = np.zeros(len(vocab), dtype=np.float32)
for token in surname:
one_hot[vocab.lookup_token(token)] = 1
return one_hot
@classmethod
def from_dataframe(cls, surname_df):
"""Instantiate the vectorizer from the dataset dataframe
Args:
surname_df (pandas.DataFrame): the surnames dataset
Returns:
an instance of the SurnameVectorizer
"""
surname_vocab = Vocabulary(unk_token="@")
nationality_vocab = Vocabulary(add_unk=False)
for index, row in surname_df.iterrows():
for letter in row.surname:
surname_vocab.add_token(letter)
nationality_vocab.add_token(row.nationality)
return cls(surname_vocab, nationality_vocab)
@classmethod
def from_serializable(cls, contents):
surname_vocab = Vocabulary.from_serializable(contents['surname_vocab'])
nationality_vocab = Vocabulary.from_serializable(contents['nationality_vocab'])
return cls(surname_vocab=surname_vocab, nationality_vocab=nationality_vocab)
def to_serializable(self):
return {'surname_vocab': self.surname_vocab.to_serializable(),
'nationality_vocab': self.nationality_vocab.to_serializable()}
###Output
_____no_output_____
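###Markdown
To make the "collapsed one-hot" encoding concrete, here is a small illustrative sketch (not part of the original pipeline): a hand-built three-letter vocabulary and the vector produced for a short made-up string.
###Code
# Illustrative only: a toy vocabulary, not the surname vocabulary built later from the data.
toy_surname_vocab = Vocabulary(unk_token="@")
toy_surname_vocab.add_many(list("abc"))
toy_vectorizer = SurnameVectorizer(surname_vocab=toy_surname_vocab,
                                   nationality_vocab=Vocabulary(add_unk=False))
# Every character present in the string flips its position to 1; repeated characters do not add up.
toy_one_hot = toy_vectorizer.vectorize("aba")   # length == len(toy_surname_vocab)
###Output
_____no_output_____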
###Markdown
The Dataset
###Code
class SurnameDataset(Dataset):
def __init__(self, surname_df, vectorizer):
"""
Args:
surname_df (pandas.DataFrame): the dataset
            vectorizer (SurnameVectorizer): vectorizer instantiated from the dataset
"""
self.surname_df = surname_df
self._vectorizer = vectorizer
self.train_df = self.surname_df[self.surname_df.split=='train']
self.train_size = len(self.train_df)
self.val_df = self.surname_df[self.surname_df.split=='val']
self.validation_size = len(self.val_df)
self.test_df = self.surname_df[self.surname_df.split=='test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'val': (self.val_df, self.validation_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
# Class weights
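        # (inverse-frequency weighting: rarer nationalities receive proportionally larger weight in the loss)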
class_counts = surname_df.nationality.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.nationality_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequencies = [count for _, count in sorted_counts]
self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
@classmethod
def load_dataset_and_make_vectorizer(cls, surname_csv):
"""Load dataset and make a new vectorizer from scratch
Args:
surname_csv (str): location of the dataset
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
train_surname_df = surname_df[surname_df.split=='train']
return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df))
@classmethod
def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath):
"""Load dataset and the corresponding vectorizer.
        Used in the case in which the vectorizer has been cached for re-use
Args:
surname_csv (str): location of the dataset
vectorizer_filepath (str): location of the saved vectorizer
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
return cls(surname_df, vectorizer)
@staticmethod
def load_vectorizer_only(vectorizer_filepath):
"""a static method for loading the vectorizer from file
Args:
vectorizer_filepath (str): the location of the serialized vectorizer
Returns:
an instance of SurnameVectorizer
"""
with open(vectorizer_filepath) as fp:
return SurnameVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
"""saves the vectorizer to disk using json
Args:
vectorizer_filepath (str): the location to save the vectorizer
"""
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def get_vectorizer(self):
""" returns the vectorizer """
return self._vectorizer
def set_split(self, split="train"):
""" selects the splits in the dataset using a column in the dataframe """
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
def __getitem__(self, index):
"""the primary entry point method for PyTorch datasets
Args:
index (int): the index to the data point
Returns:
a dictionary holding the data point's:
features (x_surname)
label (y_nationality)
"""
row = self._target_df.iloc[index]
surname_vector = \
self._vectorizer.vectorize(row.surname)
nationality_index = \
self._vectorizer.nationality_vocab.lookup_token(row.nationality)
return {'x_surname': surname_vector,
'y_nationality': nationality_index}
def get_num_batches(self, batch_size):
"""Given a batch size, return the number of batches in the dataset
Args:
batch_size (int)
Returns:
number of batches in the dataset
"""
return len(self) // batch_size
def generate_batches(dataset, batch_size, shuffle=True,
drop_last=True, device="cpu"):
"""
    A generator function which wraps the PyTorch DataLoader. It will
    ensure each tensor is on the right device.
"""
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
###Output
_____no_output_____
###Markdown
The Model: SurnameClassifier
###Code
class SurnameClassifier(nn.Module):
""" A 2-layer Multilayer Perceptron for classifying surnames """
def __init__(self, input_dim, hidden_dim, output_dim):
"""
Args:
input_dim (int): the size of the input vectors
hidden_dim (int): the output size of the first Linear layer
output_dim (int): the output size of the second Linear layer
"""
super(SurnameClassifier, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x_in, apply_softmax=False):
"""The forward pass of the classifier
Args:
x_in (torch.Tensor): an input data tensor.
x_in.shape should be (batch, input_dim)
apply_softmax (bool): a flag for the softmax activation
should be false if used with the Cross Entropy losses
Returns:
the resulting tensor. tensor.shape should be (batch, output_dim)
"""
intermediate_vector = F.relu(self.fc1(x_in))
prediction_vector = self.fc2(intermediate_vector)
if apply_softmax:
prediction_vector = F.softmax(prediction_vector, dim=1)
return prediction_vector
###Output
_____no_output_____
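###Markdown
A quick shape check can catch wiring mistakes before training. The sketch below is illustrative only; the small dimensions are made up and unrelated to the real vocabulary sizes.
###Code
# Illustrative shape check with made-up dimensions.
tiny_clf = SurnameClassifier(input_dim=10, hidden_dim=4, output_dim=3)
x_fake = torch.rand(2, 10)                    # a fake batch of 2 input vectors
logits = tiny_clf(x_fake)                     # shape (2, 3); raw scores for CrossEntropyLoss
probs = tiny_clf(x_fake, apply_softmax=True)  # shape (2, 3); each row sums to 1
assert logits.shape == probs.shape == (2, 3)
###Output
_____no_output_____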
###Markdown
Training Routine Helper functions
###Code
def make_train_state(args):
return {'stop_early': False,
'early_stopping_step': 0,
'early_stopping_best_val': 1e8,
'learning_rate': args.learning_rate,
'epoch_index': 0,
'train_loss': [],
'train_acc': [],
'val_loss': [],
'val_acc': [],
'test_loss': -1,
'test_acc': -1,
'model_filename': args.model_state_file}
def update_train_state(args, model, train_state):
"""Handle the training state updates.
Components:
- Early Stopping: Prevent overfitting.
- Model Checkpoint: Model is saved if the model is better
:param args: main arguments
:param model: model to train
:param train_state: a dictionary representing the training state values
:returns:
a new train_state
"""
# Save one model at least
if train_state['epoch_index'] == 0:
torch.save(model.state_dict(), train_state['model_filename'])
train_state['stop_early'] = False
# Save model if performance improved
elif train_state['epoch_index'] >= 1:
loss_tm1, loss_t = train_state['val_loss'][-2:]
# If loss worsened
if loss_t >= train_state['early_stopping_best_val']:
# Update step
train_state['early_stopping_step'] += 1
# Loss decreased
else:
# Save the best model
if loss_t < train_state['early_stopping_best_val']:
torch.save(model.state_dict(), train_state['model_filename'])
# Reset early stopping step
train_state['early_stopping_step'] = 0
# Stop early ?
train_state['stop_early'] = \
train_state['early_stopping_step'] >= args.early_stopping_criteria
return train_state
def compute_accuracy(y_pred, y_target):
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
###Output
_____no_output_____
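###Markdown
To see what compute_accuracy returns, a tiny hand-made example (not drawn from the notebook's data): of two predictions, one matches its target, giving 50.0.
###Code
# Hand-made tensors purely for illustration.
y_pred_demo = torch.tensor([[0.1, 0.9],   # argmax -> class 1 (matches target)
                            [0.8, 0.2]])  # argmax -> class 0 (target is 1)
y_true_demo = torch.tensor([1, 1])
demo_acc = compute_accuracy(y_pred_demo, y_true_demo)  # 50.0
###Output
_____no_output_____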
###Markdown
general utilities
###Code
def set_seed_everywhere(seed, cuda):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
###Output
_____no_output_____
###Markdown
Settings and some prep work
###Code
args = Namespace(
# Data and path information
surname_csv="data/surnames/surnames_with_splits.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="model_storage/ch4/surname_mlp",
# Model hyper parameters
hidden_dim=300,
# Training hyper parameters
seed=1337,
num_epochs=100,
early_stopping_criteria=5,
learning_rate=0.001,
batch_size=64,
# Runtime options
cuda=False,
reload_from_files=False,
expand_filepaths_to_save_dir=True,
)
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir,
args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir,
args.model_state_file)
print("Expanded filepaths: ")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
###Output
Expanded filepaths:
model_storage/ch4/surname_mlp/vectorizer.json
model_storage/ch4/surname_mlp/model.pth
Using CUDA: False
###Markdown
Initializations
###Code
if args.reload_from_files:
# training from a checkpoint
print("Reloading!")
dataset = SurnameDataset.load_dataset_and_load_vectorizer(args.surname_csv,
args.vectorizer_file)
else:
# create dataset and vectorizer
print("Creating fresh!")
dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
classifier = SurnameClassifier(input_dim=len(vectorizer.surname_vocab),
hidden_dim=args.hidden_dim,
output_dim=len(vectorizer.nationality_vocab))
###Output
Creating fresh!
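###Markdown
Before training, a small sanity check (a sketch, not part of the original run) confirms that a single dataset item has the expected one-hot length and a valid class index.
###Code
# Sanity-check sketch: shapes and index ranges for one item.
sample = dataset[0]
assert sample['x_surname'].shape[0] == len(vectorizer.surname_vocab)
assert 0 <= sample['y_nationality'] < len(vectorizer.nationality_vocab)
###Output
_____no_output_____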
###Markdown
Training loop
###Code
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min', factor=0.5,
patience=1)
train_state = make_train_state(args)
epoch_bar = tqdm_notebook(desc='training routine',
total=args.num_epochs,
position=0)
dataset.set_split('train')
train_bar = tqdm_notebook(desc='split=train',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split('val')
val_bar = tqdm_notebook(desc='split=val',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state['epoch_index'] = epoch_index
# Iterate over training dataset
# setup: batch generator, set loss and acc to 0, set train mode on
dataset.set_split('train')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.train()
for batch_index, batch_dict in enumerate(batch_generator):
# the training routine is these 5 steps:
# --------------------------------------
# step 1. zero the gradients
optimizer.zero_grad()
# step 2. compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# step 4. use loss to produce gradients
loss.backward()
# step 5. use optimizer to take gradient step
optimizer.step()
# -----------------------------------------
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
# update bar
train_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state['train_loss'].append(running_loss)
train_state['train_acc'].append(running_acc)
# Iterate over val dataset
# setup: batch generator, set loss and acc to 0; set eval mode on
dataset.set_split('val')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
            # compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.to("cpu").item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
val_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state['val_loss'].append(running_loss)
train_state['val_acc'].append(running_acc)
train_state = update_train_state(args=args, model=classifier,
train_state=train_state)
scheduler.step(train_state['val_loss'][-1])
if train_state['stop_early']:
break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
except KeyboardInterrupt:
print("Exiting loop")
# compute the loss & accuracy on the test set using the best available model
classifier.load_state_dict(torch.load(train_state['model_filename']))
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
dataset.set_split('test')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_state['test_loss'] = running_loss
train_state['test_acc'] = running_acc
print("Test loss: {};".format(train_state['test_loss']))
print("Test Accuracy: {}".format(train_state['test_acc']))
###Output
Test loss: 1.7435305690765381;
Test Accuracy: 47.875
###Markdown
Inference
###Code
def predict_nationality(surname, classifier, vectorizer):
"""Predict the nationality from a new surname
Args:
        surname (str): the surname to classify
        classifier (SurnameClassifier): an instance of the classifier
vectorizer (SurnameVectorizer): the corresponding vectorizer
Returns:
a dictionary with the most likely nationality and its probability
"""
vectorized_surname = vectorizer.vectorize(surname)
vectorized_surname = torch.tensor(vectorized_surname).view(1, -1)
result = classifier(vectorized_surname, apply_softmax=True)
probability_values, indices = result.max(dim=1)
index = indices.item()
predicted_nationality = vectorizer.nationality_vocab.lookup_index(index)
probability_value = probability_values.item()
return {'nationality': predicted_nationality, 'probability': probability_value}
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
prediction = predict_nationality(new_surname, classifier, vectorizer)
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: McMahan
McMahan -> Irish (p=0.55)
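###Markdown
The same helper also works without the interactive prompt; the surname below is an arbitrary example chosen only for illustration.
###Code
# Non-interactive usage sketch with an arbitrary example surname.
smith_prediction = predict_nationality("Smith", classifier, vectorizer)
# -> a dict of the form {'nationality': ..., 'probability': ...}
###Output
_____no_output_____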
###Markdown
Top-K Inference
###Code
vectorizer.nationality_vocab.lookup_index(8)
def predict_topk_nationality(name, classifier, vectorizer, k=5):
vectorized_name = vectorizer.vectorize(name)
vectorized_name = torch.tensor(vectorized_name).view(1, -1)
prediction_vector = classifier(vectorized_name, apply_softmax=True)
probability_values, indices = torch.topk(prediction_vector, k=k)
# returned size is 1,k
probability_values = probability_values.detach().numpy()[0]
indices = indices.detach().numpy()[0]
results = []
for prob_value, index in zip(probability_values, indices):
nationality = vectorizer.nationality_vocab.lookup_index(index)
results.append({'nationality': nationality,
'probability': prob_value})
return results
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
k = int(input("How many of the top predictions to see? "))
if k > len(vectorizer.nationality_vocab):
print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)")
k = len(vectorizer.nationality_vocab)
predictions = predict_topk_nationality(new_surname, classifier, vectorizer, k=k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: McMahan
How many of the top predictions to see? 5
Top 5 predictions:
===================
McMahan -> Irish (p=0.55)
McMahan -> Scottish (p=0.21)
McMahan -> Czech (p=0.05)
McMahan -> German (p=0.04)
McMahan -> English (p=0.03)
###Markdown
Classifying Surnames with a Multilayer Perceptron Imports
###Code
from argparse import Namespace
from collections import Counter
import json
import os
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
###Output
_____no_output_____
###Markdown
Data Vectorization classes The Vocabulary
###Code
class Vocabulary(object):
"""Class to process text and extract vocabulary for mapping"""
def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
"""
Args:
token_to_idx (dict): a pre-existing map of tokens to indices
add_unk (bool): a flag that indicates whether to add the UNK token
unk_token (str): the UNK token to add into the Vocabulary
"""
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
def to_serializable(self):
""" returns a dictionary that can be serialized """
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
@classmethod
def from_serializable(cls, contents):
""" instantiates the Vocabulary from a serialized dictionary """
return cls(**contents)
def add_token(self, token):
"""Update mapping dicts based on the token.
Args:
token (str): the item to add into the Vocabulary
Returns:
index (int): the integer corresponding to the token
"""
try:
index = self._token_to_idx[token]
except KeyError:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
"""Add a list of tokens into the Vocabulary
Args:
tokens (list): a list of string tokens
Returns:
indices (list): a list of indices corresponding to the tokens
"""
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
"""Retrieve the index associated with the token
or the UNK index if token isn't present.
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
Notes:
`unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
def lookup_index(self, index):
"""Return the token associated with the index
Args:
index (int): the index to look up
Returns:
token (str): the token corresponding to the index
Raises:
KeyError: if the index is not in the Vocabulary
"""
if index not in self._idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
###Output
_____no_output_____
###Markdown
The Vectorizer
###Code
class SurnameVectorizer(object):
""" The Vectorizer which coordinates the Vocabularies and puts them to use"""
def __init__(self, surname_vocab, nationality_vocab):
"""
Args:
surname_vocab (Vocabulary): maps characters to integers
nationality_vocab (Vocabulary): maps nationalities to integers
"""
self.surname_vocab = surname_vocab
self.nationality_vocab = nationality_vocab
def vectorize(self, surname):
"""
Args:
surname (str): the surname
Returns:
one_hot (np.ndarray): a collapsed one-hot encoding
"""
vocab = self.surname_vocab
one_hot = np.zeros(len(vocab), dtype=np.float32)
for token in surname:
one_hot[vocab.lookup_token(token)] = 1
return one_hot
@classmethod
def from_dataframe(cls, surname_df):
"""Instantiate the vectorizer from the dataset dataframe
Args:
surname_df (pandas.DataFrame): the surnames dataset
Returns:
an instance of the SurnameVectorizer
"""
surname_vocab = Vocabulary(unk_token="@")
nationality_vocab = Vocabulary(add_unk=False)
for index, row in surname_df.iterrows():
for letter in row.surname:
surname_vocab.add_token(letter)
nationality_vocab.add_token(row.nationality)
return cls(surname_vocab, nationality_vocab)
@classmethod
def from_serializable(cls, contents):
surname_vocab = Vocabulary.from_serializable(contents['surname_vocab'])
nationality_vocab = Vocabulary.from_serializable(contents['nationality_vocab'])
return cls(surname_vocab=surname_vocab, nationality_vocab=nationality_vocab)
def to_serializable(self):
return {'surname_vocab': self.surname_vocab.to_serializable(),
'nationality_vocab': self.nationality_vocab.to_serializable()}
###Output
_____no_output_____
###Markdown
The Dataset
###Code
class SurnameDataset(Dataset):
def __init__(self, surname_df, vectorizer):
"""
Args:
surname_df (pandas.DataFrame): the dataset
            vectorizer (SurnameVectorizer): vectorizer instantiated from the dataset
"""
self.surname_df = surname_df
self._vectorizer = vectorizer
self.train_df = self.surname_df[self.surname_df.split=='train']
self.train_size = len(self.train_df)
self.val_df = self.surname_df[self.surname_df.split=='val']
self.validation_size = len(self.val_df)
self.test_df = self.surname_df[self.surname_df.split=='test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'val': (self.val_df, self.validation_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
# Class weights
class_counts = surname_df.nationality.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.nationality_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequencies = [count for _, count in sorted_counts]
self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
@classmethod
def load_dataset_and_make_vectorizer(cls, surname_csv):
"""Load dataset and make a new vectorizer from scratch
Args:
surname_csv (str): location of the dataset
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
train_surname_df = surname_df[surname_df.split=='train']
return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df))
@classmethod
def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath):
"""Load dataset and the corresponding vectorizer.
        Used in the case in which the vectorizer has been cached for re-use
Args:
surname_csv (str): location of the dataset
vectorizer_filepath (str): location of the saved vectorizer
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
return cls(surname_df, vectorizer)
@staticmethod
def load_vectorizer_only(vectorizer_filepath):
"""a static method for loading the vectorizer from file
Args:
vectorizer_filepath (str): the location of the serialized vectorizer
Returns:
an instance of SurnameVectorizer
"""
with open(vectorizer_filepath) as fp:
return SurnameVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
"""saves the vectorizer to disk using json
Args:
vectorizer_filepath (str): the location to save the vectorizer
"""
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def get_vectorizer(self):
""" returns the vectorizer """
return self._vectorizer
def set_split(self, split="train"):
""" selects the splits in the dataset using a column in the dataframe """
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
def __getitem__(self, index):
"""the primary entry point method for PyTorch datasets
Args:
index (int): the index to the data point
Returns:
a dictionary holding the data point's:
features (x_surname)
label (y_nationality)
"""
row = self._target_df.iloc[index]
surname_vector = \
self._vectorizer.vectorize(row.surname)
nationality_index = \
self._vectorizer.nationality_vocab.lookup_token(row.nationality)
return {'x_surname': surname_vector,
'y_nationality': nationality_index}
def get_num_batches(self, batch_size):
"""Given a batch size, return the number of batches in the dataset
Args:
batch_size (int)
Returns:
number of batches in the dataset
"""
return len(self) // batch_size
def generate_batches(dataset, batch_size, shuffle=True,
drop_last=True, device="cpu"):
"""
    A generator function which wraps the PyTorch DataLoader. It will
    ensure each tensor is on the right device.
"""
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
###Output
_____no_output_____
###Markdown
The Model: SurnameClassifier
###Code
class SurnameClassifier(nn.Module):
""" A 2-layer Multilayer Perceptron for classifying surnames """
def __init__(self, input_dim, hidden_dim, output_dim):
"""
Args:
input_dim (int): the size of the input vectors
hidden_dim (int): the output size of the first Linear layer
output_dim (int): the output size of the second Linear layer
"""
super(SurnameClassifier, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x_in, apply_softmax=False):
"""The forward pass of the classifier
Args:
x_in (torch.Tensor): an input data tensor.
x_in.shape should be (batch, input_dim)
apply_softmax (bool): a flag for the softmax activation
should be false if used with the Cross Entropy losses
Returns:
the resulting tensor. tensor.shape should be (batch, output_dim)
"""
intermediate_vector = F.relu(self.fc1(x_in))
# prediction_vector = self.fc2(F.dropout(intermediate_vector, p=0.5))
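        # (the commented-out line above is an optional variant that applies dropout with p=0.5
        #  to the hidden representation; the default path below does not use dropout)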
prediction_vector = self.fc2(intermediate_vector)
if apply_softmax:
prediction_vector = F.softmax(prediction_vector, dim=1)
return prediction_vector
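# Illustrative usage sketch (not part of the original notebook; the dimensions
# below are made up): CrossEntropyLoss expects raw logits, so apply_softmax
# stays False during training and is only set True at inference time to read
# off probabilities.
_demo_clf = SurnameClassifier(input_dim=10, hidden_dim=8, output_dim=3)
_demo_probs = _demo_clf(torch.rand(2, 10), apply_softmax=True)
assert torch.allclose(_demo_probs.sum(dim=1), torch.ones(2))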
###Output
_____no_output_____
###Markdown
Training Routine Helper functions
###Code
def make_train_state(args):
return {'stop_early': False,
'early_stopping_step': 0,
'early_stopping_best_val': 1e8,
'learning_rate': args.learning_rate,
'epoch_index': 0,
'train_loss': [],
'train_acc': [],
'val_loss': [],
'val_acc': [],
'test_loss': -1,
'test_acc': -1,
'model_filename': args.model_state_file}
def update_train_state(args, model, train_state):
"""Handle the training state updates.
Components:
- Early Stopping: Prevent overfitting.
- Model Checkpoint: Model is saved if the model is better
:param args: main arguments
:param model: model to train
:param train_state: a dictionary representing the training state values
:returns:
a new train_state
"""
# Save one model at least
if train_state['epoch_index'] == 0:
torch.save(model.state_dict(), train_state['model_filename'])
train_state['stop_early'] = False
# Save model if performance improved
elif train_state['epoch_index'] >= 1:
loss_tm1, loss_t = train_state['val_loss'][-2:]
# If loss worsened
if loss_t >= train_state['early_stopping_best_val']:
# Update step
train_state['early_stopping_step'] += 1
# Loss decreased
else:
# Save the best model
if loss_t < train_state['early_stopping_best_val']:
torch.save(model.state_dict(), train_state['model_filename'])
# Reset early stopping step
train_state['early_stopping_step'] = 0
# Stop early ?
train_state['stop_early'] = \
train_state['early_stopping_step'] >= args.early_stopping_criteria
return train_state
def compute_accuracy(y_pred, y_target):
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
###Output
_____no_output_____
###Markdown
general utilities
###Code
def set_seed_everywhere(seed, cuda):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
###Output
_____no_output_____
###Markdown
Settings and some prep work
###Code
args = Namespace(
# Data and path information
surname_csv="data/surnames/surnames_with_splits.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="model_storage/ch4/surname_mlp",
# Model hyper parameters
hidden_dim=300,
# Training hyper parameters
seed=1337,
num_epochs=100,
early_stopping_criteria=5,
learning_rate=0.001,
batch_size=64,
# Runtime options
cuda=False,
reload_from_files=False,
expand_filepaths_to_save_dir=True,
)
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir,
args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir,
args.model_state_file)
print("Expanded filepaths: ")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
###Output
Expanded filepaths:
model_storage/ch4/surname_mlp\vectorizer.json
model_storage/ch4/surname_mlp\model.pth
Using CUDA: False
###Markdown
Initializations
###Code
if args.reload_from_files:
# training from a checkpoint
print("Reloading!")
dataset = SurnameDataset.load_dataset_and_load_vectorizer(args.surname_csv,
args.vectorizer_file)
else:
# create dataset and vectorizer
print("Creating fresh!")
dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
classifier = SurnameClassifier(input_dim=len(vectorizer.surname_vocab),
hidden_dim=args.hidden_dim,
output_dim=len(vectorizer.nationality_vocab))
###Output
Creating fresh!
###Markdown
Training loop
###Code
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min', factor=0.5,
patience=1)
train_state = make_train_state(args)
epoch_bar = notebook.tqdm(desc='training routine',
total=args.num_epochs,
position=0)
dataset.set_split('train')
train_bar = notebook.tqdm(desc='split=train',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split('val')
val_bar = notebook.tqdm(desc='split=val',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state['epoch_index'] = epoch_index
# Iterate over training dataset
# setup: batch generator, set loss and acc to 0, set train mode on
dataset.set_split('train')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.train()
for batch_index, batch_dict in enumerate(batch_generator):
# the training routine is these 5 steps:
# --------------------------------------
# step 1. zero the gradients
optimizer.zero_grad()
# step 2. compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# step 4. use loss to produce gradients
loss.backward()
# step 5. use optimizer to take gradient step
optimizer.step()
# -----------------------------------------
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
# update bar
train_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state['train_loss'].append(running_loss)
train_state['train_acc'].append(running_acc)
# Iterate over val dataset
# setup: batch generator, set loss and acc to 0; set eval mode on
dataset.set_split('val')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.to("cpu").item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
val_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state['val_loss'].append(running_loss)
train_state['val_acc'].append(running_acc)
train_state = update_train_state(args=args, model=classifier,
train_state=train_state)
scheduler.step(train_state['val_loss'][-1])
if train_state['stop_early']:
break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
except KeyboardInterrupt:
print("Exiting loop")
# compute the loss & accuracy on the test set using the best available model
classifier.load_state_dict(torch.load(train_state['model_filename']))
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
dataset.set_split('test')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_state['test_loss'] = running_loss
train_state['test_acc'] = running_acc
print("Test loss: {};".format(train_state['test_loss']))
print("Test Accuracy: {}".format(train_state['test_acc']))
###Output
Test loss: 1.8800991010665893;
Test Accuracy: 43.0625
###Markdown
Inference
###Code
def predict_nationality(surname, classifier, vectorizer):
"""Predict the nationality from a new surname
Args:
        surname (str): the surname to classify
        classifier (SurnameClassifier): an instance of the classifier
vectorizer (SurnameVectorizer): the corresponding vectorizer
Returns:
a dictionary with the most likely nationality and its probability
"""
vectorized_surname = vectorizer.vectorize(surname)
vectorized_surname = torch.tensor(vectorized_surname).view(1, -1)
result = classifier(vectorized_surname, apply_softmax=True)
probability_values, indices = result.max(dim=1)
index = indices.item()
predicted_nationality = vectorizer.nationality_vocab.lookup_index(index)
probability_value = probability_values.item()
return {'nationality': predicted_nationality, 'probability': probability_value}
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
prediction = predict_nationality(new_surname, classifier, vectorizer)
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: borg
borg -> Portuguese (p=0.17)
###Markdown
Top-K Inference
###Code
vectorizer.nationality_vocab.lookup_index(8)
def predict_topk_nationality(name, classifier, vectorizer, k=5):
vectorized_name = vectorizer.vectorize(name)
vectorized_name = torch.tensor(vectorized_name).view(1, -1)
prediction_vector = classifier(vectorized_name, apply_softmax=True)
probability_values, indices = torch.topk(prediction_vector, k=k)
# returned size is 1,k
probability_values = probability_values.detach().numpy()[0]
indices = indices.detach().numpy()[0]
results = []
for prob_value, index in zip(probability_values, indices):
nationality = vectorizer.nationality_vocab.lookup_index(index)
results.append({'nationality': nationality,
'probability': prob_value})
return results
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
k = int(input("How many of the top predictions to see? "))
if k > len(vectorizer.nationality_vocab):
print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)")
k = len(vectorizer.nationality_vocab)
predictions = predict_topk_nationality(new_surname, classifier, vectorizer, k=k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: borg
How many of the top predictions to see? 5
Top 5 predictions:
===================
borg -> Portuguese (p=0.17)
borg -> German (p=0.16)
borg -> French (p=0.11)
borg -> English (p=0.11)
borg -> Italian (p=0.11)
###Markdown
Classifying Surnames with a Multilayer Perceptron Imports
###Code
from argparse import Namespace
from collections import Counter
import json
import os
import string
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm_notebook
###Output
_____no_output_____
###Markdown
Data Vectorization classes The Vocabulary
###Code
class Vocabulary(object):
"""Class to process text and extract vocabulary for mapping"""
def __init__(self, token_to_idx=None, add_unk=True, unk_token="<UNK>"):
"""
Args:
token_to_idx (dict): a pre-existing map of tokens to indices
add_unk (bool): a flag that indicates whether to add the UNK token
unk_token (str): the UNK token to add into the Vocabulary
"""
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
self._add_unk = add_unk
self._unk_token = unk_token
self.unk_index = -1
if add_unk:
self.unk_index = self.add_token(unk_token)
def to_serializable(self):
""" returns a dictionary that can be serialized """
return {'token_to_idx': self._token_to_idx,
'add_unk': self._add_unk,
'unk_token': self._unk_token}
@classmethod
def from_serializable(cls, contents):
""" instantiates the Vocabulary from a serialized dictionary """
return cls(**contents)
def add_token(self, token):
"""Update mapping dicts based on the token.
Args:
token (str): the item to add into the Vocabulary
Returns:
index (int): the integer corresponding to the token
"""
try:
index = self._token_to_idx[token]
except KeyError:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
"""Add a list of tokens into the Vocabulary
Args:
tokens (list): a list of string tokens
Returns:
indices (list): a list of indices corresponding to the tokens
"""
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
"""Retrieve the index associated with the token
or the UNK index if token isn't present.
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
Notes:
`unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
def lookup_index(self, index):
"""Return the token associated with the index
Args:
index (int): the index to look up
Returns:
token (str): the token corresponding to the index
Raises:
KeyError: if the index is not in the Vocabulary
"""
if index not in self._idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
###Output
_____no_output_____
###Markdown
The Vectorizer
###Code
class SurnameVectorizer(object):
""" The Vectorizer which coordinates the Vocabularies and puts them to use"""
def __init__(self, surname_vocab, nationality_vocab):
"""
Args:
surname_vocab (Vocabulary): maps characters to integers
nationality_vocab (Vocabulary): maps nationalities to integers
"""
self.surname_vocab = surname_vocab
self.nationality_vocab = nationality_vocab
def vectorize(self, surname):
"""
Args:
surname (str): the surname
Returns:
one_hot (np.ndarray): a collapsed one-hot encoding
"""
vocab = self.surname_vocab
one_hot = np.zeros(len(vocab), dtype=np.float32)
for token in surname:
one_hot[vocab.lookup_token(token)] = 1
return one_hot
@classmethod
def from_dataframe(cls, surname_df):
"""Instantiate the vectorizer from the dataset dataframe
Args:
surname_df (pandas.DataFrame): the surnames dataset
Returns:
an instance of the SurnameVectorizer
"""
surname_vocab = Vocabulary(unk_token="@")
nationality_vocab = Vocabulary(add_unk=False)
for index, row in surname_df.iterrows():
for letter in row.surname:
surname_vocab.add_token(letter)
nationality_vocab.add_token(row.nationality)
return cls(surname_vocab, nationality_vocab)
@classmethod
def from_serializable(cls, contents):
surname_vocab = Vocabulary.from_serializable(contents['surname_vocab'])
nationality_vocab = Vocabulary.from_serializable(contents['nationality_vocab'])
return cls(surname_vocab=surname_vocab, nationality_vocab=nationality_vocab)
def to_serializable(self):
return {'surname_vocab': self.surname_vocab.to_serializable(),
'nationality_vocab': self.nationality_vocab.to_serializable()}
###Output
_____no_output_____
###Markdown
The Dataset
###Code
class SurnameDataset(Dataset):
def __init__(self, surname_df, vectorizer):
"""
Args:
surname_df (pandas.DataFrame): the dataset
            vectorizer (SurnameVectorizer): vectorizer instantiated from dataset
"""
self.surname_df = surname_df
self._vectorizer = vectorizer
self.train_df = self.surname_df[self.surname_df.split=='train']
self.train_size = len(self.train_df)
self.val_df = self.surname_df[self.surname_df.split=='val']
self.validation_size = len(self.val_df)
self.test_df = self.surname_df[self.surname_df.split=='test']
self.test_size = len(self.test_df)
self._lookup_dict = {'train': (self.train_df, self.train_size),
'val': (self.val_df, self.validation_size),
'test': (self.test_df, self.test_size)}
self.set_split('train')
# Class weights
class_counts = surname_df.nationality.value_counts().to_dict()
def sort_key(item):
return self._vectorizer.nationality_vocab.lookup_token(item[0])
sorted_counts = sorted(class_counts.items(), key=sort_key)
frequencies = [count for _, count in sorted_counts]
self.class_weights = 1.0 / torch.tensor(frequencies, dtype=torch.float32)
@classmethod
def load_dataset_and_make_vectorizer(cls, surname_csv):
"""Load dataset and make a new vectorizer from scratch
Args:
surname_csv (str): location of the dataset
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
train_surname_df = surname_df[surname_df.split=='train']
return cls(surname_df, SurnameVectorizer.from_dataframe(train_surname_df))
@classmethod
def load_dataset_and_load_vectorizer(cls, surname_csv, vectorizer_filepath):
"""Load dataset and the corresponding vectorizer.
        Used in the case the vectorizer has been cached for re-use
Args:
surname_csv (str): location of the dataset
vectorizer_filepath (str): location of the saved vectorizer
Returns:
an instance of SurnameDataset
"""
surname_df = pd.read_csv(surname_csv)
vectorizer = cls.load_vectorizer_only(vectorizer_filepath)
return cls(surname_df, vectorizer)
@staticmethod
def load_vectorizer_only(vectorizer_filepath):
"""a static method for loading the vectorizer from file
Args:
vectorizer_filepath (str): the location of the serialized vectorizer
Returns:
an instance of SurnameVectorizer
"""
with open(vectorizer_filepath) as fp:
return SurnameVectorizer.from_serializable(json.load(fp))
def save_vectorizer(self, vectorizer_filepath):
"""saves the vectorizer to disk using json
Args:
vectorizer_filepath (str): the location to save the vectorizer
"""
with open(vectorizer_filepath, "w") as fp:
json.dump(self._vectorizer.to_serializable(), fp)
def get_vectorizer(self):
""" returns the vectorizer """
return self._vectorizer
def set_split(self, split="train"):
""" selects the splits in the dataset using a column in the dataframe """
self._target_split = split
self._target_df, self._target_size = self._lookup_dict[split]
def __len__(self):
return self._target_size
def __getitem__(self, index):
"""the primary entry point method for PyTorch datasets
Args:
index (int): the index to the data point
Returns:
a dictionary holding the data point's:
features (x_surname)
label (y_nationality)
"""
row = self._target_df.iloc[index]
surname_vector = \
self._vectorizer.vectorize(row.surname)
nationality_index = \
self._vectorizer.nationality_vocab.lookup_token(row.nationality)
return {'x_surname': surname_vector,
'y_nationality': nationality_index}
def get_num_batches(self, batch_size):
"""Given a batch size, return the number of batches in the dataset
Args:
batch_size (int)
Returns:
number of batches in the dataset
"""
return len(self) // batch_size
def generate_batches(dataset, batch_size, shuffle=True,
drop_last=True, device="cpu"):
"""
A generator function which wraps the PyTorch DataLoader. It will
    ensure each tensor is on the right device location.
"""
dataloader = DataLoader(dataset=dataset, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
for data_dict in dataloader:
out_data_dict = {}
for name, tensor in data_dict.items():
out_data_dict[name] = data_dict[name].to(device)
yield out_data_dict
###Output
_____no_output_____
###Markdown
The Model: SurnameClassifier
###Code
class SurnameClassifier(nn.Module):
""" A 2-layer Multilayer Perceptron for classifying surnames """
def __init__(self, input_dim, hidden_dim, output_dim):
"""
Args:
input_dim (int): the size of the input vectors
hidden_dim (int): the output size of the first Linear layer
output_dim (int): the output size of the second Linear layer
"""
super(SurnameClassifier, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, output_dim)
def forward(self, x_in, apply_softmax=False):
"""The forward pass of the classifier
Args:
x_in (torch.Tensor): an input data tensor.
x_in.shape should be (batch, input_dim)
apply_softmax (bool): a flag for the softmax activation
should be false if used with the Cross Entropy losses
Returns:
the resulting tensor. tensor.shape should be (batch, output_dim)
"""
intermediate_vector = F.relu(self.fc1(x_in))
prediction_vector = self.fc2(intermediate_vector)
if apply_softmax:
prediction_vector = F.softmax(prediction_vector, dim=1)
return prediction_vector
###Output
_____no_output_____
###Markdown
Training Routine Helper functions
###Code
def make_train_state(args):
return {'stop_early': False,
'early_stopping_step': 0,
'early_stopping_best_val': 1e8,
'learning_rate': args.learning_rate,
'epoch_index': 0,
'train_loss': [],
'train_acc': [],
'val_loss': [],
'val_acc': [],
'test_loss': -1,
'test_acc': -1,
'model_filename': args.model_state_file}
def update_train_state(args, model, train_state):
"""Handle the training state updates.
Components:
- Early Stopping: Prevent overfitting.
- Model Checkpoint: Model is saved if the model is better
:param args: main arguments
:param model: model to train
:param train_state: a dictionary representing the training state values
:returns:
a new train_state
"""
# Save one model at least
if train_state['epoch_index'] == 0:
torch.save(model.state_dict(), train_state['model_filename'])
train_state['stop_early'] = False
# Save model if performance improved
elif train_state['epoch_index'] >= 1:
loss_tm1, loss_t = train_state['val_loss'][-2:]
# If loss worsened
if loss_t >= train_state['early_stopping_best_val']:
# Update step
train_state['early_stopping_step'] += 1
# Loss decreased
else:
# Save the best model
if loss_t < train_state['early_stopping_best_val']:
torch.save(model.state_dict(), train_state['model_filename'])
# Reset early stopping step
train_state['early_stopping_step'] = 0
# Stop early ?
train_state['stop_early'] = \
train_state['early_stopping_step'] >= args.early_stopping_criteria
return train_state
def compute_accuracy(y_pred, y_target):
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
###Output
_____no_output_____
###Markdown
general utilities
###Code
def set_seed_everywhere(seed, cuda):
np.random.seed(seed)
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed_all(seed)
def handle_dirs(dirpath):
if not os.path.exists(dirpath):
os.makedirs(dirpath)
###Output
_____no_output_____
###Markdown
Settings and some prep work
###Code
args = Namespace(
# Data and path information
surname_csv="data/surnames/surnames_with_splits.csv",
vectorizer_file="vectorizer.json",
model_state_file="model.pth",
save_dir="model_storage/ch4/surname_mlp",
# Model hyper parameters
hidden_dim=300,
# Training hyper parameters
seed=1337,
num_epochs=100,
early_stopping_criteria=5,
learning_rate=0.001,
batch_size=64,
# Runtime options
cuda=False,
reload_from_files=False,
expand_filepaths_to_save_dir=True,
)
if args.expand_filepaths_to_save_dir:
args.vectorizer_file = os.path.join(args.save_dir,
args.vectorizer_file)
args.model_state_file = os.path.join(args.save_dir,
args.model_state_file)
print("Expanded filepaths: ")
print("\t{}".format(args.vectorizer_file))
print("\t{}".format(args.model_state_file))
# Check CUDA
if not torch.cuda.is_available():
args.cuda = False
args.device = torch.device("cuda" if args.cuda else "cpu")
print("Using CUDA: {}".format(args.cuda))
# Set seed for reproducibility
set_seed_everywhere(args.seed, args.cuda)
# handle dirs
handle_dirs(args.save_dir)
###Output
Expanded filepaths:
model_storage/ch4/surname_mlp/vectorizer.json
model_storage/ch4/surname_mlp/model.pth
Using CUDA: False
###Markdown
Initializations
###Code
if args.reload_from_files:
# training from a checkpoint
print("Reloading!")
dataset = SurnameDataset.load_dataset_and_load_vectorizer(args.surname_csv,
args.vectorizer_file)
else:
# create dataset and vectorizer
print("Creating fresh!")
dataset = SurnameDataset.load_dataset_and_make_vectorizer(args.surname_csv)
dataset.save_vectorizer(args.vectorizer_file)
vectorizer = dataset.get_vectorizer()
classifier = SurnameClassifier(input_dim=len(vectorizer.surname_vocab),
hidden_dim=args.hidden_dim,
output_dim=len(vectorizer.nationality_vocab))
###Output
Creating fresh!
###Markdown
Training loop
###Code
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
optimizer = optim.Adam(classifier.parameters(), lr=args.learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
mode='min', factor=0.5,
patience=1)
train_state = make_train_state(args)
epoch_bar = tqdm_notebook(desc='training routine',
total=args.num_epochs,
position=0)
dataset.set_split('train')
train_bar = tqdm_notebook(desc='split=train',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
dataset.set_split('val')
val_bar = tqdm_notebook(desc='split=val',
total=dataset.get_num_batches(args.batch_size),
position=1,
leave=True)
try:
for epoch_index in range(args.num_epochs):
train_state['epoch_index'] = epoch_index
# Iterate over training dataset
# setup: batch generator, set loss and acc to 0, set train mode on
dataset.set_split('train')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.0
running_acc = 0.0
classifier.train()
for batch_index, batch_dict in enumerate(batch_generator):
# the training routine is these 5 steps:
# --------------------------------------
# step 1. zero the gradients
optimizer.zero_grad()
# step 2. compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# step 4. use loss to produce gradients
loss.backward()
# step 5. use optimizer to take gradient step
optimizer.step()
# -----------------------------------------
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
# update bar
train_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
train_bar.update()
train_state['train_loss'].append(running_loss)
train_state['train_acc'].append(running_acc)
# Iterate over val dataset
# setup: batch generator, set loss and acc to 0; set eval mode on
dataset.set_split('val')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# step 3. compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.to("cpu").item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
val_bar.set_postfix(loss=running_loss, acc=running_acc,
epoch=epoch_index)
val_bar.update()
train_state['val_loss'].append(running_loss)
train_state['val_acc'].append(running_acc)
train_state = update_train_state(args=args, model=classifier,
train_state=train_state)
scheduler.step(train_state['val_loss'][-1])
if train_state['stop_early']:
break
train_bar.n = 0
val_bar.n = 0
epoch_bar.update()
except KeyboardInterrupt:
print("Exiting loop")
# compute the loss & accuracy on the test set using the best available model
classifier.load_state_dict(torch.load(train_state['model_filename']))
classifier = classifier.to(args.device)
dataset.class_weights = dataset.class_weights.to(args.device)
loss_func = nn.CrossEntropyLoss(dataset.class_weights)
dataset.set_split('test')
batch_generator = generate_batches(dataset,
batch_size=args.batch_size,
device=args.device)
running_loss = 0.
running_acc = 0.
classifier.eval()
for batch_index, batch_dict in enumerate(batch_generator):
# compute the output
y_pred = classifier(batch_dict['x_surname'])
# compute the loss
loss = loss_func(y_pred, batch_dict['y_nationality'])
loss_t = loss.item()
running_loss += (loss_t - running_loss) / (batch_index + 1)
# compute the accuracy
acc_t = compute_accuracy(y_pred, batch_dict['y_nationality'])
running_acc += (acc_t - running_acc) / (batch_index + 1)
train_state['test_loss'] = running_loss
train_state['test_acc'] = running_acc
print("Test loss: {};".format(train_state['test_loss']))
print("Test Accuracy: {}".format(train_state['test_acc']))
###Output
Test loss: 1.7435305690765381;
Test Accuracy: 47.875
###Markdown
Inference
###Code
def predict_nationality(surname, classifier, vectorizer):
"""Predict the nationality from a new surname
Args:
        surname (str): the surname to classify
        classifier (SurnameClassifier): an instance of the classifier
vectorizer (SurnameVectorizer): the corresponding vectorizer
Returns:
a dictionary with the most likely nationality and its probability
"""
vectorized_surname = vectorizer.vectorize(surname)
vectorized_surname = torch.tensor(vectorized_surname).view(1, -1)
result = classifier(vectorized_surname, apply_softmax=True)
probability_values, indices = result.max(dim=1)
index = indices.item()
predicted_nationality = vectorizer.nationality_vocab.lookup_index(index)
probability_value = probability_values.item()
return {'nationality': predicted_nationality, 'probability': probability_value}
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
prediction = predict_nationality(new_surname, classifier, vectorizer)
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: McMahan
McMahan -> Irish (p=0.55)
###Markdown
Top-K Inference
###Code
vectorizer.nationality_vocab.lookup_index(8)
def predict_topk_nationality(name, classifier, vectorizer, k=5):
vectorized_name = vectorizer.vectorize(name)
vectorized_name = torch.tensor(vectorized_name).view(1, -1)
prediction_vector = classifier(vectorized_name, apply_softmax=True)
probability_values, indices = torch.topk(prediction_vector, k=k)
# returned size is 1,k
probability_values = probability_values.detach().numpy()[0]
indices = indices.detach().numpy()[0]
results = []
for prob_value, index in zip(probability_values, indices):
nationality = vectorizer.nationality_vocab.lookup_index(index)
results.append({'nationality': nationality,
'probability': prob_value})
return results
new_surname = input("Enter a surname to classify: ")
classifier = classifier.to("cpu")
k = int(input("How many of the top predictions to see? "))
if k > len(vectorizer.nationality_vocab):
print("Sorry! That's more than the # of nationalities we have.. defaulting you to max size :)")
k = len(vectorizer.nationality_vocab)
predictions = predict_topk_nationality(new_surname, classifier, vectorizer, k=k)
print("Top {} predictions:".format(k))
print("===================")
for prediction in predictions:
print("{} -> {} (p={:0.2f})".format(new_surname,
prediction['nationality'],
prediction['probability']))
###Output
Enter a surname to classify: McMahan
How many of the top predictions to see? 5
Top 5 predictions:
===================
McMahan -> Irish (p=0.55)
McMahan -> Scottish (p=0.21)
McMahan -> Czech (p=0.05)
McMahan -> German (p=0.04)
McMahan -> English (p=0.03)
|
example/estimate_eff/README.ipynb | ###Markdown
Example
This is an example given in the book, section 8.2. First, let's import the necessary packages
###Code
import sys
from mcupy.graph import *
from mcupy.nodes import *
from mcupy.utils import *
from mcupy.core import ensemble_type
try:
import pydot
except(ImportError):
import pydot_ng as pydot
###Output
_____no_output_____
###Markdown
Create a graph object, which is used to hold nodes.
###Code
g=Graph()
###Output
_____no_output_____
###Markdown
Create some nodes
###Code
A=FixedUniformNode(0.001,1-1e-5).withTag("A")
B=FixedUniformNode(0.001,1-1e-5).withTag("B")
mu=FixedUniformNode(.001,100-1e-5).withTag("mu")
sigma=FixedUniformNode(.001,100-1e-5).withTag("sigma")
###Output
_____no_output_____
###Markdown
And some more nodes
###Code
for l in open('eff.txt'):
e1,nrec1,ninj1=l.split()
e1=float(e1)
nrec1=float(nrec1)
ninj1=float(ninj1)
E=C_(e1).inGroup("E")
ninj=C_(ninj1).inGroup("ninj")
eff=((B-A)*PhiNode((E-mu)/sigma)+A).inGroup("eff")
nrec=BinNode(eff,ninj).withObservedValue(nrec1).inGroup("nrec")
g.addNode(nrec)
###Output
_____no_output_____
###Markdown
Then let us check the topology of the graph
###Code
display_graph(g)
###Output
_____no_output_____
###Markdown
It's correct. Then we'd like to perform several rounds of sampling and record the values. Before sampling, we need to decide which variables we need to monitor.
###Code
mA=g.getMonitor(A)
mB=g.getMonitor(B)
mSigma=g.getMonitor(sigma)
mMu=g.getMonitor(mu)
###Output
_____no_output_____
###Markdown
We need a variable to hold the results
###Code
result=[]
###Output
_____no_output_____
###Markdown
Then we perform the sampling 1000 times for burn-in
###Code
for i in log_progress(range(1000)):
g.sample()
###Output
_____no_output_____
###Markdown
Then we perform 30000 sampling steps and record the results
###Code
for i in log_progress(range(30000)):
g.sample()
result.append([mA.get(),mB.get(),mMu.get(),mSigma.get()])
###Output
_____no_output_____
###Markdown
Then we plot the results.
###Code
%matplotlib inline
import seaborn
import scipy
result=scipy.array(result)
seaborn.jointplot(result[:,0],result[:,1],kind='hex')
seaborn.jointplot(result[:,0],result[:,2],kind='hex')
seaborn.jointplot(result[:,0],result[:,3],kind='hex')
seaborn.jointplot(result[:,1],result[:,2],kind='hex')
seaborn.jointplot(result[:,1],result[:,3],kind='hex')
seaborn.jointplot(result[:,2],result[:,3],kind='hex')
###Output
_____no_output_____
###Markdown
Now, mcupy has also implemented the ensemble-based sampler for the graph, which is much faster. To use it, first declare a data structure to store the ensemble as:
###Code
em=ensemble_type()
###Output
_____no_output_____
###Markdown
Then, iteratively call graph.ensemble_sample(em):
###Code
result=[]
for i in log_progress(range(100000)):
g.ensemble_sample(em)
result.append([em[0][0],em[0][1],em[0][2],em[0][3]])
result=scipy.array(result)
seaborn.jointplot(result[:,1],result[:,0],kind='hex')
seaborn.jointplot(result[:,1],result[:,2],kind='hex')
###Output
_____no_output_____ |
code/tests/old_builds/pymc3_pb_smoo_back.ipynb | ###Markdown
Adding a background to the simple peakbag
I'm going to add a proper treatment of the mode frequencies. The remaining caveats are:
- I will not impose a complex prior on linewidth
- I will not impose a complex prior on mode heights
- I am not accounting for any asphericities due to near-surface magnetic fields

The expected effect of this will be, in order:
- Increased uncertainty on linewidths
- Increased runtime as the mode heights are less constrained
- Possible linewidth broadening or shifting of mode frequencies if there are significant frequency shifts
###Code
import numpy as np
import matplotlib.pyplot as plt
import lightkurve as lk
from astropy.units import cds
from astropy import units as u
import seaborn as sns
import mystyle as ms
import corner
import pystan
import pandas as pd
import pickle
import glob
from astropy.io import ascii
import os
import pymc3 as pm
import arviz
target = 3632418
mal = pd.read_csv('../../data/malatium.csv', index_col=0)
idx = np.where(mal.KIC == target)[0][0]
star = mal.loc[idx]
kic = star.KIC
numax_ = star.numax
dnu_ = star.dnu
sfile = glob.glob('../../data/*{}*.pow'.format(kic))
data = ascii.read(sfile[0]).to_pandas()
ff, pp = data['col1'].values, data['col2'].values
# Read in the fit data
cad = pd.read_csv('../../data/cadmium.csv', index_col=0)
cad = cad.loc[cad.KIC == target]
# Read in the mode locs
cop = pd.read_csv('../../data/copper.csv',index_col=0)
cop = cop[cop.l != 3]
modelocs = cop[cop.KIC == str(kic)].Freq.values#[15:27]
elocs = cop[cop.KIC == str(kic)].e_Freq.values#[15:27]
modeids = cop[cop.KIC == str(kic)].l.values#[15:27]
overtones = cop[cop.KIC == str(kic)].n.values#[15:27]
lo = modelocs.min() - .25*dnu_
hi = modelocs.max() + .25*dnu_
sel = (ff > lo) & (ff < hi)
f = ff[sel]
p = pp[sel]
###Output
_____no_output_____
###Markdown
Rebin the data
###Code
# def rebin(f, p, binsize=10):
# m = int(len(p)/binsize)
# bin_f = f[:m*binsize].reshape((m, binsize)).mean(1)
# bin_p = p[:m*binsize].reshape((m, binsize)).mean(1)
# return bin_f, bin_p
# print('Length: {}'.format(len(f)))
# f, p = rebin(f, p, binsize=10)
# print('Length: {}'.format(len(f)))
def harvey(f, a, b, c):
harvey = 0.9*a**2/b/(1.0 + (f/b)**c);
return harvey
def get_apodization(freqs, nyquist):
x = (np.pi * freqs) / (2 * nyquist)
return (np.sin(x)/x)**2
def get_background(f, a, b, c, d, j, k, white, scale, nyq):
background = np.zeros(len(f))
background += get_apodization(f, nyq) * scale\
* (harvey(f, a, b, 4.) + harvey(f, c, d, 4.) + harvey(f, j, k, 2.))\
+ white
return background
try:
backdir = glob.glob('/home/oliver/PhD/mnt/RDS/malatium/backfit/'
+str(kic)+'/*_fit.pkl')[0]
with open(backdir, 'rb') as file:
backfit = pickle.load(file)
labels=['loga','logb','logc','logd','logj','logk','white','scale','nyq']
res = np.array([np.median(backfit[label]) for label in labels])
res[0:6] = 10**res[0:6]
phi_ = np.array([np.median(backfit[label]) for label in labels])
phi_sigma = pd.DataFrame(backfit)[labels].cov()
phi_cholesky = np.linalg.cholesky(phi_sigma)
model = get_background(ff, *res)
m = get_background(f, *res)
except IndexError:
print('Run RDS')
pg = lk.Periodogram(ff*u.microhertz, pp*(cds.ppm**2 / u.microhertz))
ax = pg.plot(alpha=.5)
ax.plot(f, p)
ax.plot(ff, model)
plt.scatter(modelocs, [15]*len(modelocs),c=modeids, s=50, edgecolor='w',zorder=100)
ax.set_xlim(500,2000)
ax.set_ylim(0, 50)
pg = lk.periodogram.SNRPeriodogram(f*u.microhertz, p*(cds.ppm**2/u.microhertz))
ax = pg.plot(alpha=.5)
# pg.smooth(filter_width=3.).plot(ax=ax, linewidth=2)
ax.plot(f, m)
plt.scatter(modelocs, [15]*len(modelocs),c=modeids, s=20, edgecolor='k')
plt.show()
###Output
_____no_output_____
###Markdown
Finding the mode frequencies through the asymptotic relation

We have good constraints on the mode frequencies from previous studies, but since we are using the same data, we don't want to use their posteriors as priors on our frequencies. Instead we want to find a way to include the prior knowledge from previous studies without making our study dependent on them.

The locations of the radial $l = 0$ modes can be predicted from the asymptotic relation. Radial modes of consecutive overtones are separated by the large frequency separation $\Delta\nu$, in principle. In practice, however, these mode frequencies can be subject to some curvature, as well as noise from glitches in the sound speed profile in the stellar interior.

We're going to omit a detailed treatment of glitches for now, but we *will* include a curvature term, using the asymptotic relation presented in [Vrard et al. 2015](https://ui.adsabs.harvard.edu/abs/2015A%26A...579A..84V/abstract), which goes as

$\large\nu_{l=0} = (\bar{n} + \epsilon + (\frac{\alpha}{2}(n_{\rm max} - \bar{n})^2)) \Delta\nu + \Delta$

where $\nu_{l=0}$ gives the frequency locations of all l=0 modes at overtones $\bar{n}$, $\epsilon$ is the phase offset, $\alpha$ determines the curvature, $\Delta\nu$ is the large separation, and $n_{\rm max}$ is the overtone closest to $\nu_{\rm max}$, around which the curvature is centered, given by

$n_{\rm max} = \frac{\nu_{\rm max}}{\Delta\nu} - \epsilon$,

and $\Delta$ is the noise on the frequency positions. Including this noise term allows us to formalize the 'smoothness condition' ([Davies et al. 2016](https://ui.adsabs.harvard.edu/abs/2016MNRAS.456.2183D/abstract)), where we specify that the difference in large frequency separation between subsequent radial modes should be close to zero, with some scatter. We therefore specify $\Delta$ as

$\Delta = \mathcal{N}(0, \sigma_\Delta)$,

where $\sigma_\Delta$ is a free parameter. Eventually we should really upgrade this to a Gaussian Process periodic kernel, so that we take care of glitch patterns.

The positions of the dipole and quadrupole $l = 1, 2$ modes are then determined from the radial frequencies, as

$\nu_{l=1} = \nu_{l=0} + \delta\nu_{01} + \mathcal{N}(0, \sigma_{01})$

$\nu_{l=2} = \nu_{l=0} - \delta\nu_{02} + \mathcal{N}(0, \sigma_{02})$

where $\delta\nu_{01}$ and $\delta\nu_{02}$ are the small separations between the radial frequency and the dipole and quadrupole frequencies of the same radial order, and $\sigma_{01}$ and $\sigma_{02}$ are the uncertainties on these separations. All are free parameters, and as before we're adding on noise.

So we now have a complex hierarchical system where the mode frequencies are latent parameters, and we have a set of hyperparameters controlling them, giving them noise and curvature. These hyperparameters are where we include our *prior* information from the Kages and LEGACY papers, as first guesses and as means of the distributions from which they are drawn:

$\epsilon \sim \mathcal{N}(\epsilon_{\rm prior}, 1.)$

$\alpha \sim \mathcal{N_{\rm log}}(\alpha_{\rm prior}, 1.)$

$\Delta\nu \sim \mathcal{N}(\Delta\nu_{\rm prior}, \Delta\nu_{\rm prior}*0.1)$

$\nu_{\rm max} \sim \mathcal{N}(\nu_{\rm max, \rm prior}, \nu_{\rm max, \rm prior}*0.1)$

$\delta\nu_{01} \sim \mathcal{N}(\delta\nu_{01, \rm prior}, \Delta\nu_{\rm prior}*0.1)$

$\delta\nu_{02} \sim \mathcal{N}(\delta\nu_{02, \rm prior}, 3.)$

$\epsilon$, $\alpha$ and the small separations will be determined from a fit to the mode frequencies of each star. The remainder are taken as reported in the literature.

The noise terms $\sigma_\Delta$, $\sigma_{01}$ and $\sigma_{02}$ will all be drawn from lognormal distributions, to ensure they don't go too close to zero.

Notice that I've flipped the sign in the equation for $\nu_{l=2}$. In a typical $l=0,2$ pair, the quadrupole mode is one overtone lower than the radial mode. When passing the overtone numbers into the model, we simply add $+1$ to those of the quadrupole modes to make this equation work and maintain our traditional measure of $\delta\nu_{\rm 02}$.

We use the overtone numbers $n$ reported in LEGACY and Kages, and do not fit for any modes of oscillation not reported in those papers.

Build the model
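As a quick standalone sanity check of the asymptotic relation above (the numbers below are illustrative placeholders, not fitted values for this star), before defining the full model class:
###Code
import numpy as np
# Illustrative placeholder values only, not the fitted parameters for this star
numax_demo, deltanu_demo, alpha_demo, epsilon_demo = 1200., 60., 0.002, 1.3
n_demo = np.arange(18, 24)
nmax_demo = numax_demo / deltanu_demo - epsilon_demo
nu0_demo = (n_demo + epsilon_demo + 0.5 * alpha_demo * (nmax_demo - n_demo)**2) * deltanu_demo
print(nu0_demo)   # l=0 frequencies spaced by ~deltanu with a small curvature term
###Output
_____no_output_____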
###Code
class model():
def __init__(self, f, n0, n1, n2, f0_, f1_, f2_):
self.f = f
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.npts = len(f)
self.M = [len(f0_), len(f1_), len(f2_)]
def harvey(self, a, b, c):
harvey = 0.9*a**2/b/(1.0 + (self.f/b)**c);
return harvey
def get_apodization(self, nyquist):
x = (np.pi * self.f) / (2 * nyquist)
return (np.sin(x)/x)**2
def get_background(self, loga, logb, logc, logd, logj, logk, white, scale, nyq):
background = np.zeros(len(self.f))
background += self.get_apodization(nyq) * scale \
* (self.harvey(10**loga, 10**logb, 4.) \
+ self.harvey(10**logc, 10**logd, 4.) \
+ self.harvey(10**logj, 10**logk, 2.))\
+ white
return background
def epsilon(self, i, l, m):
#We use the prescriptions from Gizon & Solanki 2003 and Handberg & Campante 2012
if l == 0:
return 1
if l == 1:
if m == 0:
return np.cos(i)**2
if np.abs(m) == 1:
return 0.5 * np.sin(i)**2
if l == 2:
if m == 0:
return 0.25 * (3 * np.cos(i)**2 - 1)**2
if np.abs(m) ==1:
return (3/8)*np.sin(2*i)**2
if np.abs(m) == 2:
return (3/8) * np.sin(i)**4
if l == 3:
if m == 0:
return (1/64)*(5*np.cos(3*i) + 3*np.cos(i))**2
if np.abs(m) == 1:
return (3/64)*(5*np.cos(2*i) + 3)**2 * np.sin(i)**2
if np.abs(m) == 2:
return (15/8) * np.cos(i)**2 * np.sin(i)**4
if np.abs(m) == 3:
return (5/16)*np.sin(i)**6
def lor(self, freq, h, w):
return h / (1.0 + 4.0/w**2*(self.f - freq)**2)
def mode(self, l, freqs, hs, ws, i, split=0):
for idx in range(self.M[l]):
for m in range(-l, l+1, 1):
self.modes += self.lor(freqs[idx] + (m*split),
hs[idx] * self.epsilon(i, l, m),
ws[idx])
def model(self, p):
f0, f1, f2, g0, g1, g2, h0, h1, h2, split, i, phi = p
# Unpack background parameters
loga = phi[0]
logb = phi[1]
logc = phi[2]
logd = phi[3]
logj = phi[4]
logk = phi[5]
white = phi[6]
scale = phi[7]
nyq = phi[8]
# Calculate the modes
self.modes = np.zeros(self.npts)
self.mode(0, f0, h0, g0, i)
self.mode(1, f1, h1, g1, i, split)
self.mode(2, f2, h2, g2, i, split)
self.modes *= self.get_apodization(nyq)
#Calculate the background
self.back = self.get_background(loga, logb, logc, logd, logj, logk,
white, scale, nyq)
#Create the model
self.mod = self.modes + self.back
return self.mod
def asymptotic(self, n, numax, deltanu, alpha, epsilon):
nmax = (numax / deltanu) - epsilon
over = (n + epsilon + ((alpha/2)*(nmax - n)**2))
return over * deltanu
def f0(self, p):
numax, deltanu, alpha, epsilon, d01, d02 = p
return self.asymptotic(self.n0, numax, deltanu, alpha, epsilon)
def f1(self, p):
numax, deltanu, alpha, epsilon, d01, d02 = p
f0 = self.asymptotic(self.n1, numax, deltanu, alpha, epsilon)
return f0 + d01
def f2(self, p):
numax, deltanu, alpha, epsilon, d01, d02 = p
f0 = self.asymptotic(self.n2+1, numax, deltanu, alpha, epsilon)
return f0 - d02
f0_ = modelocs[modeids==0]
f1_ = modelocs[modeids==1]
f2_ = modelocs[modeids==2]
f0_e = elocs[modeids==0]
f1_e = elocs[modeids==1]
f2_e = elocs[modeids==2]
n0 = overtones[modeids==0]
n1 = overtones[modeids==1]
n2 = overtones[modeids==2]
alpha_ = cad.alpha.values[0]
alpha_e = cad.e_alpha.values[0]
epsilon_ = cad.epsilon.values[0]
epsilon_e = cad.e_epsilon.values[0]
d01_ = cad.d01.values[0]
d01_e = cad.e_d01.values[0]
d02_ = cad.d02.values[0]
d02_e = cad.e_d02.values[0]
numax_ = star.numax
numax_e = star.enumax
deltanu_ = star.dnu
deltanu_e = star.ednu
###Output
_____no_output_____
###Markdown
Do some first guesses for height
###Code
def gaussian(locs, l, numax, Hmax0):
fwhm = 0.25 * numax
std = fwhm/2.355
Vl = [1.0, 1.22, 0.71, 0.14]
return Hmax0 * Vl[l] * np.exp(-0.5 * (locs - numax)**2 / std**2)
init_m =[f0_, # l0 modes
f1_, # l1 modes
f2_, # l2 modes
np.ones(len(f0_)) * 2.0, # l0 widths
np.ones(len(f1_)) * 2.0, # l1 widths
np.ones(len(f2_)) * 2.0, # l2 widths
np.sqrt(gaussian(f0_, 0, numax_, 15.) * 2.0 * np.pi / 2.0) ,# l0 heights
np.sqrt(gaussian(f1_, 1, numax_, 15.) * 2.0 * np.pi / 2.0) ,# l1 heights
np.sqrt(gaussian(f2_, 2, numax_, 15.) * 2.0 * np.pi / 2.0) ,# l2 heights
1.0 * np.sin(np.pi/2), # projected splitting
np.pi/2., # inclination angle
np.copy(phi_) # background parameters (in log)
]
init_f =[numax_, # numax
dnu_, # deltanu
alpha_, # curvature term
epsilon_, # phase term
d01_ , # small separation l=0,1
d02_ # small separation l=0,2
]
mod = model(f, n0, n1, n2, f0_, f1_, f2_)
with plt.style.context(lk.MPLSTYLE):
fig, ax = plt.subplots()
ax.plot(f, p)
ax.plot(f, mod.model(init_m), lw=2)
ax.scatter(modelocs, [15]*len(modelocs),c=modeids, s=50, edgecolor='grey', zorder=100)
plt.show()
fig, ax = plt.subplots()
ax.errorbar(f0_%dnu_, n0, xerr=f0_e, fmt='^',label='0')
ax.errorbar(f1_%dnu_, n1, xerr=f1_e, fmt='>',label='1')
ax.errorbar(f2_%dnu_, n2, xerr=f2_e, fmt='o',label='2')
ax.plot(mod.f0(init_f)%dnu_, n0, label='0')
ax.plot(mod.f1(init_f)%dnu_, n1, label='1')
ax.plot(mod.f2(init_f)%dnu_, n2, label='2')
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Build the priors in PyMC3
###Code
pm_model = pm.Model()
BoundedNormal = pm.Bound(pm.Normal, lower= 0.0)
with pm_model:
# Frequency Hyperparameters
# alpha = pm.HalfNormal('alpha', sigma=1., testval=alpha_)
# epsilon = BoundedNormal('epsilon', mu=epsilon_, sigma=1., testval=epsilon_)
# d01 = BoundedNormal('d01', mu=d01_, sigma=0.1*deltanu_, testval=d01_)
# d02 = BoundedNormal('d02', mu=d02_, sigma=3., testval=d02_)
# numax = pm.Normal('numax', mu=numax_, sigma=numax_e, testval=numax_)
# deltanu = pm.Normal('deltanu', mu=deltanu_, sigma=deltanu_e, testval=deltanu_)
# Frequency Hyperparameters
alpha = BoundedNormal('alpha', mu=alpha_, sigma=alpha_e, testval=alpha_)
epsilon = BoundedNormal('epsilon', mu=epsilon_, sigma=epsilon_e, testval=epsilon_)
d01 = BoundedNormal('d01', mu=d01_, sigma=d01_e, testval=d01_)
d02 = BoundedNormal('d02', mu=d02_, sigma=d02_e, testval=d02_)
numax = BoundedNormal('numax', mu=numax_, sigma=numax_e, testval=numax_)
deltanu = BoundedNormal('deltanu', mu=deltanu_, sigma=deltanu_e, testval=deltanu_)
# Noise hyperparameters and latent parameters
sigma0 = pm.Gamma('sigma0', alpha=2.0, beta=0.5, testval=2.)
sigma01 = pm.Gamma('sigma01', alpha=2.0, beta=0.5, testval=2.)
sigma02 = pm.Gamma('sigma02', alpha=2.0, beta=0.5, testval=2.)
Delta0 = pm.Normal('Delta0', mu=0., sigma=1., shape=len(f0_))
Delta1 = pm.Normal('Delta1', mu=0., sigma=1., shape=len(f1_))
Delta2 = pm.Normal('Delta2', mu=0., sigma=1., shape=len(f2_))
#Frequencies
f0 = pm.Deterministic('f0', mod.f0([numax_, deltanu_, alpha_, epsilon_, d01_, d02_]) + Delta0*sigma0)
f1 = pm.Deterministic('f1', mod.f1([numax_, deltanu_, alpha_, epsilon_, d01_, d02_]) + Delta1*sigma01)
f2 = pm.Deterministic('f2', mod.f2([numax_, deltanu_, alpha_, epsilon_, d01_, d02_]) + Delta2*sigma02)
# Mode linewidths
g0 = pm.HalfNormal('g0', sigma=2.0, testval=init_m[3], shape=len(init_m[3]))
g1 = pm.HalfNormal('g1', sigma=2.0, testval=init_m[4], shape=len(init_m[4]))
g2 = pm.HalfNormal('g2', sigma=2.0, testval=init_m[5], shape=len(init_m[5]))
# Mode amplitudes
a0 = pm.HalfNormal('a0', sigma=20., testval=init_m[6], shape=len(init_m[6]))
a1 = pm.HalfNormal('a1', sigma=20., testval=init_m[7], shape=len(init_m[7]))
a2 = pm.HalfNormal('a2', sigma=20., testval=init_m[8], shape=len(init_m[8]))
# Mode heights (determined by amplitude and linewidth)
h0 = pm.Deterministic('h0', 2*a0**2/np.pi/g0)
h1 = pm.Deterministic('h1', 2*a1**2/np.pi/g1)
h2 = pm.Deterministic('h2', 2*a2**2/np.pi/g2)
# Rotation and inclination parameterizations
xsplit = pm.HalfNormal('xsplit', sigma=2.0, testval=init_m[10])
cosi = pm.Uniform('cosi', 0., 1.)
# Detangled inclination and splitting
i = pm.Deterministic('i', np.arccos(cosi))
split = pm.Deterministic('split', xsplit/pm.math.sin(i))
# Background treatment
phi = pm.MvNormal('phi', mu=phi_, chol=phi_cholesky, testval=phi_, shape=len(phi_))
#Model
fit = mod.model([f0, f1, f2, g0, g1, g2, h0, h1, h2, split, i, phi])
like = pm.Gamma('like', alpha=1, beta=1.0/fit, observed=p)
init = 5000
with pm_model:
trace = pm.sample(chains=4, tune=int(init/2), draws=int(init/2))
pm.traceplot(trace)
plt.show()
pm.summary(trace, var_names=['sigma0', 'sigma01', 'sigma02'])
###Output
_____no_output_____
###Markdown
Now let's plot some diagnostics...
###Code
labels=['xsplit','cosi','i','split']
chain = np.array([trace[label] for label in labels])
verbose = [r'$\delta\nu_s^*$',r'$\cos(i)$',r'$i$',r'$\delta\nu_{\rm s}$']
corner.corner(chain.T, labels=verbose, quantiles=[0.16, 0.5, 0.84]
,show_titles=True)
plt.show()
labels=['numax','deltanu','alpha','epsilon','d01','d02']
chain = np.array([trace[label] for label in labels])
verbose = [r'$\nu_{\rm max}$', r'$\Delta\nu$', r'$\alpha$',r'$\epsilon$',r'$\delta\nu_{01}$',r'$\delta\nu_{02}$']
corner.corner(chain.T, labels=verbose, quantiles=[0.16, 0.5, 0.84]
,show_titles=True)
plt.show()
for idx in range(len(trace['h0'].T)):
chain = np.array([trace['a0'].T[idx], trace['h0'].T[idx], trace['g0'].T[idx], trace['f0'].T[idx]])
corner.corner(chain.T, labels=['A','H','g','f'],
quantiles=[0.16, 0.5, 0.84],show_titles=True)
plt.show()
cmap = sns.color_palette('plasma', n_colors = 10)
labels=['loga','logb','logc','logd','logj','logk',
'white','scale','nyq']
verbose=[r'$\log_{10}a$',r'$\log_{10}b$',
r'$\log_{10}c$',r'$\log_{10}d$',
r'$\log_{10}j$',r'$\log_{10}k$',
'white','scale',r'$\nu_{\rm nyq}$']
backchain = np.array([backfit[label] for label in labels]).T
phichain = np.array([trace['phi'][:,idx] for idx in range(len(phi_))]).T
limit = [(backfit[label].min(), backfit[label].max()) for label in labels]
fig = corner.corner(backchain, color=cmap[0],range=limit)
corner.corner(phichain, fig=fig, show_titles=True, labels=verbose, color=cmap[6],range=limit)
plt.show()
###Output
_____no_output_____
###Markdown
Looks like all the background parameters have been tightened up or remained within the priors. Always good to check! Now let's plot some output evaluation:
###Code
with plt.style.context(lk.MPLSTYLE):
fig, ax = plt.subplots()
ax.plot(f, p)
labels=['f0','f1','f2','g0','g1','g2','h0','h1','h2','split','i', 'phi']
res = np.array([np.median(trace[label],axis=0) for label in labels])
ax.plot(f, mod.model(init_m), lw=2)
plt.show()
fig, ax = plt.subplots()
res = [np.median(trace[label]) for label in ['numax','deltanu','alpha','epsilon','d01','d02']]
resls = [np.median(trace[label],axis=0) for label in['f0','f1','f2']]
stdls = [np.std(trace[label],axis=0) for label in['f0','f1','f2']]
ax.plot(mod.f0(res)%res[1], n0, label='0 asy')
ax.plot(mod.f1(res)%res[1], n1, label='1 asy')
ax.plot(mod.f2(res)%res[1], n2, label='2 asy')
ax.errorbar(resls[0]%res[1], n0, xerr=stdls[0], fmt='^',label='0 mod')
ax.errorbar(resls[1]%res[1], n1, xerr=stdls[1], fmt='>',label='1 mod')
ax.errorbar(resls[2]%res[1], n2, xerr=stdls[2], fmt='o',label='2 mod')
ax.legend()
labels=['f0','f1','f2','g0','g1','g2','h0','h1','h2','split','i', 'phi']
res = np.array([np.median(trace[label],axis=0) for label in labels])
sns.distplot(p/mod.model(res), label='Model')
sns.distplot(np.random.chisquare(2, size=10000)/2, label=r'Chi22')
plt.legend()
###Output
_____no_output_____
###Markdown
The model doesn't fit the $\chi_2^2$ noise, probably because it's binned? Verdict: I'm confident in the completion of this addition, but it takes too long to run when testing other targets. I'll save it for now, run a longer test overnight and move on to GP testing.
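As an aside on the binning point: a minimal illustration (not part of the fit) of how averaging n frequency bins turns $\chi^2_2/2$ noise into the much narrower $\chi^2_{2n}/(2n)$ distribution:
###Code
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
raw = np.random.chisquare(2, size=(10000, 10)) / 2.   # unbinned chi^2_2 / 2 noise
binned = raw.mean(axis=1)                             # mean of 10 bins -> chi^2_20 / 20
sns.distplot(raw[:, 0], label=r'$\chi^2_2/2$')
sns.distplot(binned, label='mean of 10 bins')
plt.legend()
###Output
_____no_output_____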
###Code
import sys
sys.exit()
###Output
_____no_output_____
###Markdown
Leftovers
###Code
res = [np.median(trace[label]) for label in ['numax','deltanu','alpha','epsilon','d01','d02']]
resls = [np.median(trace[label],axis=0) for label in['f0','f1','f2']]
stdls = [np.std(trace[label],axis=0) for label in['f0','f1','f2']]
with plt.style.context(ms.ms):
fig, ax = plt.subplots()
ax.plot(mod.f0(res)%res[1], n0, label='0 asy')
ax.plot(mod.f1(res)%res[1], n1, label='1 asy')
ax.plot(mod.f2(res)%res[1], n2, label='2 asy')
ax.errorbar(resls[0]%res[1], n0, xerr=stdls[0], fmt='^',label='0 mod')
ax.errorbar(resls[1]%res[1], n1, xerr=stdls[1], fmt='>',label='1 mod')
ax.errorbar(resls[2]%res[1], n2, xerr=stdls[2], fmt='o',label='2 mod')
ax.errorbar(f0_%res[1], n0, xerr=f0_e, fmt='^',label='0 lit')
ax.errorbar(f1_%res[1], n1, xerr=f1_e, fmt='>',label='1 lit')
ax.errorbar(f2_%res[1], n2, xerr=f2_e, fmt='o',label='2 lit')
ax.legend()
###Output
_____no_output_____ |
Arithmetic/NBZip.ipynb | ###Markdown
This is the sklearn Naive Bayes implementation
###Code
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import make_classification
import numpy as np
X, y = make_classification(n_samples=1000, n_features=4,
n_informative=2, n_redundant=0,
random_state=0, shuffle=False)
clf = GaussianNB()
clf.fit(X, y)
print(clf.predict_proba([[0, 0, 0, 0]]))
print(X.shape)
print(y.shape)
clf_pf = GaussianNB()
clf_pf.partial_fit(X, y, np.unique(y))
clf_pf.partial_fit(X, y, np.unique(y))
print(clf_pf.predict_proba([[0, 0, 0, 0]]))
list_of_lists = []
with open('data\ecoli\Ecoli.txt') as f:
for line in f:
#inner_list = [elt.strip() for elt in line.split()]
inner_list = list(line)
list_of_lists.append(inner_list)
print(len(list_of_lists[0])) # About 4 MB
ecoli = list_of_lists[0]
import arithc as arith
import fqt, ppm
import contextlib, sys
import filecmp
from IPython.display import clear_output
import numpy as np
from sklearn.naive_bayes import GaussianNB
from plist import ProbabilityList
np.unique(y)
s = ecoli
char_list = [97, 103, 99, 116] # we can read this as we go
print(char_list)
update_period = len(s)
clf = GaussianNB()
legend = dict([(v, k) for k, v in enumerate(char_list)]) # map character to 0,1,2,3,4, etc.
temp_dict = {'a':97,'g': 103,'c': 99,'t': 116}
s = [temp_dict[i] for i in s]
#Train model
x = np.zeros((update_period-64, 64)) # 64 characters context
y = np.zeros((update_period-64))
print(len(s))
idx3 = 0
for idx2 in range(64,len(s)):
train_seq = [legend[i] for i in s[idx2-64:idx2]]
train_target = legend[s[idx2]]
x[idx3,:] = np.array(train_seq)
y[idx3] = train_target
idx3 += 1
predicted_onehot = []
for i in range(len(ecoli)//100000 - 1):
if i%10 == 0:
print(i)
clf.partial_fit(x[100000*i:100000*(i+1)], y[100000*i:100000*(i+1)], np.unique(y))
predicted_onehot += list(clf.predict_proba(x[100000*(i+1):100000*(i+2)]))
predicted_onehot += list(clf.predict_proba(x[100000*(len(ecoli)//100000):]))
predicted_onehot[-2]
y[-2]
len(s)-len(predicted_onehot)
char_list = [97, 103, 99, 116]
def RF_Warmcompress(inp, bitout):
initprobs= [1/257 for i in range(257)]
initprobs[256] = 1-sum(initprobs[:256])
model = ProbabilityList(initprobs) # For the first 200,000
enc = arith.ArithmeticCoder(32)
enc.start_encode(bitout) # New line!
idx = 0
while True:
symbol = inp.read(1)
if len(symbol) == 0:
break
idx += 1
## Progress Evaluation ## only internal
if idx % (len(ecoli)//10) == 0:
print(str(10*idx//(len(ecoli)//10)) + ' percent done')
clear_output(wait = True)
if idx == 100065:
model = ProbabilityList(initprobs) # reset the model
if idx >= 100065:
for val, prob in enumerate(predicted_onehot[idx-100065]):
model.set_prob(char_list[val], prob+1/257)
model.set_prob(256, 1/25700)
model.normalize()
t = 2**16 ## New lines!
l = int(model.get_low(symbol[0])*2**16)
h = int(model.get_high(symbol[0])*2**16)
enc.storeRegion(l,h,t)
if idx < 100065: # back up before LSTM model
model.prob_list[symbol[0]] += 1/257
model.normalize()
t = 2**16 ## New lines!
l = int(model.get_low(256)*2**16)
h = int(model.get_high(256)*2**16)
enc.storeRegion(l,h,t)
enc.finish_encode(bitout) # New line!
inputfile, outputfile = 'data\ecoli\Ecoli.txt', 'data\ecoli\Ecoli_NB64.txt'
#Perform file compression
with open(inputfile, "rb") as inp, \
contextlib.closing(arith.BitOutputStream(open(outputfile, "wb"))) as bitout:
RF_Warmcompress(inp, bitout)
char_list = [97, 103, 99, 116]
def RF_Warmcompress(inp, bitout):
initfreqs = fqt.FlatFrequencyTable(257)
model = fqt.SimpleFrequencyTable(initfreqs) # For the first 200,000
enc = arith.ArithmeticCoder(32)
enc.start_encode(bitout) # New line!
idx = 0
while True:
symbol = inp.read(1)
if len(symbol) == 0:
break
idx += 1
## Progress Evaluation ## only internal
if idx % (len(ecoli)//10) == 0:
print(str(10*idx//(len(ecoli)//10)) + ' percent done')
clear_output(wait = True)
if idx == 100065:
initfreqs = fqt.FlatFrequencyTable(257)
model = fqt.SimpleFrequencyTable(initfreqs) # reset the model
if idx >= 100065:
for val, prob in enumerate(predicted_onehot[idx-100065]):
model.set(char_list[val], int(prob*100000)+1)
t = model.get_total() ## New lines!
l = model.get_low(symbol[0])
h = model.get_high(symbol[0])
enc.storeRegion(l,h,t)
if idx < 100065: # back up before LSTM model
model.increment(symbol[0])
t = model.get_total() ## New lines!
l = model.get_low(256)
h = model.get_high(256)
enc.storeRegion(l,h,t)
enc.finish_encode(bitout) # New line!
inputfile, outputfile = 'data\ecoli\Ecoli.txt', 'data\ecoli\Ecoli_NB.txt'
#Perform file compression
with open(inputfile, "rb") as inp, \
contextlib.closing(arith.BitOutputStream(open(outputfile, "wb"))) as bitout:
RF_Warmcompress(inp, bitout)
s = ecoli
char_list = [97, 103, 99, 116] # we can read this as we go
print(char_list)
update_period = len(s)
clf = GaussianNB()
k = 6
n = 256
legend = dict([(v, k) for k, v in enumerate(char_list)]) # map character to 0,1,2,3,4, etc.
temp_dict = {'a':97,'g': 103,'c': 99,'t': 116}
s = [temp_dict[i] for i in s]
#Train model
x = np.zeros((update_period-k, k)) # 64 characters context
y = np.zeros((update_period-k))
print(len(s))
idx3 = 0
for idx2 in range(k,len(s)):
train_seq = [legend[i] for i in s[idx2-k:idx2]]
train_target = legend[s[idx2]]
x[idx3,:] = np.array(train_seq)
y[idx3] = train_target
idx3 += 1
predicted_onehot = []
for i in range(len(ecoli)//n - 1):
if i%400 == 0:
print(i)
clf.partial_fit(x[n*i:n*(i+1)], y[n*i:n*(i+1)], np.unique(y))
predicted_onehot += list(clf.predict_proba(x[n*(i+1):n*(i+2)]))
predicted_onehot += list(clf.predict_proba(x[n*(len(ecoli)//n):]))
256+6+1
char_list = [97, 103, 99, 116]
def RF_Warmcompress(inp, bitout):
initfreqs = fqt.FlatFrequencyTable(257)
model = fqt.SimpleFrequencyTable(initfreqs) # For the first 200,000
enc = arith.ArithmeticCoder(32)
enc.start_encode(bitout) # New line!
idx = 0
while True:
symbol = inp.read(1)
if len(symbol) == 0:
break
idx += 1
## Progress Evaluation ## only internal
if idx % (len(ecoli)//10) == 0:
print(str(10*idx//(len(ecoli)//10)) + ' percent done')
clear_output(wait = True)
if idx == 263:
initfreqs = fqt.FlatFrequencyTable(257)
model = fqt.SimpleFrequencyTable(initfreqs) # reset the model
if idx >= 263:
for val, prob in enumerate(predicted_onehot[idx-263]):
model.set(char_list[val], int(prob*100000)+1)
t = model.get_total() ## New lines!
l = model.get_low(symbol[0])
h = model.get_high(symbol[0])
enc.storeRegion(l,h,t)
if idx < 263: # back up before LSTM model
model.increment(symbol[0])
t = model.get_total() ## New lines!
l = model.get_low(256)
h = model.get_high(256)
enc.storeRegion(l,h,t)
enc.finish_encode(bitout) # New line!
inputfile, outputfile = 'data\ecoli\Ecoli.txt', 'data\ecoli\Ecoli_NB6_256.txt'
#Perform file compression
with open(inputfile, "rb") as inp, \
contextlib.closing(arith.BitOutputStream(open(outputfile, "wb"))) as bitout:
RF_Warmcompress(inp, bitout)
###Output
100 percent done
###Markdown
Let's try cheating to confirm that we are not crazy: say we know the correct answer with 90% certainty; we want to show that the compression ratio is incredible.
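As a rough back-of-the-envelope check (ignoring the escape symbol and the integer rounding of the frequency table): if the model always assigns probability 0.9 to the true base, arithmetic coding should need about -log2(0.9) ≈ 0.15 bits per base instead of the 2 bits per base of a flat model over a/c/g/t.
###Code
import numpy as np
p_true = 0.9                      # probability assigned to the correct base
bits_per_base = -np.log2(p_true)  # ideal code length per base under this cheat model
n_bases = len(ecoli)              # E. coli sequence loaded above
print(bits_per_base, bits_per_base * n_bases / 8 / 1e6, 2 * n_bases / 8 / 1e6)  # bits/base, ~MB, MB at 2 bits/base
###Output
_____no_output_____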
###Code
cheat_code = [[0.9,0.033,0.033,0.034],[0.033,0.9,0.033,0.034],[0.033,0.033,0.9,0.034],[0.033,0.033,0.034,0.9]]
predicted_toohot = [np.array(cheat_code[int(i)]) for i in list(y)]
len(ecoli)-len(predicted_toohot)
y[0:8]
[legend[temp_dict[i]] for i in ecoli[6:20]]
predicted_toohot[0]
char_list = [97, 103, 99, 116]
def Cheatcomp(inp, bitout):
initfreqs = fqt.FlatFrequencyTable(257)
model = fqt.SimpleFrequencyTable(initfreqs) # For the first 200,000
enc = arith.ArithmeticCoder(32)
enc.start_encode(bitout) # New line!
idx = 0
while True:
symbol = inp.read(1)
if len(symbol) == 0:
break
idx += 1
## Progress Evaluation ## only internal
if idx % (len(ecoli)//10) == 0:
print(str(10*idx//(len(ecoli)//10)) + ' percent done')
clear_output(wait = True)
if idx == 7:
initfreqs = fqt.FlatFrequencyTable(257)
model = fqt.SimpleFrequencyTable(initfreqs) # reset the model
if idx >= 7:
for val, prob in enumerate(predicted_toohot[idx-7]):
model.set(char_list[val], int(prob*100000)+1)
t = model.get_total() ## New lines!
l = model.get_low(symbol[0])
h = model.get_high(symbol[0])
enc.storeRegion(l,h,t)
if idx < 7: # back up before LSTM model
model.increment(symbol[0])
t = model.get_total() ## New lines!
l = model.get_low(256)
h = model.get_high(256)
enc.storeRegion(l,h,t)
enc.finish_encode(bitout) # New line!
inputfile, outputfile = 'data\ecoli\Ecoli.txt', 'data\ecoli\Ecoli_cheat_real.txt'
#Perform file compression
with open(inputfile, "rb") as inp, \
contextlib.closing(arith.BitOutputStream(open(outputfile, "wb"))) as bitout:
Cheatcomp(inp, bitout)
###Output
100 percent done
|
Unidade8-Machine-Learning/Regressao/regression_project_br.ipynb | ###Markdown
 Projeto - Regressão 1. Considerações iniciaisNeste notebook vai praticar o desenvolvimento (pipeline) de uma solução utilizando Aprendizado de Máquina (Machine Learning). Mais espeficicamente, iremos criar um algoritmo de **regressão** utilizando o conjunto de dados usando a biblioteca [scikit-learn](https://scikit-learn.org/stable/index.html) do Python. Propositalmente, evitaremos adentrar em algoritmos e explicações mais complexas, pois o foco é percorrer o fluxo por inteiro, ou o máximo dele.De forma bem simples, as etapas são:1. Obter nossos dados2. Limpar e organizar nossos dados3. Explorar e visualizar em busca de padrões ou tendências.4. Aplicar um modelo5. Interpretar os resultados 1.1. Por que usar o Scikit-Learn?O ecossistema de bibiotecas do Python voltadas para aprendizado de máquina é vasto e bem consolidado. Uma das mais conhecidas é o [scikit-learn](https://scikit-learn.org/stable/index.html), que possui uma grande quantidade de algoritmos de aprendizado de máquina implementados - prontos para o uso. Simples e eficiente, o pacote do scikit-learn permite que o cientista rapidamente treine um modelo e interprete seus resultados.Além de bem documentado, o scikit-learn também possui um comunidade de usuários bastante ativa, o que acaba refletindo em uma quantidade enorme de tutoriais, notebooks e exemplos pela web. A uniformidade da biblioteca (o processo de entrada e construção de modelos é semelhante) reflete em rapidez no desenvolvimento de soluções, visto que no momento que o usuário entende o fluxo de trabalho, ele rapidamente consegue "chavear" entre diversos modelos. Caso um modelo não esteja indo bem, basta usar outro. 1.2. Prepare seu ambienteSe necessário, prossiga com a instalação da biblioteca do jeito Python. Sem esforço, use o gerenciador de pacotes da linguagem:```$ pip install scikit-learn```Lembre-se, ninguém brilha sozinho no Python. Pandas, numpy, matplotlib, searborn, etc são exemplos de bibliotecas de apoio. Na verdade, o bom cientista de dados trabalha com várias bibliotecas ao mesmo tempo. 1.3. Conjunto de dadosO conjunto de dados que utilizaremos contém informações sobre diferentes tipos de casas na cidade americana de Boston. Este conjunto de dados foi disponilizado no repositório de Aprendizado de Máquina da UCI. Aqui, o conjunto de dados está no arquivo [housing.csv](housing.csv).Existem 506 amostras e 13 variáveis (colunas) neste conjunto de dados. O objetivo é prever o valor dos preços de casas usando estas variáveis. 1.3.1. Descrição do dadosNo conjunto de dados, encontraremos as seguintes variáveis:- CRIM: Taxa de criminalidade per capita no bairro- ZN: Proporção de terrenos residenciais com lotes com mais de 25.000 m2 no bairro- INDUS: Proporção de acres comerciais não comerciais na cidade- CHAS: Variável fictícia Charles River (1 se o terreno é limitado por rio; 0 caso contrário)- NOX: Concentração de óxido nítrico (partes por 10 milhões) na cidade- RM: Número médio de quartos por moradia no bairro- AGE: Proporção de unidades ocupadas em propriedades construídas antes de 1940- DIS: Distâncias ponderadas para cinco centros de emprego em Boston- RAD: Índice de acessibilidade às rodovias- TAX: Taxa de imposto sobre a propriedade de valor total (x10.000)- PTRATIO: Proporcaoo de alunos por professor no bairro- LSTAT: Porcentagem da população considerada classe baixa no bairro- MEDV: Valor médio das casas ocupadas pelos proprietários (x1000)A nossa variável alvo (que queremos prever) é a MEDV, ou seja, o valor médio das casas. 
We must find an algorithm that takes the 12 remaining variables and predicts the value of MEDV. This is the classic **regression** problem.

2. Procedures

Start by importing the libraries you will use along the way. Remember that a data scientist works with several tools.

__TASK 01__
1. Import the libraries you will use
###Code
# Insert your answer here
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.
import pandas.util.testing as tm
###Markdown
2.1. Obtaining the data

You can't do anything without data. Here, the dataset has already been gathered and is in a CSV file. We need to load it into our code as a pandas *dataframe*.

__TASK 02__
1. Import pandas
2. Import the CSV file into a dataframe called *houses*.
3. Print the head of the dataframe.
###Code
# Insert your answer here
!git clone https://github.com/awarischool/br-regression-project
houses = pd.read_csv('br-regression-project/houses.csv')
houses.head()
###Output
_____no_output_____
###Markdown
Compare the dataframe information with the data dictionary presented at the beginning of this notebook. Does it make sense? Are there missing variables?

Knowing the data and the problem we are going to work on is as important as knowing how algorithms work. This task is also known as **Exploratory Data Analysis (EDA)**, in which the scientist makes a first, superficial investigation of the data.

You will notice that this investigation is not a fixed, inflexible step in the workflow. On the contrary, EDA happens during almost the whole process. We are constantly learning about our data.

__TASK 03__
1. Confirm that the dataframe has 506 samples and 13 variables
###Code
# Insert your answer here
houses.shape
###Output
_____no_output_____
###Markdown
2.2. Clean and organize the data

Corrupted data, failures while loading the information, or incomplete extraction can, in the real world, produce missing values. Knowing how to handle this kind of situation is important. First we must check whether the dataset has missing values.

__TASK 04__
1. Check whether any variable has *missing* values.

**HINT**: This task can be done in several ways. What matters is being able to see whether *missing* values exist.
###Code
# Insert your answer here
houses.isnull().sum()
###Output
_____no_output_____
###Markdown
So, did you manage to find *missing* values? Note that there is no standard way of doing this search.

Why should we look for them? Answer: identifying and handling this problem (filling in or removing) tends to produce reliable and robust Machine Learning models.

2.3. Explore and visualize in search of patterns or trends

Let's continue the EDA by extracting several statistical parameters from the dataset.

__TASK 05__
1. Generate descriptive statistics that summarize the central tendency, dispersion and shape of the dataset's distribution.
###Code
# Insert your answer here
houses.describe()
###Output
_____no_output_____
###Markdown
Let's check the relationship of some predictor variables with the target variable (MEDV). In this case, we will use 'RM', 'LSTAT' and 'PTRATIO'.

__TASK 06__
1. Plot the relationship of the variables 'RM', 'LSTAT' and 'PTRATIO' with the target variable.

**HINT**: Use *sns.pairplot()*.
###Code
# Insert your answer here
colunas = ['RM', 'LSTAT', 'PTRATIO', 'MEDV']
sns.pairplot(data=houses[colunas])
plt.show()
###Output
_____no_output_____
###Markdown
Could you see a relationship between any of the variables?

Look at 'RM' and 'MEDV'. Everything indicates a positive relationship between the number of rooms and the value of the houses: the more rooms, the higher the house value.

For 'LSTAT' and 'MEDV' the opposite seems to happen: the larger the share of the population considered lower class in the neighborhood, the lower the property values, indicating that the house is in a poorer part of the city.

__TASK 07__
1. Plot the relationship of other variables with the target variable.
2. Investigate as much as you like.
###Code
# Insert your answer here.
# Relationship between property values and the crime rate
ax4 = sns.pairplot(houses, x_vars='CRIM', y_vars='MEDV')
ax4.fig.set_size_inches(10, 7)
plt.title('Valor do Imóvel x Taxa Criminalidade')
###Output
_____no_output_____
###Markdown
Note that there is a correlation between the variables, although a weak one. Neighborhoods with a higher crime rate show lower values. However, it is possible to find lower-priced properties even in neighborhoods with a low crime rate.

We will now see whether there is any relationship between the property's location with respect to the river and its market value.
###Code
ax = sns.pairplot(houses, x_vars='CHAS', y_vars='MEDV')
ax.fig.set_size_inches(10,7)
plt.title('Valor Médio x Proximidade com o Rio')
plt.xlabel('Proximidade com o Rio')
plt.ylabel('Valor Médio')
###Output
_____no_output_____
###Markdown
Note that there is no correlation between the property value and whether it is located next to the river.

- Property value vs. nitric oxide concentration
###Code
ax = sns.pairplot(houses, y_vars='MEDV', x_vars='NOX')
ax.fig.set_size_inches(10,7)
plt.title('Valor do Imóvel x Concentração de Óxido Nítrico')
plt.xlabel('Concentração de NO')
plt.ylabel('Valor do Imóvel')
###Output
_____no_output_____
###Markdown
In this case there is a weak correlation between the two variables, but it is not negligible. Note that for the highest NO concentrations the property value is lower. Even so, it is possible to find properties of the same value in places with lower NO concentration.

- Property value vs. property tax rate
###Code
ax = sns.pairplot(houses, x_vars='TAX', y_vars='MEDV')
ax.fig.set_size_inches(10, 7)
plt.title('Valor do Imóvel x Taxa de Impostos sobre Propriedades')
plt.xlabel('Taxa de Impostos sobre Propriedades')
plt.ylabel('Valor do Imóvel')
###Output
_____no_output_____
###Markdown
These variables also show a weak correlation. Indeed, the properties with the highest tax rates (from 600 upwards) have lower values. In other words, the property value is not the main driver of the average tax rate.

There is another way to check the relationship between the variables: we can use their correlation.

__TASK 08__
1. Extract the correlation matrix of the variables.
2. Plot the correlation matrix using seaborn.
###Code
# Insert your answer here
corr = houses.corr()
corr
# Plot the correlation matrix using a heatmap
fig = plt.figure(figsize=(15,8))
sns.heatmap(data=corr, annot=True)
plt.show()
###Output
_____no_output_____
###Markdown
The correlation coefficient between variables ranges from -1 to 1. If the value is close to 1, there is a strong positive correlation between the two variables. When it is close to -1, the variables have a strong negative correlation. Interpret the plot!

We should pay attention to the variables that have a strong correlation with the target variable, since they may carry significant weight in our model's performance. In Machine Learning, this selection/exclusion of predictor variables is called *feature selection*. In this step, the data scientist may choose to remove or combine variables in order to improve (tune) the model. We will not carry out this step for lack of time, but you are welcome to do it yourself once the tasks are finished.

Shall we take a closer look at the relationship of LSTAT and RM with MEDV?

__TASK 09__
1. Plot scatter plots of LSTAT and RM against MEDV.
2. Use subplots, matplotlib and seaborn
###Code
# Insert your answer here
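# A minimal sketch of one possible answer (not an official solution); assumes the
# `houses` dataframe, plt and sns from the earlier cells.
fig, axes = plt.subplots(1, 2, figsize=(14, 5))
sns.scatterplot(x=houses['RM'], y=houses['MEDV'], ax=axes[0])
axes[0].set_title('MEDV vs RM')
sns.scatterplot(x=houses['LSTAT'], y=houses['MEDV'], ax=axes[1])
axes[1].set_title('MEDV vs LSTAT')
plt.show()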
###Output
_____no_output_____
###Markdown
Confirming:
- Prices increase as RM increases, roughly linearly. There are few outliers and the data seem to be capped at 50.
- Prices tend to decrease as LSTAT increases, although the trend does not look exactly linear.

Moving on, let's analyze the distribution of the target variable MEDV.

__TASK 10__
1. Plot the distribution of the target variable.
2. Use *seaborn.distplot()*.
###Code
# Insert your answer here
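# A minimal sketch of one possible answer (not an official solution).
sns.distplot(houses['MEDV'], bins=30)
plt.title('Distribution of MEDV')
plt.show()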
###Output
_____no_output_____
###Markdown
We can see that the target variable has a distribution close to normal. However, we can note the presence of some outliers around 50. Checking the distribution not only of the target variable but of the whole dataset is an essential task: outliers can confuse our model and reduce its performance. This is also a task that can take quite some time, and you are encouraged to do it.

2.4 Apply a model

After this superficial investigation*, let's move on to splitting our data into training and test sets. Right after that, we will train our model.

\*Note that we called the previous step **Exploratory Data Analysis**, but the truth is that we didn't come close to fully understanding the dataset. That's normal, since the goal here is to understand the process, not to exhaust the subject.

__TASK 11__
1. Split the data into training and test sets.
2. Use 80% of the data for training.

**HINT**: Use scikit-learn's *train_test_split()* function
###Code
# Insert your answer here
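# A minimal sketch of one possible answer (not an official solution).
from sklearn.model_selection import train_test_split
X = houses.drop(columns=['MEDV'])   # predictor variables
y = houses['MEDV']                  # target variable
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape)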
###Output
_____no_output_____
###Markdown
Great! We loaded our data, checked whether any cleaning was needed, did a superficial investigation (EDA) and split the data into training and test sets.

**FINALLY** the big moment of training a Machine Learning algorithm. In this case, we will use Linear Regression. Without going into detail, linear regression basically generates an equation that describes the statistical relationship between the predictor variables and the target variable. Linear Regression will find the line that best relates the input variables (X) to the output variable (Y).

__TASK 12__
1. Use the Linear Regression algorithm to train a model
2. Pass the X and Y training data to the model.
###Code
# Insert your answer here
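# A minimal sketch of one possible answer (not an official solution);
# assumes X_train and y_train from the previous task.
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train, y_train)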
###Output
_____no_output_____
###Markdown
Yes, with a few lines of code you have a trained model in scikit-learn. Did you notice that we spent more time preparing and understanding the data than training the model? Very good, isn't it! That is exactly how a data scientist spends most of their time - preparing and understanding the data.

2.5. Interpret the results

Let's check the performance of our model.

__TASK 13__
1. Use the training data to predict the target variable.
2. As a performance metric, use the R^2 score
###Code
# Insert your answer here
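# A minimal sketch of one possible answer (not an official solution);
# assumes `model`, X_train and y_train from the previous tasks.
from sklearn.metrics import r2_score
y_train_pred = model.predict(X_train)
print('R^2 (train):', r2_score(y_train, y_train_pred))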
###Output
_____no_output_____
###Markdown
__TASK 14__
1. Use the test data to predict the target variable.
2. As a performance metric, use the R^2 score
###Code
# Insert your answer here
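# A minimal sketch of one possible answer (not an official solution);
# assumes `model`, X_test and y_test from the previous tasks.
from sklearn.metrics import r2_score
y_test_pred = model.predict(X_test)
print('R^2 (test):', r2_score(y_test, y_test_pred))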
###Output
_____no_output_____ |
EcoFOCI_Moorings/ERDDAP_Automated_Tools/ERDDAP_FullMooringRecord.ipynb | ###Markdown
ERDDAP Table/Grid 2 Mooring Site
- use Gridded hourly data to build complete mooring site record
  + will need to split on mooring type (profile vs non) and synthesize multi-deployments (A&B&C)
- use table data (from final and preliminary) to build sfc/shallowest and btm/deepest Salinity and Temperature
  + btm salinity (with collocated temperature)
  + btm temperature (no salinity... this may be deeper than the salinity records)
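For the table-data half of the plan, a minimal sketch of what pulling a bottom (deepest instrument) temperature/salinity record from a single deployment could look like; the dataset ID and the returned column labels below are illustrative assumptions, not values verified against this server:
###Code
# Sketch only -- the dataset ID and column labels are illustrative assumptions.
from erddapy import ERDDAP
e_tbl = ERDDAP(server='http://ecofoci-field.pmel.noaa.gov:8080/erddap')
e_tbl.protocol = 'tabledap'
e_tbl.dataset_id = 'datasets_Mooring_18bs2c_final'   # hypothetical deployment ID
e_tbl.variables = ['time', 'depth', 'temperature', 'salinity']
df_tbl = e_tbl.to_pandas()
depth_col = [c for c in df_tbl.columns if c.startswith('depth')][0]
df_btm = df_tbl[df_tbl[depth_col] == df_tbl[depth_col].max()]   # deepest instrument only
###Output
_____no_output_____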
###Code
#identify mooring locations
from erddapy import ERDDAP
import pandas as pd
import numpy as np
import xarray as xa
import matplotlib.pyplot as plt
import cmocean
server_url = 'http://ecofoci-field.pmel.noaa.gov:8080/erddap'
###Output
_____no_output_____
###Markdown
Loop through all datasets
###Code
e = ERDDAP(server=server_url)
df = pd.read_csv(e.get_search_url(response='csv', search_for='datasets_Mooring final 1hr -Full'))
print(f"{df['Dataset ID'].count()} datasets to be looped through")
print("Names:", df['Dataset ID'].values)
import warnings
warnings.filterwarnings("ignore")
write_data = True
def erddap_xmlbuild(xmlfilename, dataset_id, MooringName=None, datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/'):
f=open(xmlfilename, "a+")
f.write(f"""
<dataset type="EDDGridFromNcFiles" datasetID="1hr_gridded_{dataset_id}" active="true">
<reloadEveryNMinutes>10080</reloadEveryNMinutes>
<updateEveryNMillis>10000</updateEveryNMillis>
<fileDir>{datahost_path}</fileDir>
<fileNameRegex>{dataset_id}.nc</fileNameRegex>
<recursive>false</recursive>
<pathRegex>.*</pathRegex>
<metadataFrom>last</metadataFrom>
<matchAxisNDigits>20</matchAxisNDigits>
<fileTableInMemory>false</fileTableInMemory>
<accessibleViaFiles>false</accessibleViaFiles>
<!-- sourceAttributes>
</sourceAttributes -->
<addAttributes>
<att name="cdm_data_type">Grid</att>
<att name="Conventions">COARDS, CF-1.6, ACDD-1.3</att>
<att name="infoUrl">https://pmel.noaa.gov/</att>
<att name="institution">NOAA/PMEL - EcoFOCI</att>
<att name="keywords">gridded, active, available, chemistry, chlorophyll, Chlorophyll_Fluorescence, color, concentration, concentration_of_chlorophyll_in_sea_water, data, date, density, depth, dissolved, dissolved o2, downwelling, downwelling_photosynthetic_photon_radiance_in_sea_water, earth, Earth Science > Oceans > Ocean Chemistry > Chlorophyll, Earth Science > Oceans > Ocean Optics > Photosynthetically Active Radiation, Earth Science > Oceans > Ocean Optics > Radiance, Earth Science > Oceans > Salinity/Density > Salinity, latitude, local, longitude, number, O2, ocean, ocean color, oceans, optical, optical properties, optics, oxygen, Oxygen_Concentration, Oxygen_Saturation, PAR, photon, photosynthetic, photosynthetically, practical, pressure, properties, radiance, radiation, salinity, saturation, science, sea, sea_water_practical_salinity, seawater, serial, Serial_Number, source, statistics, temperature, water</att>
<att name="keywords_vocabulary">GCMD Science Keywords</att>
<att name="license">[standard]</att>
<att name="standard_name_vocabulary">CF Standard Name Table v55</att>
<att name="summary">QC'd (final) mooring data from {MooringName}. This dataset has been gridded to 1hr resolution (with pressure left at discrete depths). It uses only 1hr_gridded_datasets. A depth field exisists for each known instrument depth, even if the parameter wasn't measured at that depth. </att>
<att name="title">Gridded Mooring Data from {MooringName} - final</att>
</addAttributes>
<axisVariable>
<sourceName>depth</sourceName>
<destinationName>depth</destinationName>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="ioos_category">Location</att>
<att name="long_name">Depth</att>
<att name="standard_name">depth</att>
<att name="units">m</att>
</addAttributes>
</axisVariable>
<axisVariable>
<sourceName>date</sourceName>
<destinationName>time</destinationName>
<!-- sourceAttributes>
<att name="calendar">proleptic_gregorian</att>
<att name="units">days since 1970-01-01 00:00:00</att>
</sourceAttributes -->
<addAttributes>
<att name="ioos_category">Time</att>
<att name="long_name">Date</att>
<att name="source_name">date</att>
<att name="standard_name">time</att>
<att name="units">days since 1970-01-01 00:00:00</att>
</addAttributes>
</axisVariable>
<dataVariable>
<sourceName>latitude</sourceName>
<destinationName>latitude</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">90.0</att>
<att name="colorBarMinimum" type="double">-90.0</att>
<att name="ioos_category">Location</att>
<att name="long_name">Latitude</att>
<att name="standard_name">latitude</att>
<att name="units">degrees_north</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>longitude</sourceName>
<destinationName>longitude</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">180.0</att>
<att name="colorBarMinimum" type="double">-180.0</att>
<att name="ioos_category">Location</att>
<att name="long_name">Longitude</att>
<att name="standard_name">longitude</att>
<att name="units">degrees_east</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>temperature</sourceName>
<destinationName>temperature</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="epic_key">T_20</att>
<att name="long_name">Sea temperature in-situ ITS-90 scale</att>
<att name="standard_name">sea_water_temperature</att>
<att name="units">degree_C</att>
<att name="colorBarMaximum" type="double">20.0</att>
<att name="colorBarMinimum" type="double">-2.0</att>
<att name="ioos_category">Temperature</att>
<att name="standard_name">sea_water_temperature</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>pressure</sourceName>
<destinationName>pressure</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="ioos_category">Sea Level</att>
<att name="long_name">Sea water pressure, equals 0 at sea-level</att>
<att name="standard_name">sea_water_pressure_due_to_sea_water</att>
<att name="units">dbar</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>salinity</sourceName>
<destinationName>salinity</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">37.0</att>
<att name="colorBarMinimum" type="double">32.0</att>
<att name="ioos_category">Salinity</att>
<att name="long_name">Sea Water Practical Salinity</att>
<att name="standard_name">sea_water_practical_salinity</att>
<att name="units">PSU</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>PAR</sourceName>
<destinationName>PAR</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">70.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Optical Properties</att>
<att name="long_name">Downwelling Photosynthetic Photon Radiance In Sea Water</att>
<att name="standard_name">downwelling_photosynthetic_photon_radiance_in_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Chlorophyll_Fluorescence</sourceName>
<destinationName>Chlorophyll_Fluorescence</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">30.0</att>
<att name="colorBarMinimum" type="double">0.03</att>
<att name="colorBarScale">Log</att>
<att name="ioos_category">Ocean Color</att>
<att name="long_name">Concentration Of Chlorophyll In Sea Water</att>
<att name="standard_name">concentration_of_chlorophyll_in_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Oxygen_Saturation</sourceName>
<destinationName>Oxygen_Saturation</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">100.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Dissolved O2</att>
<att name="units">percent</att>
<att name="standard_name">oxygen_saturation_over_air</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Oxygen_Concentration</sourceName>
<destinationName>Oxygen_Concentration</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="ioos_category">Dissolved O2</att>
<att name="units">µmole/kg</att>
<att name="standard_name">volume_oxygen_in_solution_volume_of_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Serial_Number</sourceName>
<destinationName>Serial_Number</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">100.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Statistics</att>
<att name="long_name">Serial Number</att>
</addAttributes>
</dataVariable>
</dataset>
""")
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature) so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
first_dataset=True
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
for groups, dfg in df.groupby('subname'):
if groups in ['bsm','bsm2','bs2','bs2']: #M2
for i,dfs in dfg.iterrows():
print(dfs['Dataset ID'])
e.constraints = None
e.protocol = 'griddap'
e.dataset_id=dfs['Dataset ID']
e.response = 'nc'
ds = e.to_xarray(decode_times=True)
if first_dataset:
xdf = ds
first_dataset = False
else:
xdf = xa.concat([xdf,ds],dim='time')
else:
pass
xdf = xdf.sortby('time')
xdf = xdf.sortby('depth')
xdf = xdf.where(xdf != -9999.0,np.nan)
# two moorings out simultaneously may result in duplicated measurements... improperly trimmed data would do the same - there's also the case of the beginning or end of the time period.
# in theory one might take the smaller of two temp measurements, but who is to say for any other measurements?
# so for this sake, just take the first occurrence
xdf = xdf.sel(time=~xdf.get_index("time").duplicated())
if write_data:
xdf.to_netcdf('Full_BS2_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
erddap_xmlbuild('1hr_gridded_Full_bs2_record'+'.xml', 'Full_bs2_record', MooringName='bs2', datahost_path=datahost_path)
xdf.temperature.plot(yincrease=False,vmax=15,vmin=-2,figsize=(12,3),cmap=cmocean.cm.thermal)
xdf.salinity.plot(yincrease=False,vmax=32.5,vmin=31.5,figsize=(12,3),cmap=cmocean.cm.haline)
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature) so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
for groups, dfg in df.groupby('subname'):
if groups in ['bsm','bsm2','bs2','bs2']: #M2
continue #build seperately due to multinames
elif dfg['Dataset ID'].count() == 1:
continue #no reason to build dataset, only one deployment
else:
first_dataset=True
for i,dfs in dfg.iterrows():
print(dfs['Dataset ID'])
e.constraints = None
e.protocol = 'griddap'
e.dataset_id=dfs['Dataset ID']
e.response = 'nc'
try:
ds = e.to_xarray(decode_times=True)
if first_dataset:
xdf = ds
first_dataset = False
else:
xdf = xa.concat([xdf,ds],dim='time')
except:
pass
xdf = xdf.sortby('time')
xdf = xdf.sortby('depth')
xdf = xdf.where(xdf != -9999.0,np.nan)
xdf = xdf.where(xdf['depth'] <5000).dropna(dim='depth',how='all')
# two moorings out simultaneously may result in duplicated measurements... improperly trimmed data would do the same - there's also the case of the beginning or end of the time period.
# in theory one might take the smaller of two temp measurements, but who is to say for any other measurements?
# so for this sake, just take the first occurrence
xdf = xdf.sel(time=~xdf.get_index("time").duplicated())
if write_data:
xdf.to_netcdf('Full_'+groups+'_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
erddap_xmlbuild('1hr_gridded_Full_'+groups+'_record'+'.xml', 'Full_'+groups+'_record', MooringName=groups, datahost_path=datahost_path)
###Output
1hr_gridded_datasets_Mooring_03amp1a_final
1hr_gridded_datasets_Mooring_03amp1b_final
1hr_gridded_datasets_Mooring_05amp1a_final
1hr_gridded_datasets_Mooring_06amp1a_final
1hr_gridded_datasets_Mooring_08amp1a_final
1hr_gridded_datasets_Mooring_03amp2a_final
1hr_gridded_datasets_Mooring_03amp2b_final
1hr_gridded_datasets_Mooring_05amp2a_final
1hr_gridded_datasets_Mooring_06amp2a_final
1hr_gridded_datasets_Mooring_07amp2a_final
1hr_gridded_datasets_Mooring_08amp2a_final
1hr_gridded_datasets_Mooring_03amp3a_final
1hr_gridded_datasets_Mooring_03amp3b_final
1hr_gridded_datasets_Mooring_05amp3a_final
1hr_gridded_datasets_Mooring_06amp3a_final
1hr_gridded_datasets_Mooring_07amp3a_final
1hr_gridded_datasets_Mooring_08amp3a_final
1hr_gridded_datasets_Mooring_03amp4a_final
1hr_gridded_datasets_Mooring_03amp4b_final
1hr_gridded_datasets_Mooring_05amp4a_final
1hr_gridded_datasets_Mooring_06amp4a_final
1hr_gridded_datasets_Mooring_07amp4a_final
1hr_gridded_datasets_Mooring_08amp4a_final
1hr_gridded_datasets_Mooring_00bs4s_final
1hr_gridded_datasets_Mooring_00bs4w_final
1hr_gridded_datasets_Mooring_01bs4a_final
1hr_gridded_datasets_Mooring_01bs4b_final
1hr_gridded_datasets_Mooring_02bs4a_final
1hr_gridded_datasets_Mooring_02bs4b_final
1hr_gridded_datasets_Mooring_03bs4a_final
1hr_gridded_datasets_Mooring_03bs4b_final
1hr_gridded_datasets_Mooring_04bs4a_final
1hr_gridded_datasets_Mooring_04bs4b_final
1hr_gridded_datasets_Mooring_05bs4a_final
1hr_gridded_datasets_Mooring_05bs4b_final
1hr_gridded_datasets_Mooring_06bs4a_final
1hr_gridded_datasets_Mooring_06bs4b_final
1hr_gridded_datasets_Mooring_07bs4a_final
1hr_gridded_datasets_Mooring_07bs4b_final
1hr_gridded_datasets_Mooring_08bs4a_final
1hr_gridded_datasets_Mooring_08bs4b_final
1hr_gridded_datasets_Mooring_10bs4b_final
1hr_gridded_datasets_Mooring_11bs4a_final
1hr_gridded_datasets_Mooring_12bs4a_final
1hr_gridded_datasets_Mooring_12bs4b_final
1hr_gridded_datasets_Mooring_13bs4a_final
1hr_gridded_datasets_Mooring_13bs4b_final
1hr_gridded_datasets_Mooring_14bs4b_final
1hr_gridded_datasets_Mooring_15bs4b_final
1hr_gridded_datasets_Mooring_16bs4b_final
1hr_gridded_datasets_Mooring_17bs4b_final
1hr_gridded_datasets_Mooring_18bs4b_final
1hr_gridded_datasets_Mooring_05bs5a_final
1hr_gridded_datasets_Mooring_05bs5b_final
1hr_gridded_datasets_Mooring_06bs5a_final
1hr_gridded_datasets_Mooring_06bs5b_final
1hr_gridded_datasets_Mooring_07bs5a_final
1hr_gridded_datasets_Mooring_07bs5b_final
1hr_gridded_datasets_Mooring_08bs5b_final
1hr_gridded_datasets_Mooring_09bs5a_final
1hr_gridded_datasets_Mooring_09bs5b_final
1hr_gridded_datasets_Mooring_10bs5a_final
1hr_gridded_datasets_Mooring_10bs5b_final
1hr_gridded_datasets_Mooring_11bs5a_final
1hr_gridded_datasets_Mooring_11bs5b_final
1hr_gridded_datasets_Mooring_12bs5a_final
1hr_gridded_datasets_Mooring_13bs5a_final
1hr_gridded_datasets_Mooring_14bs5a_final
1hr_gridded_datasets_Mooring_15bs5a_final
1hr_gridded_datasets_Mooring_16bs5a_final
1hr_gridded_datasets_Mooring_17bs5a_final
1hr_gridded_datasets_Mooring_18bs5a_final
1hr_gridded_datasets_Mooring_05bs8a_final
1hr_gridded_datasets_Mooring_05bs8b_final
1hr_gridded_datasets_Mooring_06bs8a_final
1hr_gridded_datasets_Mooring_07bs8a_final
1hr_gridded_datasets_Mooring_08bs8a_final
1hr_gridded_datasets_Mooring_09bs8a_final
1hr_gridded_datasets_Mooring_10bs8a_final
1hr_gridded_datasets_Mooring_11bs8a_final
1hr_gridded_datasets_Mooring_12bs8a_final
1hr_gridded_datasets_Mooring_13bs8a_final
1hr_gridded_datasets_Mooring_14bs8a_final
1hr_gridded_datasets_Mooring_15bs8a_final
1hr_gridded_datasets_Mooring_16bs8a_final
1hr_gridded_datasets_Mooring_17bs8a_final
1hr_gridded_datasets_Mooring_18bs8a_final
1hr_gridded_datasets_Mooring_95bsm3a_final
1hr_gridded_datasets_Mooring_96bsm3a_final
1hr_gridded_datasets_Mooring_97bsm3a_final
1hr_gridded_datasets_Mooring_00bsp2s_final
1hr_gridded_datasets_Mooring_01bsp2b_final
1hr_gridded_datasets_Mooring_01bsp2s_final
1hr_gridded_datasets_Mooring_02bsp2a_final
1hr_gridded_datasets_Mooring_02bsp2b_final
1hr_gridded_datasets_Mooring_02bsp2c_final
1hr_gridded_datasets_Mooring_03bsp2a_final
1hr_gridded_datasets_Mooring_03bsp2b_final
1hr_gridded_datasets_Mooring_03bsp2c_final
1hr_gridded_datasets_Mooring_04bsp2a_final
1hr_gridded_datasets_Mooring_04bsp2b_final
1hr_gridded_datasets_Mooring_05bsp2a_final
1hr_gridded_datasets_Mooring_05bsp2b_final
1hr_gridded_datasets_Mooring_06bsp2a_final
1hr_gridded_datasets_Mooring_06bsp2d_final
1hr_gridded_datasets_Mooring_08bsp2b_final
1hr_gridded_datasets_Mooring_09bsp2a_final
1hr_gridded_datasets_Mooring_09bsp2b_final
1hr_gridded_datasets_Mooring_10bsp2a_final
1hr_gridded_datasets_Mooring_10bsp2b_final
1hr_gridded_datasets_Mooring_11bsp2a_final
1hr_gridded_datasets_Mooring_11bsp2b_final
1hr_gridded_datasets_Mooring_12bsp2a_final
1hr_gridded_datasets_Mooring_12bsp2b_final
1hr_gridded_datasets_Mooring_13bsp2a_final
1hr_gridded_datasets_Mooring_13bsp2b_final
1hr_gridded_datasets_Mooring_14bsp2a_final
1hr_gridded_datasets_Mooring_15bsp2a_final
1hr_gridded_datasets_Mooring_16bsp2a_final
1hr_gridded_datasets_Mooring_16bsp2b_final
1hr_gridded_datasets_Mooring_17bsp2a_final
1hr_gridded_datasets_Mooring_17bsp2b_final
1hr_gridded_datasets_Mooring_18bsp2a_final
1hr_gridded_datasets_Mooring_18bsp2b_final
###Markdown
Now do similar but only for deepest T/S**Deepest Temp/Sal**- M2- M4- M5- M8- C2**Shallowest Temp**- M2- M4- M5- M8**Notes:**- read in data, drop NaNs, determine max and min depths- keep depth and value so it can be masked if it's too shallow by the end user (a short masking sketch follows at the end of this section)
###Code
#read from recently created grouped data and interpolate/extrapolate to fixed bottom depth or
def btm_TS_timeseries(df,mooring_id=[None],parameter='salinity',max_depth=50,verbose=True):
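    # Build a single deepest-level T/S time series for one mooring site: for each deployment
    # whose subname is in mooring_id, select the deepest depth that has any non-missing
    # `parameter` data, skip deployments whose deepest level is shallower than max_depth,
    # and concatenate the temperature/salinity at that depth along time.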
"""
"""
#xml related
depth_threshold=50
first_dataset=True
#M2
for groups, dfg in df.groupby('subname'):
if groups in mooring_id: #M2
for i,dfs in dfg.iterrows():
if verbose:
print(dfs['Dataset ID'])
e.constraints = None
e.protocol = 'griddap'
e.dataset_id=dfs['Dataset ID']
e.response = 'nc'
try:
ds = e.to_xarray(decode_times=True)
btmsaldepth = ds[parameter].dropna(dim='depth', how='all').depth.max()
if btmsaldepth < max_depth:
continue
if first_dataset:
xdf = ds.sel(depth=btmsaldepth)[['temperature','salinity']]
first_dataset = False
else:
xdf = xa.concat([xdf,ds.sel(depth=btmsaldepth)[['temperature','salinity']]],dim='time')
except:
pass
xdf = xdf.sortby('time')
xdf = xdf.where(xdf != -9999.0,np.nan)
return xdf
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bsm','bsm2','bs2'],parameter='salinity',max_depth=50,verbose=False)
fig, ax = plt.subplots(3,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.salinity.plot(ax=ax[1])
xdf.depth.plot(marker='+',ax=ax[2])
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bsm','bsm2','bs2'],parameter='temperature',max_depth=50,verbose=False)
xdf.to_netcdf('bs2_bottomtemperature_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
fig, ax = plt.subplots(2,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.depth.plot(marker='+',ax=ax[1])
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bs4'],parameter='temperature',max_depth=50,verbose=False)
xdf.to_netcdf('bs4_bottomtemperature_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
fig, ax = plt.subplots(2,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.depth.plot(marker='+',ax=ax[1])
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bs5'],parameter='temperature',max_depth=50,verbose=False)
xdf.to_netcdf('bs5_bottomtemperature_record'+'.nc',encoding={'time':{'units':'days since 1900-01-01'}})
fig, ax = plt.subplots(2,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.depth.plot(marker='+',ax=ax[1])
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bs8'],parameter='temperature',max_depth=50,verbose=False)
xdf = xdf.resample(time='1H').median()
xdf.to_netcdf('bs8_bottomtemperature_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
fig, ax = plt.subplots(2,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.depth.plot(marker='+',ax=ax[1])
###Output
_____no_output_____
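###Markdown
A hedged illustration (not part of the original workflow) of the end-user masking mentioned in the notes above: because the source depth is kept alongside the values, records that came from an instrument shallower than some cutoff can simply be masked out. The 60 m cutoff below is an arbitrary assumption.
###Code
# Sketch only: hide bottom-temperature values whose source depth is shallower than an assumed 60 m cutoff
masked = xdf.temperature.where(xdf.depth >= 60)
masked.plot(figsize=(12, 3))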
###Markdown
ERDDAP Table/Grid 2 Mooring Site- use Gridded hourly data to build complete mooring site record + will need to split on mooring type (profile vs non) and synthesize multi-deployments (A&B&C)- use table data (from final and preliminary) to build sfc/shallowest and btm/deepest Salinity and Temperature + btm salinity (with collocated temperature) + btm temperature (no salinity... this may be deeper than the salinity records)
###Code
#identify mooring locations
from erddapy import ERDDAP
import pandas as pd
import numpy as np
import xarray as xa
import matplotlib.pyplot as plt
import cmocean
server_url = 'http://ecofoci-field.pmel.noaa.gov:8080/erddap'
###Output
_____no_output_____
###Markdown
Loop through all datasets
###Code
e = ERDDAP(server=server_url)
df = pd.read_csv(e.get_search_url(response='csv', search_for='datasets_Mooring AND final AND 1hr_gridded -Full'))
print(f"{df['Dataset ID'].count()} datasets to be looped through")
print("Names:", df['Dataset ID'].values)
import warnings
warnings.filterwarnings("ignore")
def erddap_xmlbuild(xmlfilename, dataset_id, MooringName=None, datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/'):
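    # Append an ERDDAP <dataset> entry (type EDDGridFromNcFiles) for `dataset_id` to `xmlfilename`,
    # pointing at the file {datahost_path}{dataset_id}.nc and labelled with `MooringName`.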
f=open(xmlfilename, "a+")
f.write(f"""
<dataset type="EDDGridFromNcFiles" datasetID="1hr_gridded_{dataset_id}" active="true">
<reloadEveryNMinutes>10080</reloadEveryNMinutes>
<updateEveryNMillis>10000</updateEveryNMillis>
<fileDir>{datahost_path}</fileDir>
<fileNameRegex>{dataset_id}.nc</fileNameRegex>
<recursive>false</recursive>
<pathRegex>.*</pathRegex>
<metadataFrom>last</metadataFrom>
<matchAxisNDigits>20</matchAxisNDigits>
<fileTableInMemory>false</fileTableInMemory>
<accessibleViaFiles>false</accessibleViaFiles>
<!-- sourceAttributes>
</sourceAttributes -->
<addAttributes>
<att name="cdm_data_type">Grid</att>
<att name="Conventions">COARDS, CF-1.6, ACDD-1.3</att>
<att name="infoUrl">https://pmel.noaa.gov/</att>
<att name="institution">NOAA/PMEL - EcoFOCI</att>
<att name="keywords">gridded, active, available, chemistry, chlorophyll, Chlorophyll_Fluorescence, color, concentration, concentration_of_chlorophyll_in_sea_water, data, date, density, depth, dissolved, dissolved o2, downwelling, downwelling_photosynthetic_photon_radiance_in_sea_water, earth, Earth Science > Oceans > Ocean Chemistry > Chlorophyll, Earth Science > Oceans > Ocean Optics > Photosynthetically Active Radiation, Earth Science > Oceans > Ocean Optics > Radiance, Earth Science > Oceans > Salinity/Density > Salinity, latitude, local, longitude, number, O2, ocean, ocean color, oceans, optical, optical properties, optics, oxygen, Oxygen_Concentration, Oxygen_Saturation, PAR, photon, photosynthetic, photosynthetically, practical, pressure, properties, radiance, radiation, salinity, saturation, science, sea, sea_water_practical_salinity, seawater, serial, Serial_Number, source, statistics, temperature, water</att>
<att name="keywords_vocabulary">GCMD Science Keywords</att>
<att name="license">[standard]</att>
<att name="standard_name_vocabulary">CF Standard Name Table v55</att>
        <att name="summary">QC'd (final) mooring data from {MooringName}. This dataset has been gridded to 1hr resolution (with pressure left at discrete depths). It uses only 1hr_gridded_datasets. A depth field exists for each known instrument depth, even if the parameter wasn't measured at that depth. </att>
<att name="title">Gridded Mooring Data from {MooringName} - final</att>
</addAttributes>
<axisVariable>
<sourceName>depth</sourceName>
<destinationName>depth</destinationName>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="ioos_category">Location</att>
<att name="long_name">Depth</att>
<att name="standard_name">depth</att>
<att name="units">m</att>
</addAttributes>
</axisVariable>
<axisVariable>
<sourceName>date</sourceName>
<destinationName>time</destinationName>
<!-- sourceAttributes>
<att name="calendar">proleptic_gregorian</att>
<att name="units">days since 1900-01-01 00:00:00</att>
</sourceAttributes -->
<addAttributes>
<att name="ioos_category">Time</att>
<att name="long_name">Date</att>
<att name="source_name">date</att>
<att name="standard_name">time</att>
<att name="units">days since 1900-01-01 00:00:00</att>
</addAttributes>
</axisVariable>
<dataVariable>
<sourceName>latitude</sourceName>
<destinationName>latitude</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">90.0</att>
<att name="colorBarMinimum" type="double">-90.0</att>
<att name="ioos_category">Location</att>
<att name="long_name">Latitude</att>
<att name="standard_name">latitude</att>
<att name="units">degrees_north</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>longitude</sourceName>
<destinationName>longitude</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">180.0</att>
<att name="colorBarMinimum" type="double">-180.0</att>
<att name="ioos_category">Location</att>
<att name="long_name">Longitude</att>
<att name="standard_name">longitude</att>
<att name="units">degrees_east</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>temperature</sourceName>
<destinationName>temperature</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="epic_key">T_20</att>
<att name="long_name">Sea temperature in-situ ITS-90 scale</att>
<att name="standard_name">sea_water_temperature</att>
<att name="units">degree_C</att>
<att name="colorBarMaximum" type="double">20.0</att>
<att name="colorBarMinimum" type="double">-2.0</att>
<att name="ioos_category">Temperature</att>
<att name="standard_name">sea_water_temperature</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>pressure</sourceName>
<destinationName>pressure</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="ioos_category">Sea Level</att>
<att name="long_name">Sea water pressure, equals 0 at sea-level</att>
<att name="standard_name">sea_water_pressure_due_to_sea_water</att>
<att name="units">dbar</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>salinity</sourceName>
<destinationName>salinity</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">37.0</att>
<att name="colorBarMinimum" type="double">32.0</att>
<att name="ioos_category">Salinity</att>
<att name="long_name">Sea Water Practical Salinity</att>
<att name="standard_name">sea_water_practical_salinity</att>
<att name="units">PSU</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>PAR</sourceName>
<destinationName>PAR</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">70.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Optical Properties</att>
<att name="long_name">Downwelling Photosynthetic Photon Radiance In Sea Water</att>
<att name="standard_name">downwelling_photosynthetic_photon_radiance_in_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Chlorophyll_Fluorescence</sourceName>
<destinationName>Chlorophyll_Fluorescence</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">30.0</att>
<att name="colorBarMinimum" type="double">0.03</att>
<att name="colorBarScale">Log</att>
<att name="ioos_category">Ocean Color</att>
<att name="long_name">Concentration Of Chlorophyll In Sea Water</att>
<att name="standard_name">concentration_of_chlorophyll_in_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Oxygen_Saturation</sourceName>
<destinationName>Oxygen_Saturation</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="colorBarMaximum" type="double">100.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Dissolved O2</att>
<att name="units">percent</att>
<att name="standard_name">oxygen_saturation_over_air</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Oxygen_Concentration</sourceName>
<destinationName>Oxygen_Concentration</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="missing_value" type="float">-9999.0</att>
<att name="_FillValue" type="float">1.0e35</att>
<att name="ioos_category">Dissolved O2</att>
<att name="units">µmole/kg</att>
<att name="standard_name">volume_oxygen_in_solution_volume_of_sea_water</att>
</addAttributes>
</dataVariable>
<dataVariable>
<sourceName>Serial_Number</sourceName>
<destinationName>Serial_Number</destinationName>
<dataType>double</dataType>
<!-- sourceAttributes>
<att name="_FillValue" type="double">NaN</att>
</sourceAttributes -->
<addAttributes>
<att name="colorBarMaximum" type="double">100.0</att>
<att name="colorBarMinimum" type="double">0.0</att>
<att name="ioos_category">Statistics</att>
<att name="long_name">Serial Number</att>
</addAttributes>
</dataVariable>
</dataset>
""")
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
first_dataset=True
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
for groups, dfg in df.groupby('subname'):
    if groups in ['bsm','bsm2','bs2']: #M2
for i,dfs in dfg.iterrows():
print(dfs['Dataset ID'])
e.constraints = None
e.protocol = 'griddap'
e.dataset_id=dfs['Dataset ID']
e.response = 'nc'
ds = e.to_xarray(decode_times=True)
if first_dataset:
xdf = ds
first_dataset = False
else:
xdf = xa.concat([xdf,ds],dim='time')
else:
pass
xdf = xdf.sortby('time')
xdf = xdf.sortby('depth')
xdf = xdf.where(xdf != -9999.0,np.nan)
xdf.to_netcdf('Full_bs2_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
erddap_xmlbuild('1hr_gridded_Full_bs2_record'+'.xml', 'Full_bs2_record', MooringName='bs2', datahost_path=datahost_path)
xdf.temperature.plot(yincrease=False,vmax=15,vmin=-2,figsize=(12,3),cmap=cmocean.cm.thermal)
xdf.salinity.plot(yincrease=False,vmax=32.5,vmin=31.5,figsize=(12,3),cmap=cmocean.cm.haline)
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
for groups, dfg in df.groupby('subname'):
    if groups in ['bsm','bsm2','bs2']: #M2
        continue #build separately due to multiple names
elif dfg['Dataset ID'].count() == 1:
continue #no reason to build dataset, only one deployment
else:
first_dataset=True
for i,dfs in dfg.iterrows():
print(dfs['Dataset ID'])
e.constraints = None
e.protocol = 'griddap'
e.dataset_id=dfs['Dataset ID']
e.response = 'nc'
try:
ds = e.to_xarray(decode_times=True)
if first_dataset:
xdf = ds
first_dataset = False
else:
xdf = xa.concat([xdf,ds],dim='time')
except:
pass
xdf = xdf.sortby('time')
xdf = xdf.sortby('depth')
xdf = xdf.where(xdf != -9999.0,np.nan)
xdf = xdf.where(xdf['depth'] <5000).dropna(dim='depth',how='all')
xdf.to_netcdf('Full_'+groups+'_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
erddap_xmlbuild('1hr_gridded_Full_'+groups+'_record'+'.xml', 'Full_'+groups+'_record', MooringName=groups, datahost_path=datahost_path)
###Output
1hr_gridded_datasets_Mooring_03amp1a_final
1hr_gridded_datasets_Mooring_03amp1b_final
1hr_gridded_datasets_Mooring_05amp1a_final
1hr_gridded_datasets_Mooring_06amp1a_final
1hr_gridded_datasets_Mooring_08amp1a_final
1hr_gridded_datasets_Mooring_03amp2a_final
1hr_gridded_datasets_Mooring_03amp2b_final
1hr_gridded_datasets_Mooring_05amp2a_final
1hr_gridded_datasets_Mooring_06amp2a_final
1hr_gridded_datasets_Mooring_07amp2a_final
1hr_gridded_datasets_Mooring_08amp2a_final
1hr_gridded_datasets_Mooring_03amp3a_final
1hr_gridded_datasets_Mooring_03amp3b_final
1hr_gridded_datasets_Mooring_05amp3a_final
1hr_gridded_datasets_Mooring_06amp3a_final
1hr_gridded_datasets_Mooring_07amp3a_final
1hr_gridded_datasets_Mooring_08amp3a_final
1hr_gridded_datasets_Mooring_03amp4a_final
1hr_gridded_datasets_Mooring_03amp4b_final
1hr_gridded_datasets_Mooring_05amp4a_final
1hr_gridded_datasets_Mooring_06amp4a_final
1hr_gridded_datasets_Mooring_07amp4a_final
1hr_gridded_datasets_Mooring_08amp4a_final
1hr_gridded_datasets_Mooring_00bs4s_final
1hr_gridded_datasets_Mooring_00bs4w_final
1hr_gridded_datasets_Mooring_01bs4a_final
1hr_gridded_datasets_Mooring_01bs4b_final
1hr_gridded_datasets_Mooring_02bs4a_final
1hr_gridded_datasets_Mooring_02bs4b_final
1hr_gridded_datasets_Mooring_03bs4a_final
1hr_gridded_datasets_Mooring_03bs4b_final
1hr_gridded_datasets_Mooring_04bs4a_final
1hr_gridded_datasets_Mooring_04bs4b_final
1hr_gridded_datasets_Mooring_05bs4a_final
1hr_gridded_datasets_Mooring_05bs4b_final
1hr_gridded_datasets_Mooring_06bs4a_final
1hr_gridded_datasets_Mooring_06bs4b_final
1hr_gridded_datasets_Mooring_07bs4a_final
1hr_gridded_datasets_Mooring_07bs4b_final
1hr_gridded_datasets_Mooring_08bs4a_final
1hr_gridded_datasets_Mooring_08bs4b_final
1hr_gridded_datasets_Mooring_10bs4b_final
1hr_gridded_datasets_Mooring_11bs4a_final
1hr_gridded_datasets_Mooring_12bs4a_final
1hr_gridded_datasets_Mooring_12bs4b_final
1hr_gridded_datasets_Mooring_13bs4a_final
1hr_gridded_datasets_Mooring_13bs4b_final
1hr_gridded_datasets_Mooring_14bs4b_final
1hr_gridded_datasets_Mooring_15bs4b_final
1hr_gridded_datasets_Mooring_16bs4b_final
1hr_gridded_datasets_Mooring_17bs4b_final
1hr_gridded_datasets_Mooring_18bs4b_final
1hr_gridded_datasets_Mooring_05bs5a_final
1hr_gridded_datasets_Mooring_05bs5b_final
1hr_gridded_datasets_Mooring_06bs5a_final
1hr_gridded_datasets_Mooring_06bs5b_final
1hr_gridded_datasets_Mooring_07bs5a_final
1hr_gridded_datasets_Mooring_07bs5b_final
1hr_gridded_datasets_Mooring_08bs5b_final
1hr_gridded_datasets_Mooring_09bs5a_final
1hr_gridded_datasets_Mooring_09bs5b_final
1hr_gridded_datasets_Mooring_10bs5a_final
1hr_gridded_datasets_Mooring_10bs5b_final
1hr_gridded_datasets_Mooring_11bs5a_final
1hr_gridded_datasets_Mooring_11bs5b_final
1hr_gridded_datasets_Mooring_12bs5a_final
1hr_gridded_datasets_Mooring_13bs5a_final
1hr_gridded_datasets_Mooring_14bs5a_final
1hr_gridded_datasets_Mooring_15bs5a_final
1hr_gridded_datasets_Mooring_16bs5a_final
1hr_gridded_datasets_Mooring_17bs5a_final
1hr_gridded_datasets_Mooring_18bs5a_final
1hr_gridded_datasets_Mooring_05bs8a_final
1hr_gridded_datasets_Mooring_05bs8b_final
1hr_gridded_datasets_Mooring_06bs8a_final
1hr_gridded_datasets_Mooring_07bs8a_final
1hr_gridded_datasets_Mooring_08bs8a_final
1hr_gridded_datasets_Mooring_09bs8a_final
1hr_gridded_datasets_Mooring_10bs8a_final
1hr_gridded_datasets_Mooring_11bs8a_final
1hr_gridded_datasets_Mooring_12bs8a_final
1hr_gridded_datasets_Mooring_13bs8a_final
1hr_gridded_datasets_Mooring_14bs8a_final
1hr_gridded_datasets_Mooring_15bs8a_final
1hr_gridded_datasets_Mooring_16bs8a_final
1hr_gridded_datasets_Mooring_17bs8a_final
1hr_gridded_datasets_Mooring_18bs8a_final
1hr_gridded_datasets_Mooring_95bsm3a_final
1hr_gridded_datasets_Mooring_96bsm3a_final
1hr_gridded_datasets_Mooring_97bsm3a_final
1hr_gridded_datasets_Mooring_00bsp2s_final
1hr_gridded_datasets_Mooring_01bsp2b_final
1hr_gridded_datasets_Mooring_01bsp2s_final
1hr_gridded_datasets_Mooring_02bsp2a_final
1hr_gridded_datasets_Mooring_02bsp2b_final
1hr_gridded_datasets_Mooring_02bsp2c_final
1hr_gridded_datasets_Mooring_03bsp2a_final
1hr_gridded_datasets_Mooring_03bsp2b_final
1hr_gridded_datasets_Mooring_03bsp2c_final
1hr_gridded_datasets_Mooring_04bsp2a_final
1hr_gridded_datasets_Mooring_04bsp2b_final
1hr_gridded_datasets_Mooring_05bsp2a_final
1hr_gridded_datasets_Mooring_05bsp2b_final
1hr_gridded_datasets_Mooring_06bsp2a_final
1hr_gridded_datasets_Mooring_06bsp2d_final
1hr_gridded_datasets_Mooring_08bsp2b_final
1hr_gridded_datasets_Mooring_09bsp2a_final
1hr_gridded_datasets_Mooring_09bsp2b_final
1hr_gridded_datasets_Mooring_10bsp2a_final
1hr_gridded_datasets_Mooring_10bsp2b_final
1hr_gridded_datasets_Mooring_11bsp2a_final
1hr_gridded_datasets_Mooring_11bsp2b_final
1hr_gridded_datasets_Mooring_12bsp2a_final
1hr_gridded_datasets_Mooring_12bsp2b_final
1hr_gridded_datasets_Mooring_13bsp2a_final
1hr_gridded_datasets_Mooring_13bsp2b_final
1hr_gridded_datasets_Mooring_14bsp2a_final
1hr_gridded_datasets_Mooring_15bsp2a_final
1hr_gridded_datasets_Mooring_16bsp2a_final
1hr_gridded_datasets_Mooring_16bsp2b_final
1hr_gridded_datasets_Mooring_17bsp2a_final
1hr_gridded_datasets_Mooring_17bsp2b_final
1hr_gridded_datasets_Mooring_18bsp2a_final
1hr_gridded_datasets_Mooring_18bsp2b_final
1hr_gridded_datasets_Mooring_00bsp4s_final
1hr_gridded_datasets_Mooring_06bsp4a_final
1hr_gridded_datasets_Mooring_07bsp4a_final
1hr_gridded_datasets_Mooring_07bsp4b_final
1hr_gridded_datasets_Mooring_08bsp4a_final
1hr_gridded_datasets_Mooring_08bsp4b_final
1hr_gridded_datasets_Mooring_09bsp4a_final
1hr_gridded_datasets_Mooring_11bsp4a_final
1hr_gridded_datasets_Mooring_12bsp4a_final
1hr_gridded_datasets_Mooring_12bsp4b_final
1hr_gridded_datasets_Mooring_13bsp4a_final
1hr_gridded_datasets_Mooring_15bsp4a_final
1hr_gridded_datasets_Mooring_17bsp4a_final
1hr_gridded_datasets_Mooring_18bsp4a_final
1hr_gridded_datasets_Mooring_04bsp5a_final
1hr_gridded_datasets_Mooring_05bsp5b_final
1hr_gridded_datasets_Mooring_06bsp5b_final
1hr_gridded_datasets_Mooring_07bsp5a_final
1hr_gridded_datasets_Mooring_07bsp5b_final
1hr_gridded_datasets_Mooring_08bsp5b_final
1hr_gridded_datasets_Mooring_09bsp5a_final
1hr_gridded_datasets_Mooring_09bsp5b_final
1hr_gridded_datasets_Mooring_10bsp5a_final
1hr_gridded_datasets_Mooring_11bsp5a_final
1hr_gridded_datasets_Mooring_11bsp5b_final
1hr_gridded_datasets_Mooring_12bsp5a_final
1hr_gridded_datasets_Mooring_14bsp5a_final
1hr_gridded_datasets_Mooring_16bsp5a_final
1hr_gridded_datasets_Mooring_17bsp5a_final
1hr_gridded_datasets_Mooring_18bsp5a_final
1hr_gridded_datasets_Mooring_03bsp6a_final
1hr_gridded_datasets_Mooring_14bsp6a_final
1hr_gridded_datasets_Mooring_05bsp8a_final
1hr_gridded_datasets_Mooring_05bsp8b_final
1hr_gridded_datasets_Mooring_06bsp8a_final
1hr_gridded_datasets_Mooring_07bsp8a_final
1hr_gridded_datasets_Mooring_08bsp8a_final
1hr_gridded_datasets_Mooring_10bsp8a_final
1hr_gridded_datasets_Mooring_11bsp8a_final
1hr_gridded_datasets_Mooring_13bsp8a_final
1hr_gridded_datasets_Mooring_14bsp8a_final
1hr_gridded_datasets_Mooring_15bsp8a_final
1hr_gridded_datasets_Mooring_16bsp8a_final
1hr_gridded_datasets_Mooring_17bsp8a_final
1hr_gridded_datasets_Mooring_18bsp8a_final
1hr_gridded_datasets_Mooring_08bsp9a_final
1hr_gridded_datasets_Mooring_09bsp9a_final
1hr_gridded_datasets_Mooring_06bst2a_final
1hr_gridded_datasets_Mooring_07bst2a_final
1hr_gridded_datasets_Mooring_08bst2a_final
1hr_gridded_datasets_Mooring_09bst2a_final
1hr_gridded_datasets_Mooring_10bst2a_final
1hr_gridded_datasets_Mooring_08bsv8a_final
1hr_gridded_datasets_Mooring_09bsv8a_final
1hr_gridded_datasets_Mooring_00cb1a_final
1hr_gridded_datasets_Mooring_03cb1a_final
1hr_gridded_datasets_Mooring_03cb1b_final
1hr_gridded_datasets_Mooring_04cb1a_final
1hr_gridded_datasets_Mooring_05cb1a_final
1hr_gridded_datasets_Mooring_06cb1a_final
1hr_gridded_datasets_Mooring_07cb1a_final
1hr_gridded_datasets_Mooring_08cb1a_final
1hr_gridded_datasets_Mooring_09cb1a_final
1hr_gridded_datasets_Mooring_10cb1a_final
1hr_gridded_datasets_Mooring_11cb1a_final
1hr_gridded_datasets_Mooring_11cb1b_final
1hr_gridded_datasets_Mooring_12cb1a_final
1hr_gridded_datasets_Mooring_12cb1b_final
1hr_gridded_datasets_Mooring_13cb1a_final
1hr_gridded_datasets_Mooring_15cb1a_final
1hr_gridded_datasets_Mooring_16cb1a_final
1hr_gridded_datasets_Mooring_17cb1a_final
1hr_gridded_datasets_Mooring_10ckip1a_final
1hr_gridded_datasets_Mooring_11ckip1a_final
1hr_gridded_datasets_Mooring_17ckip1a_final
1hr_gridded_datasets_Mooring_18ckip1a_final
1hr_gridded_datasets_Mooring_10ckip2a_final
1hr_gridded_datasets_Mooring_11ckip2a_final
1hr_gridded_datasets_Mooring_12ckip2a_final
1hr_gridded_datasets_Mooring_16ckip2a_final
1hr_gridded_datasets_Mooring_17ckip2a_final
1hr_gridded_datasets_Mooring_18ckip2a_final
1hr_gridded_datasets_Mooring_10ckip3a_final
1hr_gridded_datasets_Mooring_11ckip3a_final
1hr_gridded_datasets_Mooring_17ckip3a_final
1hr_gridded_datasets_Mooring_18ckip3a_final
1hr_gridded_datasets_Mooring_10ckp1a_final
1hr_gridded_datasets_Mooring_11ckp1a_final
1hr_gridded_datasets_Mooring_13ckp1a_final
1hr_gridded_datasets_Mooring_14ckp1a_final
1hr_gridded_datasets_Mooring_15ckp1a_final
1hr_gridded_datasets_Mooring_16ckp1a_final
1hr_gridded_datasets_Mooring_17ckp1a_final
1hr_gridded_datasets_Mooring_18ckp1a_final
1hr_gridded_datasets_Mooring_16ckp10a_final
1hr_gridded_datasets_Mooring_17ckp10a_final
1hr_gridded_datasets_Mooring_18ckp10a_final
1hr_gridded_datasets_Mooring_16ckp11a_final
1hr_gridded_datasets_Mooring_17ckp11a_final
1hr_gridded_datasets_Mooring_18ckp11a_final
1hr_gridded_datasets_Mooring_16ckp12a_final
1hr_gridded_datasets_Mooring_17ckp12a_final
1hr_gridded_datasets_Mooring_18ckp12a_final
1hr_gridded_datasets_Mooring_10ckp2a_final
1hr_gridded_datasets_Mooring_11ckp2a_final
1hr_gridded_datasets_Mooring_12ckp2a_final
1hr_gridded_datasets_Mooring_13ckp2a_final
1hr_gridded_datasets_Mooring_14ckp2a_final
1hr_gridded_datasets_Mooring_15ckp2a_final
1hr_gridded_datasets_Mooring_16ckp2a_final
1hr_gridded_datasets_Mooring_17ckp2a_final
1hr_gridded_datasets_Mooring_18ckp2a_final
1hr_gridded_datasets_Mooring_19ckp2a_final
1hr_gridded_datasets_Mooring_10ckp3a_final
1hr_gridded_datasets_Mooring_11ckp3a_final
1hr_gridded_datasets_Mooring_16ckp3a_final
1hr_gridded_datasets_Mooring_17ckp3a_final
1hr_gridded_datasets_Mooring_18ckp3a_final
1hr_gridded_datasets_Mooring_12ckp4a_final
1hr_gridded_datasets_Mooring_13ckp4a_final
1hr_gridded_datasets_Mooring_14ckp4a_final
1hr_gridded_datasets_Mooring_15ckp4a_final
1hr_gridded_datasets_Mooring_16ckp4a_final
1hr_gridded_datasets_Mooring_17ckp4a_final
1hr_gridded_datasets_Mooring_18ckp4a_final
1hr_gridded_datasets_Mooring_19ckp4a_final
1hr_gridded_datasets_Mooring_13ckp5a_final
1hr_gridded_datasets_Mooring_14ckp5a_final
1hr_gridded_datasets_Mooring_16ckp5a_final
1hr_gridded_datasets_Mooring_17ckp5a_final
1hr_gridded_datasets_Mooring_18ckp5a_final
1hr_gridded_datasets_Mooring_13ckp6a_final
1hr_gridded_datasets_Mooring_14ckp6a_final
1hr_gridded_datasets_Mooring_13ckp7a_final
1hr_gridded_datasets_Mooring_14ckp7a_final
1hr_gridded_datasets_Mooring_14ckp9a_final
1hr_gridded_datasets_Mooring_15ckp9a_final
1hr_gridded_datasets_Mooring_16ckp9a_final
1hr_gridded_datasets_Mooring_11cs12a_final
1hr_gridded_datasets_Mooring_13cs12a_final
1hr_gridded_datasets_Mooring_11cs13a_final
1hr_gridded_datasets_Mooring_13cs13a_final
1hr_gridded_datasets_Mooring_05csp1a_final
1hr_gridded_datasets_Mooring_10csp1a_final
1hr_gridded_datasets_Mooring_11csp11a_final
1hr_gridded_datasets_Mooring_13csp11a_final
1hr_gridded_datasets_Mooring_10csp3a_final
1hr_gridded_datasets_Mooring_13csp3a_final
1hr_gridded_datasets_Mooring_03gb1a_final
1hr_gridded_datasets_Mooring_03gb1b_final
1hr_gridded_datasets_Mooring_04gb1a_final
1hr_gridded_datasets_Mooring_03gb2a_final
1hr_gridded_datasets_Mooring_03gb2b_final
1hr_gridded_datasets_Mooring_01gbm3a_final
1hr_gridded_datasets_Mooring_01gbm3b_final
1hr_gridded_datasets_Mooring_03gbm3a_final
1hr_gridded_datasets_Mooring_03gbm3b_final
1hr_gridded_datasets_Mooring_04gbm3a_final
1hr_gridded_datasets_Mooring_03gbp12a_final
1hr_gridded_datasets_Mooring_03gbp12b_final
1hr_gridded_datasets_Mooring_03gbp3a_final
1hr_gridded_datasets_Mooring_03gbp3b_final
1hr_gridded_datasets_Mooring_04gbp3a_final
1hr_gridded_datasets_Mooring_03gbp5a_final
1hr_gridded_datasets_Mooring_03gbp5b_final
1hr_gridded_datasets_Mooring_04gbp5a_final
1hr_gridded_datasets_Mooring_03gp32a_final
1hr_gridded_datasets_Mooring_03gp32b_final
1hr_gridded_datasets_Mooring_04gp32a_final
1hr_gridded_datasets_Mooring_03gp34a_final
1hr_gridded_datasets_Mooring_03gp34b_final
1hr_gridded_datasets_Mooring_04gp34a_final
1hr_gridded_datasets_Mooring_11gpp32a_final
1hr_gridded_datasets_Mooring_13gpp32a_final
1hr_gridded_datasets_Mooring_11gpp34a_final
1hr_gridded_datasets_Mooring_13gpp34a_final
1hr_gridded_datasets_Mooring_03gpp36a_final
1hr_gridded_datasets_Mooring_03gpp36b_final
1hr_gridded_datasets_Mooring_04gpp36a_final
1hr_gridded_datasets_Mooring_11gpp36a_final
1hr_gridded_datasets_Mooring_13gpp36a_final
1hr_gridded_datasets_Mooring_97if4a_final
1hr_gridded_datasets_Mooring_98if4a_final
1hr_gridded_datasets_Mooring_97if5a_final
1hr_gridded_datasets_Mooring_98if5a_final
1hr_gridded_datasets_Mooring_97if6a_final
1hr_gridded_datasets_Mooring_98if6a_final
1hr_gridded_datasets_Mooring_97if7a_final
1hr_gridded_datasets_Mooring_98if7a_final
1hr_gridded_datasets_Mooring_97if8a_final
1hr_gridded_datasets_Mooring_98if8a_final
1hr_gridded_datasets_Mooring_97if9a_final
1hr_gridded_datasets_Mooring_98if9a_final
1hr_gridded_datasets_Mooring_97ifm1a_final
1hr_gridded_datasets_Mooring_98ifm1a_final
1hr_gridded_datasets_Mooring_11ipp2a_final
1hr_gridded_datasets_Mooring_13ipp2a_final
1hr_gridded_datasets_Mooring_00kc1a_final
1hr_gridded_datasets_Mooring_03kc1a_final
1hr_gridded_datasets_Mooring_05kc1a_final
1hr_gridded_datasets_Mooring_06kc1a_final
1hr_gridded_datasets_Mooring_00kc2a_final
1hr_gridded_datasets_Mooring_03kc2a_final
1hr_gridded_datasets_Mooring_04kc2a_final
1hr_gridded_datasets_Mooring_05kc2a_final
1hr_gridded_datasets_Mooring_06kc2a_final
1hr_gridded_datasets_Mooring_07kc2a_final
1hr_gridded_datasets_Mooring_08kc2a_final
1hr_gridded_datasets_Mooring_09kc2a_final
1hr_gridded_datasets_Mooring_11kep41a_final
1hr_gridded_datasets_Mooring_13kep41a_final
1hr_gridded_datasets_Mooring_00pa1a_final
1hr_gridded_datasets_Mooring_03pa1a_final
1hr_gridded_datasets_Mooring_04pa1a_final
1hr_gridded_datasets_Mooring_05pa1a_final
1hr_gridded_datasets_Mooring_06pa1a_final
1hr_gridded_datasets_Mooring_07pa1a_final
1hr_gridded_datasets_Mooring_08pa1a_final
1hr_gridded_datasets_Mooring_09pa1a_final
1hr_gridded_datasets_Mooring_12pa1a_final
1hr_gridded_datasets_Mooring_11pcp1a_final
1hr_gridded_datasets_Mooring_13pcp1a_final
1hr_gridded_datasets_Mooring_08sbp1a_final
1hr_gridded_datasets_Mooring_09sbp1a_final
1hr_gridded_datasets_Mooring_03sg3a_final
1hr_gridded_datasets_Mooring_03sg3b_final
1hr_gridded_datasets_Mooring_03sg5a_final
1hr_gridded_datasets_Mooring_03sg5b_final
1hr_gridded_datasets_Mooring_03sgp1a_final
1hr_gridded_datasets_Mooring_03sgp1b_final
1hr_gridded_datasets_Mooring_03ssp1a_final
1hr_gridded_datasets_Mooring_03ssp1b_final
1hr_gridded_datasets_Mooring_05ssp1a_final
1hr_gridded_datasets_Mooring_06ssp1a_final
1hr_gridded_datasets_Mooring_03ssp2a_final
1hr_gridded_datasets_Mooring_03ssp2b_final
1hr_gridded_datasets_Mooring_04ssp2a_final
1hr_gridded_datasets_Mooring_05ssp2a_final
1hr_gridded_datasets_Mooring_06ssp2a_final
1hr_gridded_datasets_Mooring_03ssp3a_final
1hr_gridded_datasets_Mooring_03ssp3b_final
1hr_gridded_datasets_Mooring_04ssp3a_final
1hr_gridded_datasets_Mooring_05ssp3a_final
1hr_gridded_datasets_Mooring_06ssp3a_final
1hr_gridded_datasets_Mooring_04stl1a_final
1hr_gridded_datasets_Mooring_04stl1b_final
1hr_gridded_datasets_Mooring_11svp39a_final
1hr_gridded_datasets_Mooring_13svp39a_final
###Markdown
Now do similar but only for deepest T/S**Deepest Temp/Sal**- M2- M4- M5- M8- C2**Shallowest Temp**- M2- M4- M5- M8**Notes:**- read in data, drop NaNs, determine max and min depths- keep depth and value so it can be masked if it's too shallow by the end user
###Code
#read from recently created grouped data and interpolate/extrapolate to fixed bottom depth or
def btm_TS_timeseries(df,mooring_id=[None],parameter='salinity',max_depth=50,verbose=True):
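    # Build a single deepest-level T/S time series for one mooring site: for each deployment
    # whose subname is in mooring_id, select the deepest depth that has any non-missing
    # `parameter` data, skip deployments whose deepest level is shallower than max_depth,
    # and concatenate the temperature/salinity at that depth along time.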
"""
"""
#xml related
depth_threshold=50
first_dataset=True
#M2
for groups, dfg in df.groupby('subname'):
if groups in mooring_id: #M2
for i,dfs in dfg.iterrows():
if verbose:
print(dfs['Dataset ID'])
e.constraints = None
e.protocol = 'griddap'
e.dataset_id=dfs['Dataset ID']
e.response = 'nc'
try:
ds = e.to_xarray(decode_times=True)
btmsaldepth = ds[parameter].dropna(dim='depth', how='all').depth.max()
if btmsaldepth < max_depth:
continue
if first_dataset:
xdf = ds.sel(depth=btmsaldepth)[['temperature','salinity']]
first_dataset = False
else:
xdf = xa.concat([xdf,ds.sel(depth=btmsaldepth)[['temperature','salinity']]],dim='time')
except:
pass
xdf = xdf.sortby('time')
xdf = xdf.where(xdf != -9999.0,np.nan)
return xdf
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bsm','bsm2','bs2'],parameter='salinity',max_depth=50,verbose=False)
fig, ax = plt.subplots(3,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.salinity.plot(ax=ax[1])
xdf.depth.plot(marker='+',ax=ax[2])
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bsm','bsm2','bs2'],parameter='temperature',max_depth=50,verbose=False)
xdf.to_netcdf('bs2_bottomtemperature_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
fig, ax = plt.subplots(2,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.depth.plot(marker='+',ax=ax[1])
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bs4'],parameter='temperature',max_depth=50,verbose=False)
xdf.to_netcdf('bs4_bottomtemperature_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
fig, ax = plt.subplots(2,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.depth.plot(marker='+',ax=ax[1])
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bs5'],parameter='temperature',max_depth=50,verbose=False)
xdf.to_netcdf('bs5_bottomtemperature_record'+'.nc',encoding={'time':{'units':'days since 1900-01-01'}})
fig, ax = plt.subplots(2,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.depth.plot(marker='+',ax=ax[1])
#split names and get mooring id
#some groupings are at the same site, but get different names over winter (it's the a,b,c nomenclature), so drop the final letter too
df['subname'] = [x.split('_')[-2][2:-1] for x in df['Dataset ID'].values]
#xml related
datahost_path='/home/akutan/bell/in_and_outbox/erddap_generated/foci_products/unified_moorings/'
xdf = btm_TS_timeseries(df,mooring_id=['bs8'],parameter='temperature',max_depth=50,verbose=False)
xdf = xdf.resample(time='1H').median()
xdf.to_netcdf('bs8_bottomtemperature_record'+'.nc',encoding={'time':{'units':'days since 1970-01-01'}})
fig, ax = plt.subplots(2,figsize=(17, 6))
xdf.temperature.plot(ax=ax[0])
xdf.depth.plot(marker='+',ax=ax[1])
###Output
_____no_output_____ |
Alpha_Delta_Info.ipynb | ###Markdown
Alpha and Delta Information (Deceased = COV ONLY)
###Code
import pandas as pd
import os
input_loc = "input_data"
output_loc = "output_data"
infections_mz = pd.read_csv(os.path.join(os.getcwd(), input_loc, "infections_alpha_delta.csv"))
hospitalized_mz = pd.read_csv(os.path.join(os.getcwd(), input_loc, "hospitalized_alpha_delta.csv"))
deceased_mz_cov = pd.read_csv(os.path.join(os.getcwd(), input_loc, "deceased_alpha_delta_without_oc_cov.csv"))
###Output
_____no_output_____
###Markdown
Prevent categorical data loss due to joins between files
###Code
age_groups = ['0-4', '5-9', '10-14', '15-19', '20-24', '25-29','30-34', '35-39', '40-44',
'45-49', '50-54', '55-59', '60-64', '65-69', '70-74', '75-79',
'80-84', '85-89', '90-94', '95-99', '100+']
age_binding = [age for age in age_groups for period in ['Period 1', 'Period 2']]
period_binding = [period for age in age_groups for period in ['Period 1', 'Period 2']]
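# build every (period, age group) combination up front so that categories missing from any
# one input file are not lost by the left joins below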
full_data = pd.DataFrame({
'periods': period_binding,
'age_groups': age_binding
})
full_data = full_data.merge(infections_mz, on=['age_groups', 'periods'], how='left')
full_data = full_data.merge(hospitalized_mz, on=['age_groups', 'periods'], how='left')
full_data = full_data.merge(deceased_mz_cov, on=['age_groups', 'periods'], how='left')
full_data.to_csv(os.path.join(os.getcwd(), output_loc, "infected_hospitalized_deceased[cov-only]_combined.csv"), index=False)
###Output
_____no_output_____ |
deprecated/DatasetCreator.ipynb | ###Markdown
DatasetCreator**Contributors:** Donna HooshmandNote that this Jupyter notebook has been deprecated and is not used by our final working version of the project. InstructionsYou have 2 folders in the main folder!Copy all the training images into the train folder!Copy all the testing images into the test folder!Don't add any files or folders to the main folder or any of the subfolders!If you do so, you will definitely break everything!There shouldn't be any folders or files except the images in the train and test folders!
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
from os import listdir
from os.path import isfile, join
###Output
_____no_output_____
###Markdown
Creating training Dataset!
###Code
onlyfiles = [f for f in listdir("train") if isfile(join("train", f))]
files_lenght = len(onlyfiles)
counter = 1
all_images = []
for i in onlyfiles:
img = cv2.imread("train/"+i)
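    # OpenCV reads images in BGR order; split and re-merge the channels to get RGB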
b, g, r = cv2.split(img)
img = cv2.merge((r,g,b))
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
all_images.append([img,i])
if counter%10==0:
print(counter,"/",files_lenght, "images loaded so far!")
counter+=1
print(files_lenght,"/",files_lenght)
print("Finished!")
plt.imshow(all_images[0][0])
plt.show()
all_images[0][1]
col_list=["image_name","target"]
tag = pd.read_csv("train.csv", usecols=col_list)
print(len(all_images))
print(len(tag.values))
print("Both numbers appearing on the top of this line SHOULD be the same; let me know if they are not!")
counter = 1
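# replace each stored filename with a one-hot label: [0,1] if target == 1, otherwise [1,0]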
for i in range(len(all_images)):
for j in range(len(tag.values)):
if all_images[i][1]==tag.values[j][0]+".jpg":
if tag.values[j][1]==1:
all_images[i][1]=[0,1]
else:
all_images[i][1]=[1,0]
break
if counter%10==0:
        print(counter,"/",files_lenght, "images tagged so far!")
counter+=1
print(files_lenght,"/",files_lenght)
print("Finished!")
for i in range(len(all_images)):
new = all_images[i][0]/255
all_images[i][0] = new
x_train = np.array([i[0] for i in all_images])
print(x_train.shape)
y_train = np.array([i[1] for i in all_images])
print(y_train.shape)
del onlyfiles, all_images, tag, col_list
np.save("x_train",x_train)
np.save("y_train",y_train)
#x_train = np.load("x_train.npy")
#y_train = np.load("y_train.npy")
###Output
_____no_output_____
###Markdown
END of creating training dataset Creating testing Dataset!
###Code
onlyfiles = [f for f in listdir("test") if isfile(join("test", f))]
files_lenght = len(onlyfiles)
counter = 1
all_images = []
for i in onlyfiles:
img = cv2.imread("test/"+i)
b, g, r = cv2.split(img)
img = cv2.merge((r,g,b))
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
all_images.append([img,i])
if counter%10==0:
print(counter,"/",files_lenght, "images loaded so far!")
counter+=1
print(files_lenght,"/",files_lenght)
print("Finished!")
plt.imshow(all_images[0][0])
plt.show()
print(len(all_images))
for i in range(len(all_images)):
new = all_images[i][0]/255
all_images[i][0] = new
x_test = np.array([i[0] for i in all_images])
print(x_test.shape)
del onlyfiles, all_images
np.save("x_test",x_test)
#x_test = np.load("x_test.npy")
###Output
_____no_output_____ |
catboost/tutorials/competition_examples/quora_w2v.ipynb | ###Markdown
Example of using CatBoost on text data with word2vec embedding.[](https://colab.research.google.com/github/catboost/tutorials/blob/master/competition_examples/quora_w2v.ipynb)
###Code
import catboost
import collections
import gensim
import os
import nltk
import numpy as np
import pandas as pd
import random
import tensorflow as tf
import zipfile
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss, roc_auc_score
from urllib import urlretrieve
data_path = '../text8/'
if not os.path.exists(data_path):
os.makedirs(data_path)
###Output
_____no_output_____
###Markdown
Embedding Train word2vec embeddings using TensorFlow ([from this example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/udacity/5_word2vec.ipynb)). Load [Text8](http://mattmahoney.net/dc/textdata) data.
###Code
url = 'http://mattmahoney.net/dc/'
filename = 'text8.zip'
filename, _ = urlretrieve(url + filename, data_path + filename)
with zipfile.ZipFile(data_path + filename) as f:
words = tf.compat.as_str(f.read(f.namelist()[0])).split()
###Output
_____no_output_____
###Markdown
Build a dataset. Rare words are replaced with the 'UNK' token.
###Code
vocabulary_size = 50000
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
del words
###Output
_____no_output_____
###Markdown
Write a batch generator.
###Code
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buf = collections.deque(maxlen=span)
for _ in xrange(span):
buf.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in xrange(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in xrange(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buf[skip_window]
labels[i * num_skips + j, 0] = buf[target]
buf.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
###Output
_____no_output_____
###Markdown
Train a skip-gram model.
###Code
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0/np.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=embed,
labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
num_steps = 500001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
average_loss = 0
for step in range(num_steps):
batch_data, batch_labels = generate_batch(batch_size, num_skips, skip_window)
feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 100000 == 0 and step > 0:
print('Average loss at step %d: %f' % (step, average_loss / 100000))
average_loss = 0
word2vec = normalized_embeddings.eval()
###Output
Average loss at step 100000: 3.454290
Average loss at step 200000: 3.242673
Average loss at step 300000: 3.177683
Average loss at step 400000: 3.131030
Average loss at step 500000: 3.077533
###Markdown
Check the trained word2vec embeddings: find the nearest words for 'car'.
###Code
distances = -word2vec[dictionary['car']].reshape((1, -1)).dot(word2vec.T)
inds = np.argsort(distances.ravel())[1:6]
print(' '.join([reverse_dictionary[i] for i in inds]))
###Output
cars automobile train aircraft company
###Markdown
Also you can:1. Change the parameters of the model.2. Change the dataset to a bigger one.3. Increase the training time.4. Use a pretrained model (not only word2vec); a hedged sketch of this option follows in the next cell. Dataset Load the dataset from the [Kaggle Quora Question Pairs](https://www.kaggle.com/c/quora-question-pairs/overview) competition. The goal of this task is to determine which pairs of questions are duplicates (binary classification).
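###Markdown
A minimal, hedged sketch of option 4 above (not part of the original tutorial): load pre-trained vectors with gensim, which is already imported. The file name `GoogleNews-vectors-negative300.bin` is an assumed local download, and the rest of the notebook would have to swap its `word2vec`/`dictionary` lookups for this model.
###Code
# Sketch only: the .bin file below is an assumed local download, not shipped with this tutorial
pretrained = gensim.models.KeyedVectors.load_word2vec_format(
    'GoogleNews-vectors-negative300.bin', binary=True)
print(pretrained.most_similar('car', topn=5))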
###Code
data = pd.read_csv(data_path + 'train.csv').fillna('')
data.head()
target = data.is_duplicate
data.drop(['is_duplicate', 'id', 'qid1', 'qid2'], axis=1, inplace=True)
data.question1 = data.question1.apply(lambda x: x.lower().decode('utf-8'))
data.question2 = data.question2.apply(lambda x: x.lower().decode('utf-8'))
###Output
_____no_output_____
###Markdown
Feature extraction Use NLTK for tokenization and stop-word filtering.
###Code
nltk.download('punkt')
nltk.download('stopwords')
stop_words = nltk.corpus.stopwords.words('english')
###Output
_____no_output_____
###Markdown
Get a vector for every question by:1. Tokenizing2. Filtering out stop words and non-words3. Summing the word vectors and normalizing the result.
###Code
EPS = 1e-100
def question2vec(s):
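    # tokenize, drop stop words and non-alphabetic tokens, sum the in-vocabulary word vectors
    # and L2-normalize; fall back to a constant unit vector when no token is in the vocabulary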
words = nltk.word_tokenize(s)
words = filter(lambda x: not x in stop_words and x.isalpha(), words)
seq = np.array([word2vec[dictionary[w]] for w in words if w in dictionary])
v = seq.sum(axis=0)
return v / ((v ** 2).sum() + EPS) ** 0.5 if seq.shape[0] != 0 else np.ones(embedding_size)*1.0/embedding_size**0.5
question1_vec = np.array([question2vec(q) for q in data.question1.values])
question2_vec = np.array([question2vec(q) for q in data.question2.values])
###Output
_____no_output_____
###Markdown
You can not only sum or average the vectors but also take the max, min and std over each question's word vectors; a hedged sketch follows the next cell. Generate features on the embeddings.
###Code
data['cosine'] = [cosine(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['cityblock'] = [cityblock(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['canberra'] = [canberra(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['euclidean'] = [euclidean(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['minkowski'] = [minkowski(x, y, 3) for (x, y) in zip(question1_vec, question2_vec)]
data['braycurtis'] = [braycurtis(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['skew_q1'] = [skew(x) for x in question1_vec]
data['skew_q2'] = [skew(x) for x in question2_vec]
data['kur_q1'] = [kurtosis(x) for x in question1_vec]
data['kur_q2'] = [kurtosis(x) for x in question2_vec]
data['skew_diff'] = np.abs(data['skew_q1'] - data['skew_q2'])
data['kur_diff'] = np.abs(data['kur_q1'] - data['kur_q2'])
###Output
_____no_output_____
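###Markdown
A hedged sketch of the max/min/std pooling mentioned above (not in the original pipeline); `question2stats` is a hypothetical helper that reuses the same tokenization and filtering as `question2vec`.
###Code
# Sketch only: pool each question's word vectors with max/min/std instead of a normalized sum
def question2stats(s):
    tokens = nltk.word_tokenize(s)
    tokens = [w for w in tokens if w not in stop_words and w.isalpha() and w in dictionary]
    seq = np.array([word2vec[dictionary[w]] for w in tokens])
    if seq.shape[0] == 0:
        seq = np.zeros((1, embedding_size))
    return np.concatenate([seq.max(axis=0), seq.min(axis=0), seq.std(axis=0)])
# e.g. q1_stats = np.array([question2stats(q) for q in data.question1.values])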
###Markdown
In addition, you can not only calculate metrics between questions but also use the raw vectors or their differences as features; a sketch follows the next cell. Generate simple features.
###Code
data['len_q1'] = data.question1.apply(lambda x: len(x))
data['len_q2'] = data.question2.apply(lambda x: len(x))
data['len_diff'] = np.abs(data.len_q1 - data.len_q2)
data['len_char_q1'] = data.question1.apply(lambda x: len(x.replace(' ', '')))
data['len_char_q2'] = data.question2.apply(lambda x: len(x.replace(' ', '')))
data['len_char_diff'] = np.abs(data.len_char_q1 - data.len_char_q2)
data['len_uniq_char_q1'] = data.question1.apply(lambda x: len(''.join(set(x.replace(' ', '')))))
data['len_uniq_char_q2'] = data.question2.apply(lambda x: len(''.join(set(x.replace(' ', '')))))
data['len_uniq_char_diff'] = np.abs(data.len_uniq_char_q1 - data.len_uniq_char_q2)
data['len_word_q1'] = data.question1.apply(lambda x: len(x.split()))
data['len_word_q2'] = data.question2.apply(lambda x: len(x.split()))
data['len_word_diff'] = np.abs(data.len_word_q1 - data.len_word_q2)
data['len_uniq_word_q1'] = data.question1.apply(lambda x: len(set(x.split())))
data['len_uniq_word_q2'] = data.question2.apply(lambda x: len(set(x.split())))
data['len_uniq_word_diff'] = np.abs(data.len_uniq_word_q1 - data.len_uniq_word_q2)
data['common_words'] = data.apply(lambda x: len(set(x['question1'].split()).intersection(set(x['question2'].split()))), axis=1)
data['union_words'] = data.apply(lambda x: len(set(x['question1'].split()).union(set(x['question2'].split()))), axis=1)
data['jaccard_words'] = data.common_words / (data.union_words + EPS)
###Output
_____no_output_____
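###Markdown
A hedged sketch of the "use all vectors or differences" idea above (not in the original notebook): append the element-wise absolute difference of the two question embeddings as extra columns; the `w2v_diff_` column prefix is an assumption.
###Code
# Sketch only: add |q1 - q2| per embedding dimension as extra features
embedding_diff = np.abs(question1_vec - question2_vec)
for k in range(embedding_size):
    data['w2v_diff_%d' % k] = embedding_diff[:, k]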
###Markdown
Train and check the model Split the dataset into train and validation parts.
###Code
train, test, y_train, y_test = train_test_split(data.drop(['question1', 'question2'], axis=1), target, test_size=0.2)
###Output
_____no_output_____
###Markdown
Train CatBoost and check the prediction on the validation part.
###Code
clf = catboost.CatBoostClassifier(depth=6, iterations=1000, learning_rate=0.1, thread_count=16, logging_level='Silent')
clf.fit(train, y_train)
y_pred = clf.predict_proba(test)[:, 1]
print 'AUC:', roc_auc_score(y_test, y_pred)
###Output
AUC: 0.8268292157683419
###Markdown
Example of using CatBoost on text data with word2vec embedding.
###Code
import catboost
import collections
import gensim
import os
import nltk
import numpy as np
import pandas as pd
import random
import tensorflow as tf
import zipfile
from scipy.stats import skew, kurtosis
from scipy.spatial.distance import cosine, cityblock, jaccard, canberra, euclidean, minkowski, braycurtis
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss, roc_auc_score
from urllib import urlretrieve
data_path = '../text8/'
if not os.path.exists(data_path):
os.makedirs(data_path)
###Output
_____no_output_____
###Markdown
Embedding. Train word2vec embeddings using TensorFlow ([from this example](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/udacity/5_word2vec.ipynb)). Load the [Text8](http://mattmahoney.net/dc/textdata) data.
###Code
url = 'http://mattmahoney.net/dc/'
filename = 'text8.zip'
filename, _ = urlretrieve(url + filename, data_path + filename)
with zipfile.ZipFile(data_path + filename) as f:
words = tf.compat.as_str(f.read(f.namelist()[0])).split()
###Output
_____no_output_____
###Markdown
Build a dataset. Rare words are replaced with 'UNK' token.
###Code
vocabulary_size = 50000
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
del words
###Output
_____no_output_____
###Markdown
Write batch generator.
###Code
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buf = collections.deque(maxlen=span)
for _ in xrange(span):
buf.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in xrange(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in xrange(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buf[skip_window]
labels[i * num_skips + j, 0] = buf[target]
buf.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
###Output
_____no_output_____
###Markdown
Train a skip-gram model.
###Code
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
train_dataset = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
softmax_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0/np.sqrt(embedding_size)))
softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
embed = tf.nn.embedding_lookup(embeddings, train_dataset)
loss = tf.reduce_mean(
tf.nn.sampled_softmax_loss(weights=softmax_weights, biases=softmax_biases, inputs=embed,
labels=train_labels, num_sampled=num_sampled, num_classes=vocabulary_size))
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
num_steps = 500001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
average_loss = 0
for step in range(num_steps):
batch_data, batch_labels = generate_batch(batch_size, num_skips, skip_window)
feed_dict = {train_dataset : batch_data, train_labels : batch_labels}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 100000 == 0 and step > 0:
print('Average loss at step %d: %f' % (step, average_loss / 100000))
average_loss = 0
word2vec = normalized_embeddings.eval()
###Output
Average loss at step 100000: 3.454290
Average loss at step 200000: 3.242673
Average loss at step 300000: 3.177683
Average loss at step 400000: 3.131030
Average loss at step 500000: 3.077533
###Markdown
Check the trained word2vec: find the nearest words to 'car'.
###Code
distances = -word2vec[dictionary['car']].reshape((1, -1)).dot(word2vec.T)
inds = np.argsort(distances.ravel())[1:6]
print(' '.join([reverse_dictionary[i] for i in inds]))
###Output
cars automobile train aircraft company
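###Markdown
If training the skip-gram model from scratch is too slow, a pretrained embedding could be dropped in instead -- a hedged sketch using gensim (imported above but otherwise unused). The file name below is illustrative, and the exact loader call may differ between gensim versions.
###Code
# Load word vectors trained elsewhere (the file name is an assumption, not provided here).
pretrained = gensim.models.KeyedVectors.load_word2vec_format(
    'GoogleNews-vectors-negative300.bin.gz', binary=True)
# Downstream, `pretrained[w]` (guarded by `w in pretrained`) would replace
# `word2vec[dictionary[w]]` when building question vectors.
###Output
_____no_output_____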
###Markdown
Also you can:
1. Change the parameters of the model.
2. Change the dataset to a bigger one.
3. Increase the training time.
4. Use a pretrained model (not only word2vec), as sketched above.

Dataset

Load the dataset from the [Kaggle Quora Question Pairs](https://www.kaggle.com/c/quora-question-pairs/overview) competition. The goal of this task is to determine which pairs of questions are duplicates (binary classification).
###Code
data = pd.read_csv(data_path + 'train.csv').fillna('')
data.head()
target = data.is_duplicate
data.drop(['is_duplicate', 'id', 'qid1', 'qid2'], axis=1, inplace=True)
data.question1 = data.question1.apply(lambda x: x.lower().decode('utf-8'))
data.question2 = data.question2.apply(lambda x: x.lower().decode('utf-8'))
###Output
_____no_output_____
###Markdown
Feature extraction. Use NLTK for tokenization and stop-word filtering.
###Code
nltk.download('punkt')
nltk.download('stopwords')
stop_words = nltk.corpus.stopwords.words('english')
###Output
_____no_output_____
###Markdown
Get a vector for every question by:
1. Tokenizing
2. Filtering out stop-words and non-words
3. Summing the word vectors and normalizing the result
###Code
EPS = 1e-100
def question2vec(s):
words = nltk.word_tokenize(s)
words = filter(lambda x: not x in stop_words and x.isalpha(), words)
seq = np.array([word2vec[dictionary[w]] for w in words if w in dictionary])
v = seq.sum(axis=0)
return v / ((v ** 2).sum() + EPS) ** 0.5 if seq.shape[0] != 0 else np.ones(embedding_size)*1.0/embedding_size**0.5
question1_vec = np.array([question2vec(q) for q in data.question1.values])
question2_vec = np.array([question2vec(q) for q in data.question2.values])
###Output
_____no_output_____
###Markdown
You can not only average the word vectors but also take the max, min, and std over each question's word vectors (a sketch of this pooling idea appears after the next code cell). Generate features on the embeddings.
###Code
data['cosine'] = [cosine(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['cityblock'] = [cityblock(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['canberra'] = [canberra(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['euclidean'] = [euclidean(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['minkowski'] = [minkowski(x, y, 3) for (x, y) in zip(question1_vec, question2_vec)]
data['braycurtis'] = [braycurtis(x, y) for (x, y) in zip(question1_vec, question2_vec)]
data['skew_q1'] = [skew(x) for x in question1_vec]
data['skew_q2'] = [skew(x) for x in question2_vec]
data['kur_q1'] = [kurtosis(x) for x in question1_vec]
data['kur_q2'] = [kurtosis(x) for x in question2_vec]
data['skew_diff'] = np.abs(data['skew_q1'] - data['skew_q2'])
data['kur_diff'] = np.abs(data['kur_q1'] - data['kur_q2'])
###Output
_____no_output_____
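###Markdown
A hedged sketch of the max/min/std pooling idea mentioned above: keep the matrix of word vectors for a question and pool over it instead of returning a single normalized sum (`question2vec_pooled` is an illustrative name, not part of the original code).
###Code
def question2vec_pooled(s):
    # Same tokenization and filtering as question2vec above.
    words = nltk.word_tokenize(s)
    words = [w for w in words if w not in stop_words and w.isalpha()]
    seq = np.array([word2vec[dictionary[w]] for w in words if w in dictionary])
    if seq.shape[0] == 0:
        seq = np.zeros((1, embedding_size))
    # Concatenate element-wise max, min and std over the question's word vectors.
    return np.concatenate([seq.max(axis=0), seq.min(axis=0), seq.std(axis=0)])
###Output
_____no_output_____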
###Markdown
In addition, you can go beyond distance metrics between the two question vectors and also use the full vectors or their differences as features. Generate simple features.
###Code
data['len_q1'] = data.question1.apply(lambda x: len(x))
data['len_q2'] = data.question2.apply(lambda x: len(x))
data['len_diff'] = np.abs(data.len_q1 - data.len_q2)
data['len_char_q1'] = data.question1.apply(lambda x: len(x.replace(' ', '')))
data['len_char_q2'] = data.question2.apply(lambda x: len(x.replace(' ', '')))
data['len_char_diff'] = np.abs(data.len_char_q1 - data.len_char_q2)
data['len_uniq_char_q1'] = data.question1.apply(lambda x: len(''.join(set(x.replace(' ', '')))))
data['len_uniq_char_q2'] = data.question2.apply(lambda x: len(''.join(set(x.replace(' ', '')))))
data['len_uniq_char_diff'] = np.abs(data.len_uniq_char_q1 - data.len_uniq_char_q2)
data['len_word_q1'] = data.question1.apply(lambda x: len(x.split()))
data['len_word_q2'] = data.question2.apply(lambda x: len(x.split()))
data['len_word_diff'] = np.abs(data.len_word_q1 - data.len_word_q2)
data['len_uniq_word_q1'] = data.question1.apply(lambda x: len(set(x.split())))
data['len_uniq_word_q2'] = data.question2.apply(lambda x: len(set(x.split())))
data['len_uniq_word_diff'] = np.abs(data.len_uniq_word_q1 - data.len_uniq_word_q2)
data['common_words'] = data.apply(lambda x: len(set(x['question1'].split()).intersection(set(x['question2'].split()))), axis=1)
data['union_words'] = data.apply(lambda x: len(set(x['question1'].split()).union(set(x['question2'].split()))), axis=1)
data['jaccard_words'] = data.common_words / (data.union_words + EPS)
###Output
_____no_output_____
###Markdown
Train and check the model. Split the dataset into train and validation parts.
###Code
train, test, y_train, y_test = train_test_split(data.drop(['question1', 'question2'], axis=1), target, test_size=0.2)
###Output
_____no_output_____
###Markdown
Train CatBoost and check predictions on the validation part.
###Code
clf = catboost.CatBoostClassifier(depth=6, iterations=1000, learning_rate=0.1, thread_count=16, logging_level='Silent')
clf.fit(train, y_train)
y_pred = clf.predict_proba(test)[:, 1]
print 'AUC:', roc_auc_score(y_test, y_pred)
###Output
AUC: 0.8268292157683419
|
Telecom_Churn_Model_Buildling.ipynb | ###Markdown
Step 1: Sanity checks on Data
###Code
df = pd.read_csv('telecom_churn_data.csv')
df.head()
df.info()
# let get the shape of the data frame before we start
df.shape
# let get the shape of the data frame before we start
# currently commenting this out as it is taking a long time for the 1 lakh records.
# df.describe
###Output
_____no_output_____
###Markdown
Step 2: Data Cleaning and Preparation. Let us start with data cleaning and prepare the data for analysis. Begin with the treatment of null values: if more than 74% of a column is missing, drop the column; if less than 3% is missing, replace the nulls with 0; impute the rest with the mean/median/mode according to the type of the column.
###Code
#finding the null percentage value in the columns
columns = df.columns
percent_missing_Nulls = (df.isnull().sum() * 100) / df.shape[0]
missing_value_df = pd.DataFrame({'column_name': columns,
'percent_missing': percent_missing_Nulls})
#dropping the columns whose missing percentage is more than 74%
threshold_percentage = 74
filtered_cols = list(missing_value_df[missing_value_df.percent_missing > threshold_percentage].column_name)
df = df.drop(filtered_cols, axis=1)
#checking the data shape
df.shape
df.isnull().sum()
100*df.isnull().sum()/df.shape[0]
#finding the null percentage value in the columns
columns = df.columns
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_df = pd.DataFrame({'column_name': columns,
'percent_missing': percent_missing})
#selecting the columns whose missing percentage is between 0% and 3% (for imputation)
percentage = 3
filtered_cols = list(missing_value_df[(missing_value_df.percent_missing<percentage) & (missing_value_df.percent_missing>0) ].column_name)
filtered_cols
###Output
_____no_output_____
###Markdown
Inference: the first 3 are numeric columns, the rest are date columns, and we will treat the dates differently.
###Code
### imputing the null value
num_cols = df.select_dtypes('number').columns
num_cols
#imputing the null values with 0
df['loc_og_t2o_mou'].fillna(0.0,inplace=True)
df['std_og_t2o_mou'].fillna(0.0,inplace=True)
df['loc_ic_t2o_mou'].fillna(0.0,inplace=True)
float_cols = df.select_dtypes('float').columns
#filling in the last date of each month where null values are present.
df['last_date_of_month_7'].fillna('7/31/2014',inplace=True)
df['last_date_of_month_8'].fillna('8/31/2014',inplace=True)
df['last_date_of_month_9'].fillna('9/30/2014',inplace=True)
def impute_df(df, col):
if df[col].dtype == "float":
df[col].fillna(df[col].median(), inplace=True)
else:
df[col].fillna(df[col].mode()[0], inplace=True)
##filling the nan values of the remaining monthly usage columns (months 6-9):
##median for float columns, mode for the rest (see impute_df above)
usage_col_prefixes = [
    'onnet_mou', 'offnet_mou', 'roam_ic_mou', 'roam_og_mou',
    'loc_og_t2t_mou', 'loc_og_t2m_mou', 'loc_og_t2f_mou', 'loc_og_t2c_mou', 'loc_og_mou',
    'std_og_t2t_mou', 'std_og_t2m_mou', 'std_og_t2f_mou', 'std_og_t2c_mou', 'std_og_mou',
    'isd_og_mou', 'spl_og_mou', 'og_others',
    'loc_ic_t2t_mou', 'loc_ic_t2m_mou', 'loc_ic_t2f_mou', 'loc_ic_mou',
    'std_ic_t2t_mou', 'std_ic_t2m_mou', 'std_ic_t2f_mou', 'std_ic_t2o_mou', 'std_ic_mou',
    'spl_ic_mou', 'isd_ic_mou', 'ic_others'
]
for prefix in usage_col_prefixes:
    for month in ['6', '7', '8', '9']:
        impute_df(df, prefix + '_' + month)
#imputing the last-recharge dates with the last day of each month
df['date_of_last_rech_6'].fillna('6/30/2014',inplace=True)
df['date_of_last_rech_7'].fillna('7/31/2014',inplace=True)
df['date_of_last_rech_8'].fillna('8/31/2014',inplace=True)
df['date_of_last_rech_9'].fillna('9/30/2014',inplace=True)
##dropping circle_id
df.drop(columns=['circle_id'],inplace=True)
##converting the column mobile_number to object
df['mobile_number'] = df['mobile_number'].astype('object')
###Output
_____no_output_____
###Markdown
Deriving Variables
###Code
##finding the average recharge amount for the month 6 and 7
df['avg_rech_amt_6_7'] = (df['total_rech_amt_6']+df['total_rech_amt_7'])/2
### finding the average revenue generated per user for the month of 6 and 7
df['avg_arpu_6_7'] = round((df['arpu_6']+df['arpu_7'])/2,2)
cut_off = df['avg_rech_amt_6_7'].quantile(.7)
df['HVC'] = df['avg_rech_amt_6_7'].map(lambda x: 1 if x > cut_off else 0)
df.HVC.sum()#checkpoint reached
df_hvc = df[df['HVC'] == 1]
df_hvc.shape
def flag_df(df):
if(df['total_ic_mou_9'] == 0) and (df['total_og_mou_9'] == 0) and (df['vol_2g_mb_9'] == 0) and (df['vol_3g_mb_9'] == 0):
return 1
else:
return 0
df_hvc['Churn'] = df_hvc.apply(flag_df,axis=1)
df_hvc['Churn'].value_counts()
###Output
_____no_output_____
###Markdown
Inference: the number of churn cases is 2539. This is very imbalanced data, so we first need to get a balanced dataset for deriving a model. In the next few steps we shall balance the records.
###Code
# get the churn counts
churn_count_0, churn_count_1 = df_hvc['Churn'].value_counts()
churn_count_0, churn_count_1
# Separate class
churn_class_0 = df_hvc[df_hvc['Churn'] == 0]
churn_class_1 = df_hvc[df_hvc['Churn'] == 1] # print the shape of the class
print('class 0:', churn_class_0.shape)
print('class 1:', churn_class_1.shape)
# take a random sample of the same size as the number of churn records
churn_class_0_cut = churn_class_0.sample(churn_count_1)
df_hvc_balanced = pd.concat([churn_class_0_cut, churn_class_1], axis=0)
print("total class of 1 and 0:", df_hvc_balanced['Churn'].value_counts())  # plot the count after under-sampling
df_hvc_balanced['Churn'].value_counts().plot(kind='bar', title='count (target)')
df_hvc_balanced.head()
#churn customers
df_hvc_balanced[['total_ic_mou_9','total_og_mou_9','vol_2g_mb_9','vol_3g_mb_9','Churn']]
###removing all the columns of the _9 month
df_hvc_balanced = df_hvc_balanced[df_hvc_balanced.columns.drop(list(df_hvc_balanced.filter(regex='_9')))]
df_hvc_balanced.shape
df_hvc_balanced.head()
###Output
_____no_output_____
###Markdown
look for object type columns as they cannot be used in model building, and keep only numeric variables.
###Code
df_hvc_balanced.isnull().sum()
obj_cols = df_hvc_balanced.select_dtypes('object').columns
obj_cols
df_hvc_balanced.drop(columns=obj_cols,inplace=True)
###Output
_____no_output_____
###Markdown
look for NaN values and replace by 0
###Code
df_hvc_balanced.isna().any()
df_hvc_balanced['total_rech_data_8'] = df_hvc_balanced['total_rech_data_8'].fillna(0)
df_hvc_balanced['max_rech_data_8'] = df_hvc_balanced['max_rech_data_8'].fillna(0)
df_hvc_balanced['count_rech_2g_8'] = df_hvc_balanced['count_rech_2g_8'].fillna(0)
df_hvc_balanced['count_rech_3g_8'] = df_hvc_balanced['count_rech_3g_8'].fillna(0)
df_hvc_balanced['av_rech_amt_data_8'] = df_hvc_balanced['av_rech_amt_data_8'].fillna(0)
df_hvc_balanced['arpu_3g_8'] = df_hvc_balanced['arpu_3g_8'].fillna(0)
df_hvc_balanced['night_pck_user_8'] = df_hvc_balanced['night_pck_user_8'].fillna(0)
df_hvc_balanced['arpu_2g_8'] = df_hvc_balanced['arpu_2g_8'].fillna(0)
df_hvc_balanced['fb_user_8'] = df_hvc_balanced['fb_user_8'].fillna(0)
df_hvc_balanced.isnull().sum().sum()
###Output
_____no_output_____
###Markdown
Step 3: EDA
###Code
# lets start analysis of the data
# let us check how many HV customers and also what is the relationship between churn and HVC
df["HVC"].value_counts().plot.barh()
df["HVC"].value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
Inference : we need to concentrate on 30% of the customers who can be termed high value
###Code
# next let us look at what is the relationship between churn and HVC
# let us check how many Churn customers and also what is the relationship between churn and HVC
df_hvc_balanced["Churn"].value_counts().plot.barh()
df_hvc_balanced["Churn"].value_counts()
###Output
_____no_output_____
###Markdown
Inference: We have a balanced dataset for building a model. Next, let's check for outliers and other preliminary statistics that could give us some insights.
###Code
# Checking for outliers in the continuous variables
temp_columns = df_hvc_balanced[['avg_arpu_6_7','avg_rech_amt_6_7']]
# Checking outliers at 25%, 50%, 75%, 90%, 95% and 99%
temp_columns.describe(percentiles=[.25, .5, .75, .90, .95, .99])
###Output
_____no_output_____
###Markdown
Inference: Among the HVCs themselves there is huge variation and a chance of outliers - we will use a box plot to analyse this.
###Code
sns.boxplot( y=df_hvc_balanced["avg_arpu_6_7"] );
plt.show()
sns.boxplot( y=df_hvc_balanced["avg_rech_amt_6_7"] );
plt.show()
###Output
_____no_output_____
###Markdown
Inference: There seem to be some outliers, and we need to remove those records so that they do not skew the analysis.
###Code
df_hvc_balanced = df_hvc_balanced[df_hvc_balanced['avg_rech_amt_6_7'] <= 5000]
# Checking for outliers in the continuous variables
temp_columns = df_hvc_balanced[['avg_arpu_6_7','avg_rech_amt_6_7']]
# Checking outliers at 25%, 50%, 75%, 90%, 95% and 99%
temp_columns.describe(percentiles=[.25, .5, .75, .90, .95, .99])
sns.boxplot( y=df_hvc_balanced["avg_rech_amt_6_7"] );
plt.show()
sns.histplot(data=df_hvc_balanced, x="avg_rech_amt_6_7")
plt.show()
###Output
_____no_output_____
###Markdown
Inference: there are very few records beyond 2000, so we will remove them too. We will first look at the value counts by binning the amounts and then take the call on deleting: if the records are very few in number we shall remove them.
###Code
df_hvc_balanced["avg_rech_amt_6_7"].value_counts(bins=20)
df_hvc_balanced = df_hvc_balanced[df_hvc_balanced['avg_rech_amt_6_7'] <= 2000]
df_hvc_balanced["avg_rech_amt_6_7"].value_counts(bins=200)
sns.boxplot( y=df_hvc_balanced["avg_rech_amt_6_7"].value_counts(bins=200));
plt.show()
###Output
_____no_output_____
###Markdown
Inference: now the data looks good. There will be people who recharge for close to 2000, so let us keep these records for further processing.
###Code
plt.figure(figsize = (20,10))
sns.heatmap(df_hvc_balanced.corr(),annot = True)
plt.show()
###Output
_____no_output_____
###Markdown
Inference: Nothing is inferable from this - we need to reduce the number of variables. It is better to do this after we find the top 10-15 variables using the RFE model.
###Code
# Checking for outliers in the continuous variables
out_check = df_hvc_balanced[['avg_rech_amt_6_7','avg_arpu_6_7']]
# Checking outliers at 25%, 50%, 75%, 90%, 95% and 99%
out_check.describe(percentiles=[.25, .5, .75, .90, .95, .99])
# Data Cleaning - outlier, Imputation, null values removal - (rishab)
# Data Preparation - derived variables(hvc, churn, arpu) - (rishab)
# EDA (univariate,bivariate, multivariate) (arthi)
# model - (Logistic(arthi) , decision trees(arthi), random forest (rishab))
###Output
_____no_output_____
###Markdown
Step 4: Test-Train Split
###Code
from sklearn.model_selection import train_test_split
# Putting feature variable to X
X = df_hvc_balanced.drop(['Churn'], axis=1)
X.head()
# Putting response variable to y
y = df_hvc_balanced['Churn']
y.head()
# Splitting the data into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, test_size=0.3, random_state=100)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
###Output
_____no_output_____
###Markdown
Step 5: Feature Scaling
###Code
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
###Output
_____no_output_____
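###Markdown
Note: the scaler above is only instantiated and is not applied anywhere later in this notebook. If scaling were applied, it would typically be fit on the training split only and then used to transform both splits -- a hedged sketch (the `_scaled` names are illustrative and nothing below depends on them):
###Code
# Fit on the training features only (avoids leakage), then transform both splits;
# wrap the results back into DataFrames to keep column names and indices.
X_train_scaled = pd.DataFrame(scaler.fit_transform(X_train),
                              columns=X_train.columns, index=X_train.index)
X_test_scaled = pd.DataFrame(scaler.transform(X_test),
                             columns=X_test.columns, index=X_test.index)
###Output
_____no_output_____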
###Markdown
Step 6: Model Building
###Code
import statsmodels.api as sm
df_hvc_balanced.shape
# Logistic regression model
logm1 = sm.GLM(y_train,(sm.add_constant(X_train)), family = sm.families.Binomial())
logm1.fit().summary()
###Output
_____no_output_____
###Markdown
Step 7: Feature Selection Using RFE
###Code
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
from sklearn.feature_selection import RFE
rfe = RFE(logreg, 15) # running RFE with 15 variables as output
rfe = rfe.fit(X_train, y_train)
rfe.support_
list(zip(X_train.columns, rfe.support_, rfe.ranking_))
col = X_train.columns[rfe.support_]
X_train.columns[~rfe.support_]
# assesing the stats model
X_train_sm = sm.add_constant(X_train[col])
logm2 = sm.GLM(y_train,X_train_sm, family = sm.families.Binomial())
res = logm2.fit()
res.summary()
plt.figure(figsize = (20,10))
sns.heatmap(X_train_sm.corr(),annot = True)
plt.show()
# Getting the predicted values on the train set
y_train_pred = res.predict(X_train_sm)
y_train_pred[:10]
y_train_pred = y_train_pred.values.reshape(-1)
y_train_pred[:10]
y_train_pred_final = pd.DataFrame({'Churn':y_train.values, 'Churn_Prob':y_train_pred})
y_train_pred_final['mobile_number'] = y_train.index
y_train_pred_final.head()
y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
# Let's see the head
y_train_pred_final.head()
y_train_pred_final.head(50)
###Output
_____no_output_____
###Markdown
Step 8: checking the confusion matrix and the other scores: accuracy, sensitivity, specificity
###Code
from sklearn import metrics
# Confusion matrix
confusion = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted )
print(confusion)
# Let's check the overall accuracy.
print(metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
###Output
_____no_output_____
###Markdown
Inference: That's a good level of accuracy, but we will still check the VIF and see if some variables can be removed to get better predictability. Checking VIFs
###Code
# Check for the VIF values of the feature variables.
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Create a dataframe that will contain the names of all the feature variables and their respective VIFs
vif = pd.DataFrame()
vif['Features'] = X_train[col].columns
vif['VIF'] = [variance_inflation_factor(X_train[col].values, i) for i in range(X_train[col].shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
###Output
_____no_output_____
###Markdown
Inference: There are a few variables with high VIF. It's best to drop these variables as they aren't helping much with prediction and are unnecessarily making the model complex. So let's start by dropping those.
###Code
#col = col.drop(['total_ic_mou_8','loc_ic_mou_8','total_og_mou_8','std_ic_mou_8','offnet_mou_8'], 1)
cols=[]
for i in range(0,vif.shape[0]):
if vif['VIF'][i] > 20:
cols.append(vif['Features'][i])
col = col.drop(cols, 1)
col
# Let's re-run the model using the selected variables
X_train_sm = sm.add_constant(X_train[col])
logm3 = sm.GLM(y_train,X_train_sm, family = sm.families.Binomial())
res = logm3.fit()
res.summary()
y_train_pred = res.predict(X_train_sm).values.reshape(-1)
y_train_pred[:10]
y_train_pred_final['Churn_Prob'] = y_train_pred
# Creating new column 'predicted' with 1 if Churn_Prob > 0.5 else 0
y_train_pred_final['predicted'] = y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.5 else 0)
y_train_pred_final.head()
vif = pd.DataFrame()
vif['Features'] = X_train[col].columns
vif['VIF'] = [variance_inflation_factor(X_train[col].values, i) for i in range(X_train[col].shape[1])]
vif['VIF'] = round(vif['VIF'], 2)
vif = vif.sort_values(by = "VIF", ascending = False)
vif
###Output
_____no_output_____
###Markdown
There seem to be no more multicollinearity issues - so we can proceed with this split.
###Code
# Let's check the overall accuracy.
# Confusion matrix
confusion = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.predicted )
print(confusion)
print(metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
###Output
_____no_output_____
###Markdown
Metrics beyond simple accuracy
###Code
TP = confusion[1,1] # true positive
TN = confusion[0,0] # true negatives
FP = confusion[0,1] # false positives
FN = confusion[1,0] # false negatives
# Let's see the sensitivity of our logistic regression model
print(" Sensitivity (Recall): " , TP / float(TP+FN))
# Calculate false postive rate - predicting churn when customer does not have churned
print(" False positive rate:" , FP / float(TN+FP))
# positive predictive value
print (" Precision ", TP / float(TP+FP))
# Negative predictive value
print (" True negatives rate:",TN / float(TN+ FN))
###Output
_____no_output_____
###Markdown
Step 9: Plotting ROC, Tradeoffs, and Threshold Cutoff

Plotting the ROC. An ROC curve demonstrates several things:
- It shows the tradeoff between sensitivity and specificity (any increase in sensitivity will be accompanied by a decrease in specificity).
- The closer the curve follows the left-hand border and then the top border of the ROC space, the more accurate the test.
- The closer the curve comes to the 45-degree diagonal of the ROC space, the less accurate the test.
###Code
def draw_roc( actual, probs ):
fpr, tpr, thresholds = metrics.roc_curve( actual, probs,
drop_intermediate = False )
auc_score = metrics.roc_auc_score( actual, probs )
plt.figure(figsize=(5, 5))
plt.plot( fpr, tpr, label='ROC curve (area = %0.2f)' % auc_score )
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate or [1 - True Negative Rate]')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
return None
fpr, tpr, thresholds = metrics.roc_curve( y_train_pred_final.Churn, y_train_pred_final.Churn_Prob, drop_intermediate = False )
draw_roc(y_train_pred_final.Churn, y_train_pred_final.Churn_Prob)
###Output
_____no_output_____
###Markdown
Step 10: Finding the Optimal Cutoff Point. The optimal cutoff probability is the probability at which we get balanced sensitivity and specificity.
###Code
# Let's create columns with different probability cutoffs
numbers = [float(x)/10 for x in range(10)]
for i in numbers:
y_train_pred_final[i]= y_train_pred_final.Churn_Prob.map(lambda x: 1 if x > i else 0)
y_train_pred_final.head()
# Now let's calculate accuracy sensitivity and specificity for various probability cutoffs.
cutoff_df = pd.DataFrame( columns = ['prob','accuracy','sensi','speci'])
from sklearn.metrics import confusion_matrix
# TP = confusion[1,1] # true positive
# TN = confusion[0,0] # true negatives
# FP = confusion[0,1] # false positives
# FN = confusion[1,0] # false negatives
num = [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
for i in num:
cm1 = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final[i] )
total1=sum(sum(cm1))
accuracy = (cm1[0,0]+cm1[1,1])/total1
speci = cm1[0,0]/(cm1[0,0]+cm1[0,1])
sensi = cm1[1,1]/(cm1[1,0]+cm1[1,1])
cutoff_df.loc[i] =[ i ,accuracy,sensi,speci]
print(cutoff_df)
###Output
_____no_output_____
###Markdown
Inference: the best results can be seen between .5 and .6, where sensitivity and specificity are .83/.73 and .77/.83 respectively.
###Code
# Let's plot accuracy sensitivity and specificity for various probabilities.
cutoff_df.plot.line(x='prob', y=['accuracy','sensi','speci'])
plt.show()
###Output
_____no_output_____
###Markdown
Inference : From the curve above, ~.55 is the optimum point to take it as a cutoff probability.
###Code
y_train_pred_final['final_predicted'] = y_train_pred_final.Churn_Prob.map( lambda x: 1 if x > 0.55 else 0)
y_train_pred_final.head()
# Let's check the overall accuracy.
metrics.accuracy_score(y_train_pred_final.Churn, y_train_pred_final.final_predicted)
confusion2 = metrics.confusion_matrix(y_train_pred_final.Churn, y_train_pred_final.final_predicted )
confusion2
TP = confusion2[1,1] # true positive
TN = confusion2[0,0] # true negatives
FP = confusion2[0,1] # false positives
FN = confusion2[1,0] # false negatives
Recall = TP / float(TP+FN)
Precision = TP / float(TP+FP)
# Let's see the sensitivity of our logistic regression model
print(" Sensitivity (Recall): " , Recall)
# Calculate false postive rate - predicting churn when customer does not have churned
print(" False positive rate:" , FP / float(TN+FP))
# positive predictive value
print (" Precision :", Precision)
# Negative predictive value
print (" True negatives rate:",TN / float(TN+ FN))
## calclulate the F1 score
F1 = 2 * (Precision * Recall)/(Precision + Recall)
F1
###Output
_____no_output_____
###Markdown
Inference: Not a bad F1 score! Looks like the model is working. Recall to Precision Tradeoff
###Code
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import precision_recall_curve
print("Precision score: ", precision_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
print("Recall score :", recall_score(y_train_pred_final.Churn, y_train_pred_final.predicted))
y_train_pred_final.Churn, y_train_pred_final.predicted
p, r, thresholds = precision_recall_curve(y_train_pred_final.Churn, y_train_pred_final.Churn_Prob)
plt.plot(thresholds, p[:-1], "g-")
plt.plot(thresholds, r[:-1], "r-")
plt.show()
###Output
_____no_output_____
###Markdown
Inference: the thresholds correspond to ~.55. Step 11: Making predictions on the test set
###Code
X_test_temp = X_test[col]
X_test_temp.head()
X_test_sm = sm.add_constant(X_test_temp)
y_test_pred = res.predict(X_test_sm)
y_test_pred[:10]
# Converting y_pred to a dataframe which is an array
y_pred_1 = pd.DataFrame(y_test_pred)
y_pred_1.head()
# Converting y_test to dataframe
y_test_df = pd.DataFrame(y_test)
# Putting CustID to index
y_test_df['mobile_number'] = y_test_df.index
# Removing index for both dataframes to append them side by side
y_pred_1.reset_index(drop=True, inplace=True)
y_test_df.reset_index(drop=True, inplace=True)
# Appending y_test_df and y_pred_1
y_pred_final = pd.concat([y_test_df, y_pred_1],axis=1)
y_pred_final.head()
# Renaming the column
y_pred_final= y_pred_final.rename(columns={ 0 : 'Churn_Prob'})
# Rearranging the columns
y_pred_final = y_pred_final.reindex(['mobile_number','Churn','Churn_Prob'], axis=1)
y_pred_final.head()
y_pred_final['final_predicted'] = y_pred_final.Churn_Prob.map(lambda x: 1 if x > 0.42 else 0)
y_pred_final.head()
# Let's check the overall accuracy.
metrics.accuracy_score(y_pred_final.Churn, y_pred_final.final_predicted)
confusion2 = metrics.confusion_matrix(y_pred_final.Churn, y_pred_final.final_predicted )
confusion2
TP = confusion2[1,1] # true positive
TN = confusion2[0,0] # true negatives
FP = confusion2[0,1] # false positives
FN = confusion2[1,0] # false negatives
# Let's see the sensitivity of our logistic regression model
print(" Sensitivity (Recall): " , TP / float(TP+FN))
# Calculate false postive rate - predicting churn when customer does not have churned
print(" False positive rate:" , FP / float(TN+FP))
# positive predictive value
print (" Precision ", TP / float(TP+FP))
# Negative predictive value
print (" True negatives rate:",TN / float(TN+ FN))
###Output
_____no_output_____
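###Markdown
Which variables drive these predictions can be read directly off the fitted model -- a minimal sketch using the statsmodels results object `res` from the final training fit:
###Code
# Coefficients of the final logistic model, ordered by absolute magnitude.
coef = res.params.drop('const')
print(coef.reindex(coef.abs().sort_values(ascending=False).index))
###Output
_____no_output_____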
###Markdown
Inference: the current model has good prediction stats: 88% sensitivity, 85% true-negative detection, and 72% precision. The variables that make a difference are:
- total_rech_num_8
- total_rech_num_7
- total_ic_mou_8
- std_ic_mou_8
- isd_ic_mou_8
- roam_og_mou_8
- ic_others_8
- sep_vbc_3g

And this is with the model applied to the records with a probability threshold of more than .55.

Decision Trees
###Code
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=3)
dt.fit(X_train, y_train)
# Importing required packages for visualization
import os
os.environ['PATH'] = os.environ['PATH']
from IPython.display import Image
#from sklearn.externals.six import StringIO
from six import StringIO
from sklearn.tree import export_graphviz
import pydotplus, graphviz
# plotting tree with max_depth=3
dot_data = StringIO()
export_graphviz(dt, out_file=dot_data, filled=True, rounded=True,
feature_names=X.columns,
class_names=['Churn', "No Churn"])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
#Image(graph.create_png(),width=800,height=900)
#graph.write_pdf("dt_heartdisease.pdf")
###Output
_____no_output_____
###Markdown
Inference:
1. The main variables that help us decide are:
- total_ic_mou_8
- last_day_rch_amt_8
- total_og_mou_8
- total_rech_amt_8
- loc_og_mou_8
- roam_og_mou_8
- arpu_2g_8

Paths to take: if we look at this tree, the path to follow to find the churn cases is:
- total_ic_mou_8 > 39.45, roam_og_mou_8 267

and for non-churns:
- total_ic_mou_8 <= 39.45, arpu_8 <= 289.605, total_ic_mou_8 <= 3.74

These are based on the Gini gains between the nodes. Let us now check the feature importances.
###Code
dt.feature_importances_
imp_df = pd.DataFrame({
"Var name":X_train.columns,
"Imp":dt.feature_importances_
})
imp_df.sort_values(by="Imp",ascending=False)
###Output
_____no_output_____
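###Markdown
As a complement to the importances, the exact split rules of the fitted depth-3 tree can be printed as text -- a hedged sketch using scikit-learn's export_text (assuming a scikit-learn version that provides it):
###Code
from sklearn.tree import export_text

# Text dump of the fitted tree `dt`, with the actual feature names.
print(export_text(dt, feature_names=list(X_train.columns)))
###Output
_____no_output_____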
###Markdown
Inference: the top features for the model can be taken from this list. Let us move on to find the accuracy and the confusion matrix.
###Code
y_train_pred = dt.predict(X_train)
y_test_pred = dt.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score
print(accuracy_score(y_train, y_train_pred))
confusion = confusion_matrix(y_train, y_train_pred)
TP = confusion[1,1] # true positive
TN = confusion[0,0] # true negatives
FP = confusion[0,1] # false positives
FN = confusion[1,0] # false negatives
Recall = TP / float(TP+FN)
Precision = TP / float(TP+FP)
# Let's see the sensitivity of our logistic regression model
print(" Sensitivity (Recall): " , Recall)
# Calculate false postive rate - predicting churn when customer does not have churned
print(" False positive rate:" , FP / float(TN+FP))
# positive predictive value
print (" Precision :", Precision)
# Negative predictive value
print (" True negatives rate:",TN / float(TN+ FN))
###Output
_____no_output_____
###Markdown
Inference: This model has good sensitivity and precision and is able to predict true positives and true negatives very well. We shall check the test data performance too and then move on to checking whether tuning the hyperparameters gives even better results.
###Code
print(accuracy_score(y_test, y_test_pred))
confusion = confusion_matrix(y_test, y_test_pred)
confusion
TP = confusion[1,1] # true positive
TN = confusion[0,0] # true negatives
FP = confusion[0,1] # false positives
FN = confusion[1,0] # false negatives
Recall = TP / float(TP+FN)
Precision = TP / float(TP+FP)
# Let's see the sensitivity of our logistic regression model
print(" Sensitivity (Recall): " , Recall)
# Calculate false postive rate - predicting churn when customer does not have churned
print(" False positive rate:" , FP / float(TN+FP))
# positive predictive value
print (" Precision :", Precision)
# Negative predictive value
print (" True negatives rate:",TN / float(TN+ FN))
###Output
_____no_output_____
###Markdown
Inference: this model predicts churn vs. no-churn very well on the test data too.
###Code
y_train.value_counts()
###Output
_____no_output_____
###Markdown
Let us calculate the impurity measures and find the classification error.

- Churn = 0: number of cases = 18930
- Churn = 1: number of cases = 1745
- p(0) = 18930/20675 = .91
- p(1) = 1745/20675 = .08

Since p(max) here is .91, the classification error could be as low as (1 - .91) = .09: the chance of wrongly predicting no-churn is .09. In other words, if we say that none of them will churn, we could be wrong about 9% of the time. A quick numeric check of this follows below. Let us start playing with the hyperparameters.
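###Markdown
A quick numeric check of the majority-class baseline discussed above -- a minimal sketch using the training labels already in scope:
###Code
# Always predicting the majority class ("no churn") is wrong
# at a rate of 1 - p(majority class).
class_probs = y_train.value_counts(normalize=True)
print("p(majority class):", round(class_probs.max(), 2))
print("baseline classification error:", round(1 - class_probs.max(), 2))
###Output
_____no_output_____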
###Code
def get_dt_graph(dt_classifier):
dot_data = StringIO()
export_graphviz(dt_classifier, out_file=dot_data, filled=True,rounded=True,
feature_names=X.columns,
class_names=['Churn', "No Churn"])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
return graph
def evaluate_model(dt_classifier):
print("Train Accuracy :", accuracy_score(y_train, dt_classifier.predict(X_train)))
print("Train Confusion Matrix:")
print(confusion_matrix(y_train, dt_classifier.predict(X_train)))
print("-"*50)
print("Test Accuracy :", accuracy_score(y_test, dt_classifier.predict(X_test)))
print("Test Confusion Matrix:")
print(confusion_matrix(y_test, dt_classifier.predict(X_test)))
###Output
_____no_output_____
###Markdown
Case 1 : all default parameters
###Code
dt_default = DecisionTreeClassifier(random_state=42)
dt_default.fit(X_train, y_train)
gph = get_dt_graph(dt_default)
Image(gph.create_png())
evaluate_model(dt_default)
###Output
_____no_output_____
###Markdown
Inference: this is an overfit, with train accuracy of 1, but test accuracy is good enough at .78. We will try the other parameters now. Case 2 - max_depth = 3
###Code
dt_depth = DecisionTreeClassifier(max_depth=3)
dt_depth.fit(X_train, y_train)
gph = get_dt_graph(dt_depth)
Image(gph.create_png())
evaluate_model(dt_depth)
###Output
_____no_output_____
###Markdown
Case 3 - min_samples_split = 20
###Code
dt_min_split = DecisionTreeClassifier(min_samples_split=20)
dt_min_split.fit(X_train, y_train)
gph = get_dt_graph(dt_min_split)
Image(gph.create_png())
evaluate_model(dt_min_split)
###Output
_____no_output_____
###Markdown
Case 4 - min_samples_leaf = 20
###Code
dt_min_leaf = DecisionTreeClassifier(min_samples_leaf=20, random_state=42)
dt_min_leaf.fit(X_train, y_train)
gph = get_dt_graph(dt_min_leaf)
Image(gph.create_png())
evaluate_model(dt_min_leaf)
###Output
_____no_output_____
###Markdown
Using entropy instead of Gini. Case 5 - for the impurity checks, let us use the entropy criterion instead of Gini.
###Code
dt_min_leaf_entropy = DecisionTreeClassifier(min_samples_leaf=20, random_state=42, criterion="entropy")
dt_min_leaf_entropy.fit(X_train, y_train)
gph = get_dt_graph(dt_min_leaf_entropy)
Image(gph.create_png())
evaluate_model(dt_min_leaf_entropy)
###Output
_____no_output_____
###Markdown
Case 6 -- enough of trying one by one; let us try all the possible values for the hyperparameter variables and check the results once and for all using grid search.
###Code
dt = DecisionTreeClassifier(random_state=42)
from sklearn.model_selection import GridSearchCV
# Create the parameter grid based on the results of random search
params = {
'max_depth': [2, 3, 5, 10, 20],
'min_samples_leaf': [5, 10, 20, 50, 100],
'criterion': ["gini", "entropy"]
}
# Instantiate the grid search model
grid_search = GridSearchCV(estimator=dt,
param_grid=params,
cv=4, n_jobs=-1, verbose=1, scoring = "accuracy")
%%time
grid_search.fit(X_train, y_train)
score_df = pd.DataFrame(grid_search.cv_results_)
score_df.head()
score_df.nlargest(5,"mean_test_score")
grid_search.best_estimator_
dt_best = grid_search.best_estimator_
dt_best
evaluate_model(dt_best)
from sklearn.metrics import classification_report
print(classification_report(y_test, dt_best.predict(X_test)))
gph = get_dt_graph(dt_best)
Image(gph.create_png())
###Output
_____no_output_____
###Markdown
Final inference: this tree with depth 5 and min_samples_leaf of 10 gives the best results. Let us now again see which variables matter. The variables that now matter are:
- total_ic_mou_8
- arpu_8
- vol_2g_mb_8
- onnet_mou_8
- aon
- last_day_rch_amt_8
- total_rech_amt_8
- roam_og_mou_8

For the non-churns the best path to take is: total_ic_mou_8 .036; for churns: total_ic_mou_8 > 39.475 and roam_og_mou_8 267 and last_day_rch_amt_8 417.5.

Random Forest
###Code
#importing libraries
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(random_state=42, n_estimators=10, max_depth=3,oob_score=True)
X_train.head()
rf.fit(X_train, y_train)
rf.estimators_[0]
from sklearn.metrics import plot_roc_curve
plot_roc_curve(rf,X_train,y_train)
sample_tree = rf.estimators_[4]
gph = get_dt_graph(sample_tree)
Image(gph.create_png(), width=700, height=700)
gph = get_dt_graph(rf.estimators_[2])
Image(gph.create_png(), width=700, height=700)
evaluate_model(rf)
###Output
_____no_output_____
###Markdown
Grid search for hyper-parameter tuning
###Code
classifier_rf = RandomForestClassifier(random_state=42, n_jobs=-1,oob_score=True)
# Create the parameter grid based on the results of random search
params = {
'max_depth': [1, 2, 5, 10, 20],
'min_samples_leaf': [5, 10, 20, 50, 100],
'max_features': [2,3,4],
'n_estimators': [5,10,15,20,25]
}
# Instantiate the grid search model
grid_search = GridSearchCV(estimator=classifier_rf, param_grid=params,
cv=4, n_jobs=-1, verbose=1, scoring = "accuracy")
%%time
grid_search.fit(X,y)
rf_best = grid_search.best_estimator_
rf_best
rf_best.oob_score_
plot_roc_curve(rf_best,X_train,y_train)
evaluate_model(rf_best)
sample_tree = rf_best.estimators_[0]
sample_tree
gph = get_dt_graph(sample_tree)
Image(gph.create_png())
gph = get_dt_graph(rf_best.estimators_[0])
Image(gph.create_png(), height=600, width=600)
gph = get_dt_graph(rf_best.estimators_[10])
Image(gph.create_png(), height=600, width=600)
print(classification_report(y_test, rf_best.predict(X_test)))
###Output
_____no_output_____
###Markdown
Variable importance in RandomForest and Decision trees
###Code
imp_df = pd.DataFrame({
"Varname": X_train.columns,
"Imp": rf_best.feature_importances_
})
imp_df.sort_values(by="Imp", ascending=False,inplace=True)
imp_df.head(15)
###Output
_____no_output_____ |
fmri_classification/Localizer_Classification.ipynb | ###Markdown
Classification of Localizer Data. Import necessary packages.
###Code
%matplotlib inline
import glob
import itertools  # used by plot_confusion_matrix below
import os.path as op
import os as os
import nibabel as nib
import pandas as pd
import numpy as np
from nilearn.masking import compute_epi_mask
import matplotlib.pyplot as plt
import matplotlib as mpl
# Nilearn for neuro-imaging-specific machine learning
from nilearn.input_data import NiftiMasker
from nilearn import image
# Nibabel for general neuro-imaging tools
import nibabel
# Scikit-learn for machine learning
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.dummy import DummyClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.pipeline import Pipeline
from sklearn.cross_validation import LeaveOneLabelOut, LeavePLabelOut, cross_val_score
from sklearn import preprocessing
# Plotting
import matplotlib.pyplot as plt
from nilearn import plotting
import seaborn as sns
sns.set(context="poster", style="ticks", font="Arial")
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Greens):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, np.round(cm[i, j], decimals=2),
horizontalalignment="center", size='xx-large',
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def plot_results(subj_info, cm_group, df_acc, df_auc=None,
classes=['animal', 'face', 'fruitveg', 'tool', 'virtualtown']):
if df_auc is not None:
data = df_auc.merge(subj_info)
print data.group.value_counts()/10
plt.figure()
plot_confusion_matrix(cm_group.mean(axis=0), classes=classes,
title='Mean confusion matrix')
plt.figure()
sns.factorplot(x='category', y='accuracy', hue='classifier', aspect=2,
units='subid', ci=68, data=df_acc, dodge=.1)
plt.figure()
sns.boxplot(x='category', y='accuracy', hue='classifier', data=df_acc)
sns.stripplot(x='category', y='accuracy', hue='classifier', jitter=True,
data=df_acc)
if df_auc is not None:
sns.factorplot(x='category', y='auc', hue='group', aspect=2,
units='subid', ci=68, data=data, dodge=.1)
###Output
_____no_output_____
###Markdown
Set up some colors for the plots
###Code
palette = {'logreg': 'mediumseagreen',
'chance': 'darkgray',
'f1 score': 'teal'}
###Output
_____no_output_____
###Markdown
Define some functions for classification
###Code
# While debugging:
%load_ext autoreload
%aimport ap_classify
from ap_classify import *
###Output
_____no_output_____
###Markdown
Set up directory & file information
###Code
smoothing = 'unsmoothed'
regspace = 'epi'
design = 'localizer_cond_mvpa.csv' # onset file in lyman-style
# design = 'localizer_subcat.csv' # onset file in lyman-style
smoothing_fwhm = 0
standardize = True
tr = float(2) # in seconds
tr_shift = 4.5 # seconds to shift forward by
ts_type = 'raw' # raw or residual
run_list = [7, 8]
basedir = '/Volumes/group/awagner/sgagnon/AP'
analydir = op.join(basedir, 'analysis/mvpa_raw')
subjfile = op.join(analydir, 'notebooks/subj_info.csv')
subj_info = pd.read_csv(subjfile)
# Filepath templates
if ts_type == 'raw':
tsfilename = 'timeseries_xfm.nii.gz'
elif ts_type == 'residual':
tsfilename = 'res4d_xfm.nii.gz'
tsfile = op.join(analydir, "{subid}", 'reg', regspace,
smoothing, "run_{run_id}", tsfilename)
func_maskfile = op.join(analydir, "{subid}", 'reg', regspace,
smoothing, "run_{run_id}", 'functional_mask_xfm.nii.gz')
maskfile = op.join(basedir, 'data', "{subid}", 'masks',
"{mask_name}.nii.gz")
meanfile = op.join(analydir, "{subid}", 'preproc',
"run_{run_id}", 'mean_func.nii.gz')
onsetfile = op.join(basedir, 'data', "{subid}", 'design', design)
# Output templates
outnifti = op.join(analydir, "{subid}", 'importance_maps')
artifacts = op.join(analydir, '{subid}', 'preproc', 'run_{run}', 'artifacts.csv')
# Combine paths into dictionary (facilitate passing i/o of funcs)
paths = dict(tsfile=tsfile, func_maskfile=func_maskfile,
maskfile=maskfile, meanfile=meanfile,
onsetfile=onsetfile, outnifti=outnifti,
analydir=analydir, artifacts=artifacts)
###Output
_____no_output_____
###Markdown
We create anatomical masks in native space from a cortical parcellation of the high-resolution T1 image obtained for each participant using FreeSurfer, and the resulting **bilateral inferior temporal cortex**, **fusiform gyrus**, and **parahippocampal gyrus** were combined to serve as the mask for MVPA classification (as in Zeithamova et al., 2012; *Neuron*). Run Classification (training/testing on 3 categories)
###Code
subj_info.group.value_counts()
###Output
_____no_output_____
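###Markdown
As an aside, the combined ventral temporal mask described above could be assembled from the individual FreeSurfer-derived masks with nilearn -- a hedged sketch: the three mask_name values and the example subject ID are illustrative, and the files are assumed to follow the `maskfile` template defined earlier.
###Code
def combine_masks(subid, mask_names, out_name):
    # Union of binary anatomical masks in the subject's native space.
    imgs = dict(('img%d' % i, image.load_img(maskfile.format(subid=subid, mask_name=name)))
                for i, name in enumerate(mask_names))
    formula = '(%s) > 0' % ' + '.join(sorted(imgs.keys()))
    combined = image.math_img(formula, **imgs)
    combined.to_filename(maskfile.format(subid=subid, mask_name=out_name))
    return combined

# Example call (names are illustrative):
# combine_masks('ap01', ['bilat-inferiortemporal', 'bilat-fusiform', 'bilat-parahippocampal'],
#               'bilat-parahipp_fusi_inftemp_nohipp')
###Output
_____no_output_____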
###Markdown
Run localizer CV:
###Code
# Initialize some dataframes for storage
df = pd.DataFrame(columns=['subid', 'mask_name', 'category', 'type', 'mean', 'sd'])
df_acc = pd.DataFrame(columns=['subid', 'mask_name', 'category', 'classifier', 'accuracy', 'count'])
df_proba = pd.DataFrame(columns=['subid', 'mask_name', 'true_category', 'guess_category', 'classifier', 'probability'])
df_auc = pd.DataFrame()
n_permutations=0 # or if want to run permutation test, e.g., 100
if n_permutations > 0:
iter_list = list(np.arange(1, n_permutations + 1))
iter_list.insert(0, 'subid')
d_permute = pd.DataFrame(columns=iter_list)
mask_type = 'mask' # functional mask, or anatomical mask defined w/mask_name
mask_name = 'bilat-parahipp_fusi_inftemp_nohipp'
# mask_name = 'bilat-fusi_inftemp_nohipp' # excluding parahipp to see if just that
# mask_name = 'lh-inferiorparietal'
cond_list = ['face', 'object', 'place']
multi_class = 'ovr'
pca_n = None #or None
univariate_fsl_k=None # 1000 #or None
# mask_type = 'func' # functional mask, or anatomical mask defined w/mask_name
# mask_name = 'wholebrain'
# cond_list = ['face', 'object', 'place']
# Confusion matrix
cm_group = np.zeros((1,len(cond_list), len(cond_list)))
# Iterate through subjects
for subid in subj_info.subid:
print subid
onsetfile = paths['onsetfile']
# Get subj-specific data
X, run_labels, ev_labels, ev_trs, ev_onsets, func_masker = get_subj_data(subid, onsetfile, cond_list, paths, mask_type, mask_name,
smoothing_fwhm, standardize, tr, tr_shift, run_list,
shift_rest=True, filter_artifacts=True)
cv = LeaveOneLabelOut(run_labels)
plot_validation_curve(X, ev_labels, cv)
# Classification
if n_permutations > 0:
df, d_permute = calc_scores(df, subid, mask_name, X, ev_labels, run_labels,
n_permutations=n_permutations, d_permute=d_permute,
plot_permutation=True, multi_class=multi_class)
else:
df = calc_scores(df, subid, mask_name, X, ev_labels, run_labels, multi_class=multi_class)
if multi_class == 'MLP':
df_acc, df_proba, cm_group = calc_acc_proba(df_acc, df_proba, subid, mask_name, X, ev_labels,
run_labels, cv,conf_mat=True, multi_class=multi_class,
cm_group=cm_group)
else:
df_acc, df_proba, df_auc, cm_group = calc_acc_proba(df_acc, df_proba, subid, mask_name, X, ev_labels,
run_labels, cv,conf_mat=True, multi_class=multi_class,
univariate_fsel_k=univariate_fsl_k, undersampling=True,
pca_n=pca_n,
cm_group=cm_group,
compute_AUC=True, df_auc=df_auc,
repeated_ttest_fsel=None)
# # Create coef maps by training on all data, save niis and pngs to dir
# create_coef_maps(subid, X, ev_labels, func_masker, mask_name, paths, calc_A=False)
###Output
_____no_output_____
###Markdown
Various possible outputs
###Code
df.to_csv('output_ap/localizer_scores_filterart_raw_scalewithinrun.csv')
df_acc.to_csv('output_ap/localizer_accuracy_raw.csv', index=False)
df_proba.to_csv('output_ap/localizer_proba_raw.csv', index=False)
# VTC - no hipp
df_auc.to_csv('output_ap/localizer_vtcnohipp_auc_filterart_raw_scalewithinrun.csv')
df_acc.to_csv('output_ap/localizer_vtcnohipp_accuracy_filterart_raw_scalewithinrun.csv')
df_proba.to_csv('output_ap/localizer_vtcnohipp_proba_filterart_raw_scalewithinrun.csv')
df.to_csv('output_ap/localizer_vtcnohipp_df_filterart_raw_scalewithinrun.csv')
d_permute.to_csv('output_ap/localizer_vtcnohipp_permute_filterart_raw_scalewithinrun.csv')
# Whole hippocampus
df_auc.to_csv('output_ap/localizer_bilat-hipp_auc_filterart_raw_scalewithinrun.csv')
df_acc.to_csv('output_ap/localizer_bilat-hipp_accuracy_filterart_raw_scalewithinrun.csv')
df_proba.to_csv('output_ap/localizer_bilat-hipp_proba_filterart_raw_scalewithinrun.csv')
df.to_csv('output_ap/localizer_bilat-hipp_df_filterart_raw_scalewithinrun.csv')
d_permute.to_csv('output_ap/localizer_bilat-hipp_permute_filterart_raw_scalewithinrun.csv')
# inf parietal
df_auc.to_csv('output_ap/localizer_inferiorparietal_auc_filterart_raw_scalewithinrun.csv')
df_acc.to_csv('output_ap/localizer_inferiorparietal_accuracy_filterart_raw_scalewithinrun.csv')
df_proba.to_csv('output_ap/localizer_inferiorparietal_proba_filterart_raw_scalewithinrun.csv')
df.to_csv('output_ap/localizer_inferiorparietal_df_filterart_raw_scalewithinrun.csv')
d_permute.to_csv('output_ap/localizer_inferiorparietal_permute_filterart_raw_scalewithinrun.csv')
###Output
_____no_output_____ |
Live_Demo_Two_steam_net.ipynb | ###Markdown
This is a live demo of video action recognition using a two-stream architecture. It will clone my repo and download the models from Drive, then use them to infer the output in a live frame-level demo. An output video will then be generated showing the output prediction for each frame. I suggest running all the cells, having a 5-minute break, and then viewing the output video :D

Starting with the installation process: this will clone my repo and download the models from Drive. I used the gdown.pl tool for downloading my public checkpoints from Drive with no authentication overhead.
###Code
import os
!git clone https://github.com/mohammed-elkomy/two-stream-action-recognition.git
os.chdir("/content/two-stream-action-recognition")
!git clone https://github.com/circulosmeos/gdown.pl.git
!./gdown.pl/gdown.pl https://drive.google.com/file/d/1djGzpxAYFvNX-UaQ7ONqDHGgnzc8clBK/view "spatial.zip"
!./gdown.pl/gdown.pl https://drive.google.com/file/d/1kvslNL8zmZYaHRmhgAM6-l_pNDDA0EKZ/view "motion.zip"
!unzip spatial.zip
!unzip motion.zip
!pip install -U -q PyDrive 2> s.txt >> s.txt
!pip install opencv-python 2> s.txt >> s.txt
!pip install imgaug 2> s.txt >> s.txt
!pip install scikit-video 2> s.txt >> s.txt
###Output
Cloning into 'two-stream-action-recognition'...
remote: Enumerating objects: 6, done.[K
remote: Counting objects: 16% (1/6) [K
remote: Counting objects: 33% (2/6) [K
remote: Counting objects: 50% (3/6) [K
remote: Counting objects: 66% (4/6) [K
remote: Counting objects: 83% (5/6) [K
remote: Counting objects: 100% (6/6) [K
remote: Counting objects: 100% (6/6), done.[K
remote: Compressing objects: 100% (4/4), done.[K
remote: Total 224 (delta 2), reused 6 (delta 2), pack-reused 218[K
Receiving objects: 100% (224/224), 50.15 MiB | 47.02 MiB/s, done.
Resolving deltas: 100% (46/46), done.
Cloning into 'gdown.pl'...
remote: Enumerating objects: 41, done.[K
remote: Counting objects: 100% (41/41), done.[K
remote: Compressing objects: 100% (26/26), done.[K
remote: Total 81 (delta 22), reused 32 (delta 14), pack-reused 40[K
Unpacking objects: 100% (81/81), done.
Cannot open cookies file ‘gdown.cookie.temp’: No such file or directory
--2019-04-08 17:36:57-- https://docs.google.com/uc?id=1djGzpxAYFvNX-UaQ7ONqDHGgnzc8clBK&export=download
Resolving docs.google.com (docs.google.com)... 74.125.141.101, 74.125.141.138, 74.125.141.100, ...
Connecting to docs.google.com (docs.google.com)|74.125.141.101|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: unspecified [text/html]
Saving to: ‘spatial.zip’
0K 46.4M=0s
2019-04-08 17:36:57 (46.4 MB/s) - ‘spatial.zip’ saved [3259]
--2019-04-08 17:36:57-- https://docs.google.com/uc?export=download&confirm=t3IN&id=1djGzpxAYFvNX-UaQ7ONqDHGgnzc8clBK
Resolving docs.google.com (docs.google.com)... 74.125.141.101, 74.125.141.138, 74.125.141.102, ...
Connecting to docs.google.com (docs.google.com)|74.125.141.101|:443... connected.
HTTP request sent, awaiting response... 302 Moved Temporarily
Location: https://doc-10-50-docs.googleusercontent.com/docs/securesc/ha0ro937gcuc7l7deffksulhg5h7mbp1/kutlh0076lmlt6hid35dhgnkaceee7b6/1554739200000/13087578489450988105/*/1djGzpxAYFvNX-UaQ7ONqDHGgnzc8clBK?e=download [following]
Warning: wildcards not supported in HTTP.
--2019-04-08 17:36:57-- https://doc-10-50-docs.googleusercontent.com/docs/securesc/ha0ro937gcuc7l7deffksulhg5h7mbp1/kutlh0076lmlt6hid35dhgnkaceee7b6/1554739200000/13087578489450988105/*/1djGzpxAYFvNX-UaQ7ONqDHGgnzc8clBK?e=download
Resolving doc-10-50-docs.googleusercontent.com (doc-10-50-docs.googleusercontent.com)... 74.125.141.132, 2607:f8b0:400c:c06::84
Connecting to doc-10-50-docs.googleusercontent.com (doc-10-50-docs.googleusercontent.com)|74.125.141.132|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: unspecified [application/zip]
Saving to: ‘spatial.zip’
0K ........ ........ ........ ........ 63.0M
32768K ........ ........ ........ ........ 184M
65536K ........ ........ ........ ........ 131M
98304K ........ ........ ........ ........ 189M
131072K ........ ........ ........ ........ 197M
163840K ........ ........ ........ ........ 166M
196608K ........ ........ ........ ....... 40.5M=2.2s
2019-04-08 17:36:59 (100 MB/s) - ‘spatial.zip’ saved [234458427]
Cannot open cookies file ‘gdown.cookie.temp’: No such file or directory
--2019-04-08 17:37:02-- https://docs.google.com/uc?id=1kvslNL8zmZYaHRmhgAM6-l_pNDDA0EKZ&export=download
Resolving docs.google.com (docs.google.com)... 74.125.141.101, 74.125.141.138, 74.125.141.100, ...
Connecting to docs.google.com (docs.google.com)|74.125.141.101|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: unspecified [text/html]
Saving to: ‘motion.zip’
0K 44.8M=0s
2019-04-08 17:37:02 (44.8 MB/s) - ‘motion.zip’ saved [3259]
--2019-04-08 17:37:02-- https://docs.google.com/uc?export=download&confirm=jfI3&id=1kvslNL8zmZYaHRmhgAM6-l_pNDDA0EKZ
Resolving docs.google.com (docs.google.com)... 74.125.141.102, 74.125.141.113, 74.125.141.101, ...
Connecting to docs.google.com (docs.google.com)|74.125.141.102|:443... connected.
HTTP request sent, awaiting response... 302 Moved Temporarily
Location: https://doc-0c-2g-docs.googleusercontent.com/docs/securesc/ha0ro937gcuc7l7deffksulhg5h7mbp1/lm81f787epkvu21ukccl2a8ku38udndc/1554739200000/09175336730828036060/*/1kvslNL8zmZYaHRmhgAM6-l_pNDDA0EKZ?e=download [following]
Warning: wildcards not supported in HTTP.
--2019-04-08 17:37:02-- https://doc-0c-2g-docs.googleusercontent.com/docs/securesc/ha0ro937gcuc7l7deffksulhg5h7mbp1/lm81f787epkvu21ukccl2a8ku38udndc/1554739200000/09175336730828036060/*/1kvslNL8zmZYaHRmhgAM6-l_pNDDA0EKZ?e=download
Resolving doc-0c-2g-docs.googleusercontent.com (doc-0c-2g-docs.googleusercontent.com)... 74.125.141.132, 2607:f8b0:400c:c06::84
Connecting to doc-0c-2g-docs.googleusercontent.com (doc-0c-2g-docs.googleusercontent.com)|74.125.141.132|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: unspecified [application/zip]
Saving to: ‘motion.zip’
0K ........ ........ ........ ........ 77.3M
32768K ........ ........ ........ ........ 197M
65536K ........ ........ ........ ........ 202M
98304K ........ ........ ........ ........ 181M
131072K ........ ........ ........ ........ 164M
163840K ........ ........ ........ ........ 189M
196608K ........ ........ ........ ....... 177M=1.5s
2019-04-08 17:37:04 (154 MB/s) - ‘motion.zip’ saved [234618151]
Archive: spatial.zip
inflating: spatial.log
inflating: spatial.preds
inflating: spatial.h5
Archive: motion.zip
inflating: motion.log
inflating: motion.preds
inflating: motion.h5
###Markdown
Showing the demo
It will start by selecting one random video from the **testing video samples** folder stored in my repo (it contains 100 videos from the test dataset and you can add more; originally it was possible to fetch a single video by name with an HTTP request, but UCF101 changed their site a little bit and that is no longer possible).
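Before running it, you can peek at what is available to sample from; a tiny sketch (assuming the working directory is the cloned repo, as set above):
###Code
# Quick, purely illustrative look at the bundled test videos.
import os
from collections import Counter

samples = os.listdir("testing video samples")
print(len(samples), "videos available")
# UCF101 file names follow the pattern v_<Action>_g<group>_c<clip>.avi
print(Counter(name.split("_")[1] for name in samples if name.startswith("v_")).most_common(5))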
###Code
high_resolution_video = True # for good internet :D
# importing those only once
import cv2
from imgaug import augmenters as iaa
from evaluation import legacy_load_model
from evaluation.evaluation import *
import random
from frame_dataloader import DataUtil
import matplotlib.pyplot as plt
import numpy as np
import skvideo.io
import io
import base64
from IPython.display import HTML
from matplotlib import gridspec
# defining functions and global objects
# dictionary of class names
data_util = DataUtil(path= './UCF_list/', split="01")
action_names = {v:k for k,v in data_util.action_to_label.items()} # class name dictionary
stacked_frames = 10
# image resize augmenter to be fed into the network
augmenter = iaa.Sequential([
iaa.Scale({"height": 299, "width": 299})
])
def convert_to_image(flow_image):
"""
this is the conversion function of each flow frame
based on the cpp version of extracting optical flow https://github.com/feichtenhofer/gpu_flow/blob/master/compute_flow.cpp
"""
l, h = -20, 20
return (255 * (flow_image - l) / (h - l)).astype(np.uint8)
def stack_opticalflow(start_frame_index, stacked_frames): # returns numpy (h,w,stacked*2) = one sample
"""
Stacks "stacked_frames" u/v frames on a single numpy array : (h,w,stacked*2)
"""
first_optical_frame_u = original_u_frames[start_frame_index] # horizontal
first_optical_frame_v = original_v_frames[start_frame_index] # vertical
stacked_optical_flow_sample = np.zeros(first_optical_frame_u.shape + (2 * stacked_frames,), dtype=np.uint8) # with channel dimension of stacked_frames(u)+ stacked_frames(v)
stacked_optical_flow_sample[:, :, 0] = first_optical_frame_u
stacked_optical_flow_sample[:, :, 0 + stacked_frames] = first_optical_frame_v
for index, optical_frame_id in enumerate(range(start_frame_index + 1, start_frame_index + stacked_frames), 1): # index starts at 1 placed after the first one
stacked_optical_flow_sample[:, :, index] = original_u_frames[optical_frame_id]
stacked_optical_flow_sample[:, :, index + stacked_frames] = original_v_frames[optical_frame_id]
return stacked_optical_flow_sample
def get_image_from_fig(fig):
"""
converts matplotlib figure into a numpy array for demo video generation
"""
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
###Output
_____no_output_____
###Markdown
Loading the Keras models just downloaded from Drive (loaded once)
###Code
# legacy_load_model is an older version of keras load_model since keras API changed a little bit when I was working on action recognition
# load into ram
print("Loading Spatial stream")
spatial_model_restored = legacy_load_model(filepath="spatial.h5", custom_objects={'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss, "acc_top_1": acc_top_1, "acc_top_5": acc_top_5})
spatial_model_restored.summary()
# load into ram
print("Loading Motion stream")
motion_model_restored = legacy_load_model(filepath="motion.h5", custom_objects={'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss, "acc_top_1": acc_top_1, "acc_top_5": acc_top_5})
motion_model_restored.summary()
###Output
Loading Spatial stream
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py:435: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_spatial (InputLayer) (None, 299, 299, 3) 0
_________________________________________________________________
data_norm (BatchNormalizatio (None, 299, 299, 3) 6
_________________________________________________________________
xception (Model) (None, 10, 10, 2048) 20861480
_________________________________________________________________
avg_pool (GlobalAveragePooli (None, 2048) 0
_________________________________________________________________
predictions (Dense) (None, 101) 206949
=================================================================
Total params: 21,068,435
Trainable params: 21,013,901
Non-trainable params: 54,534
_________________________________________________________________
Loading Motion stream
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_motion (InputLayer) (None, 299, 299, 20) 0
_________________________________________________________________
data_norm (BatchNormalizatio (None, 299, 299, 20) 40
_________________________________________________________________
xception (Model) (None, 101) 21073325
=================================================================
Total params: 21,073,365
Trainable params: 21,018,797
Non-trainable params: 54,568
_________________________________________________________________
###Markdown
Selecting one video and processing it into RGB frames and optical flow frames
Optical flow frames are computed using TV-L1, which is nowhere near real time on a CPU and might take a few minutes for long videos (I process them on the CPU since the GPU version requires building OpenCV from source and doing some nasty things that are not helpful for a short demo).
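If you just want a fast visual check on a CPU, OpenCV's dense Farneback flow is much quicker than TV-L1; a hedged sketch follows (the motion stream was trained on TV-L1 flow, so this is not a drop-in replacement if you care about reproducing its accuracy):
###Code
# Hedged sketch: Farneback dense optical flow as a faster CPU alternative, for quick checks only.
import cv2
import numpy as np

# two tiny synthetic 8-bit grayscale frames with a blob shifted 4 px to the right
prev_gray = np.zeros((64, 64), dtype=np.uint8); prev_gray[10:20, 10:20] = 255
next_gray = np.zeros((64, 64), dtype=np.uint8); next_gray[10:20, 14:24] = 255

# positional args: prev, next, flow, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags
flow = cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
u, v = flow[..., 0], flow[..., 1]   # same u/v layout as the TV-L1 frames used in this notebook
print(flow.shape)                   # (64, 64, 2)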
###Code
# select a random video
video_name = random.choice(os.listdir("testing video samples"))
selected_video=os.path.join("testing video samples",video_name)
print("selected_video:",selected_video)
vidcap = cv2.VideoCapture(selected_video)
print("frame rate for demo:",vidcap.get(cv2.CAP_PROP_FPS))
success, image = vidcap.read()
# make the rgb frames
original_rgb_frames = []
while success:
original_rgb_frames.append(image)
success, image = vidcap.read()
print("frames count in video", len(original_rgb_frames))
# make the optical flow frames
original_v_frames = []
original_u_frames = []
frames = list(map(lambda frame: cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0, original_rgb_frames))
optical_flow = cv2.DualTVL1OpticalFlow_create()
for frame_index in range(len(frames) - 1):
if frame_index % 10 == 0:
print("processing tvl flow of frame ",frame_index)
flow = optical_flow.calc(frames[frame_index], frames[frame_index + 1], None)
u_frame = convert_to_image(flow[..., 0])
v_frame = convert_to_image(flow[..., 1])
original_v_frames.append(v_frame)
original_u_frames.append(u_frame)
print("original_rgb_frames:", len(original_rgb_frames), "original_u_frames:", len(original_u_frames), "original_v_frames:", len(original_v_frames))
# generate spatial batch as done in the dataloader
spatial_batch = []
for image in original_rgb_frames:
spatial_batch.append(
cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
)
spatial_batch = np.array(augmenter.augment_images(spatial_batch), dtype=np.float32) / 255.0
# generate motion batch as done in the dataloader
motion_batch = []
for first_optical_frame_id in range(len(original_u_frames) - stacked_frames):
motion_batch.append( # append one sample which is (h,w,stacked*2)
stack_opticalflow(start_frame_index=first_optical_frame_id, stacked_frames=stacked_frames)
)
motion_batch = np.array(augmenter.augment_images(motion_batch), dtype=np.float32) / 255.0
###Output
frames count in video 139
processing tvl flow of frame 0
processing tvl flow of frame 10
processing tvl flow of frame 20
processing tvl flow of frame 30
processing tvl flow of frame 40
processing tvl flow of frame 50
processing tvl flow of frame 60
processing tvl flow of frame 70
processing tvl flow of frame 80
processing tvl flow of frame 90
processing tvl flow of frame 100
processing tvl flow of frame 110
processing tvl flow of frame 120
processing tvl flow of frame 130
original_rgb_frames: 139 original_u_frames: 138 original_v_frames: 138
###Markdown
Predict the output for each frame, organized as a batch
###Code
"""
predict spatial stream output
"""
spatial_pred = spatial_model_restored.predict(spatial_batch)
spatial_classes = np.argsort(spatial_pred,axis=1)[:,:-6:-1]
spatial_scores = np.sort(spatial_pred,axis=1)[:,:-6:-1]
"""
predict motion stream output
"""
motion_pred = motion_model_restored.predict(motion_batch)
motion_classes = np.argsort(motion_pred,axis=1)[:,:-6:-1]
motion_scores = np.sort(motion_pred,axis=1)[:,:-6:-1]
"""
get the average output prediction
"""
average_pred = motion_pred + spatial_pred[:motion_pred.shape[0],]
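# note: summing the two softmax outputs gives the same ranking as averaging them,
# and spatial_pred is truncated to motion_pred's length because each motion sample
# needs 10 stacked flow frames, so there are fewer motion samples than RGB frames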
average_classes = np.argsort(average_pred,axis=1)[:,:-6:-1]
average_scores = np.sort(average_pred,axis=1)[:,:-6:-1]
def make_bar_chart(classes,scores):
height = scores.tolist()
bars = [action_names[class_index] for class_index in classes]
y_pos = np.arange(len(bars))
bar = plt.bar(y_pos, height, color=['yellow', 'red', 'green', 'blue', 'cyan'])
# plt.xticks(y_pos, bars, rotation=90) this will draw them below
# plt.tick_params(axis="x",labelsize=10,direction="in", pad=-15)
plt.ylim(top=1)
plt.ylim(bottom=0)
for bar_id,rect in enumerate(bar):
plt.text(rect.get_x() + rect.get_width()/2.0, .5, bars[bar_id], ha='center', va='center', rotation=75,fontdict={'fontsize': 13 if high_resolution_video else 10})
# Define the codec and create VideoWriter object.The output is stored in 'demo.mp4' file.
writer = skvideo.io.FFmpegWriter("demo.mp4", inputdict={
'-r': '16',
})
gs = gridspec.GridSpec(2, 3,
width_ratios=[1, 1,1],
height_ratios=[1.5, 1]
)
gs.update(wspace=0.2,hspace=0)
# generating output video
for frame_index in range(motion_classes.shape[0]):
if high_resolution_video :
fig = plt.figure(figsize=(16, 12))
fig.suptitle("Demo for {}".format(video_name), fontsize=24)
fig.text(.125,0.91,"Average Prediction from spatial stream: {}".format(action_names[np.mean(spatial_pred,axis = 0).argmax()]),color='r', fontsize=18)
fig.text(.125,.87,"Average Prediction from motion stream: {}".format(action_names[np.mean(motion_pred,axis = 0).argmax()]),color='g',fontsize=18)
fig.text(.125,.83,"Average Prediction from both streams: {}".format(action_names[np.mean(average_pred,axis = 0).argmax()]),color='b', fontsize=18)
else :
fig = plt.figure(figsize=(9, 6))
fig.suptitle("Demo for {}".format(video_name), fontsize=16)
fig.text(.125,0.91,"Average Prediction from spatial stream: {}".format(action_names[np.mean(spatial_pred,axis = 0).argmax()]),color='r', fontsize=13)
fig.text(.125,.87,"Average Prediction from motion stream: {}".format(action_names[np.mean(motion_pred,axis = 0).argmax()]),color='g',fontsize=13)
fig.text(.125,.83,"Average Prediction from both streams: {}".format(action_names[np.mean(average_pred,axis = 0).argmax()]),color='b', fontsize=13)
if frame_index % 10 == 0:
print("processing frame ",frame_index)
##########################################################
# rgb frame
ax = plt.subplot(gs[0])
ax.set_title("RGB frame", fontsize=16 if high_resolution_video else 13)
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
ax.imshow(cv2.cvtColor(original_rgb_frames[frame_index],cv2.COLOR_RGB2BGR))
##########################################################
# optical flow frame
ax = plt.subplot(gs[1])
ax.set_title("TVL1 Optical flow u-frame", fontsize=16 if high_resolution_video else 13)
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
ax.imshow(original_u_frames[frame_index],cmap="inferno") # viridis,inferno,plasma,magma
##########################################################
# optical flow frame
ax = plt.subplot(gs[2])
ax.set_title("TVL1 Optical flow v-frame", fontsize= 16 if high_resolution_video else 13)
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
ax.imshow(original_v_frames[frame_index],cmap="inferno") # viridis,inferno,plasma,magma
##########################################################
# prediction scores
ax = plt.subplot(gs[3])
ax.set_title("Spatial Stream Output scores",fontsize= 16 if high_resolution_video else 13)
make_bar_chart(spatial_classes[frame_index],spatial_scores[frame_index])
##########################################################
# prediction scores
ax = plt.subplot(gs[4])
ax.set_title("Motion Stream Output scores",fontsize= 16 if high_resolution_video else 13)
make_bar_chart(motion_classes[frame_index],motion_scores[frame_index])
##########################################################
# prediction scores
ax = plt.subplot(gs[5])
ax.set_title("Average Output scores",fontsize= 16 if high_resolution_video else 13)
make_bar_chart(average_classes[frame_index],average_scores[frame_index])
##########################################################
fig.tight_layout( pad=0, h_pad=0, w_pad=0)
writer.writeFrame(get_image_from_fig(fig))
plt.close(fig)
writer.close()
video = io.open("demo.mp4" , 'r+b').read()
encoded = base64.b64encode(video)
HTML(data='''<video controls autoplay loop>
<source type="video/mp4" src="data:video/mp4;base64,{}"
</video>'''.format(encoded.decode('ascii')))
###Output
_____no_output_____
###Markdown
This is a live demo of video action recognition using a two-stream architecture
This will clone my repo, download the models from Google Drive, and use them to infer the output in a live frame-level demo. An output video will then be generated showing the output prediction for each frame. I suggest running all the cells, taking a 5-minute break, and then viewing the output video :D
###Code
import sys
print(sys.version)
###Output
3.6.8 |Anaconda, Inc.| (default, Dec 30 2018, 01:22:34)
[GCC 7.3.0]
###Markdown
Starting with the installation process
This will clone my repo and download the models from Drive. I used the gdown.pl tool to download my public checkpoints from Drive with no authentication overhead.
###Code
import os
!git clone https://github.com/mohammed-elkomy/two-stream-action-recognition.git
os.chdir("two-stream-action-recognition")
!git clone https://github.com/circulosmeos/gdown.pl.git
!./gdown.pl/gdown.pl https://drive.google.com/file/d/1djGzpxAYFvNX-UaQ7ONqDHGgnzc8clBK/view "spatial.zip"
!./gdown.pl/gdown.pl https://drive.google.com/file/d/1kvslNL8zmZYaHRmhgAM6-l_pNDDA0EKZ/view "motion.zip"
!unzip spatial.zip
!unzip motion.zip
!pip install -U -q PyDrive 2> s.txt >> s.txt
!pip install opencv-python 2> s.txt >> s.txt
!pip install imgaug 2> s.txt >> s.txt
!pip install scikit-video 2> s.txt >> s.txt
!pip install imgaug
!pip install tensorflow-gpu
!sudo pip install scikit-video
###Output
_____no_output_____
###Markdown
Showing the demo
It will start by selecting one random video from the **testing video samples** folder stored in my repo (it contains 100 videos from the test dataset and you can add more; originally it was possible to fetch a single video by name with an HTTP request, but UCF101 changed their site a little bit and that is no longer possible).
###Code
import os
os.chdir("two-stream-action-recognition")
os.environ["CUDA_VISIBLE_DEVICES"]="1"
!ls
high_resolution_video = True # for good internet :D
# importing those only once
import os
import cv2
from imgaug import augmenters as iaa
from evaluation import legacy_load_model
from evaluation.evaluation import *
import random
from frame_dataloader import DataUtil
import matplotlib.pyplot as plt
import numpy as np
import skvideo.io
import io
import base64
from IPython.display import HTML
from matplotlib import gridspec
# defining functions and global objects
# dictionary of class names
data_util = DataUtil(path= './UCF_list/', split="01")
action_names = {v:k for k,v in data_util.action_to_label.items()} # class name dictionary
stacked_frames = 10
# image resize augmenter to be fed into the network
augmenter = iaa.Sequential([
iaa.Scale({"height": 299, "width": 299})
])
def convert_to_image(flow_image):
"""
this is the conversion function of each flow frame
based on the cpp version of extracting optical flow https://github.com/feichtenhofer/gpu_flow/blob/master/compute_flow.cpp
"""
l, h = -20, 20
return (255 * (flow_image - l) / (h - l)).astype(np.uint8)
def stack_opticalflow(start_frame_index, stacked_frames): # returns numpy (h,w,stacked*2) = one sample
"""
Stacks "stacked_frames" u/v frames on a single numpy array : (h,w,stacked*2)
"""
first_optical_frame_u = original_u_frames[start_frame_index] # horizontal
first_optical_frame_v = original_v_frames[start_frame_index] # vertical
stacked_optical_flow_sample = np.zeros(first_optical_frame_u.shape + (2 * stacked_frames,), dtype=np.uint8) # with channel dimension of stacked_frames(u)+ stacked_frames(v)
stacked_optical_flow_sample[:, :, 0] = first_optical_frame_u
stacked_optical_flow_sample[:, :, 0 + stacked_frames] = first_optical_frame_v
for index, optical_frame_id in enumerate(range(start_frame_index + 1, start_frame_index + stacked_frames), 1): # index starts at 1 placed after the first one
stacked_optical_flow_sample[:, :, index] = original_u_frames[optical_frame_id]
stacked_optical_flow_sample[:, :, index + stacked_frames] = original_v_frames[optical_frame_id]
return stacked_optical_flow_sample
def get_image_from_fig(fig):
"""
converts matplotlib figure into a numpy array for demo video generation
"""
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:11: DeprecationWarning: Function `Scale()` is deprecated. Use `Resize` instead. Resize has the exactly same interface as Scale.
# This is added back by InteractiveShellApp.init_path()
###Markdown
Loading the Keras models just downloaded from Drive (loaded once)
###Code
# legacy_load_model is an older version of keras load_model since keras API changed a little bit when I was working on action recognition
# load into ram
print("Loading Spatial stream")
spatial_model_restored = legacy_load_model(filepath="spatial.h5", custom_objects={'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss, "acc_top_1": acc_top_1, "acc_top_5": acc_top_5})
spatial_model_restored.summary()
# load into ram
print("Loading Motion stream")
motion_model_restored = legacy_load_model(filepath="motion.h5", custom_objects={'sparse_categorical_cross_entropy_loss': sparse_categorical_cross_entropy_loss, "acc_top_1": acc_top_1, "acc_top_5": acc_top_5})
motion_model_restored.summary()
###Output
WARNING: Logging before flag parsing goes to stderr.
W0803 12:07:41.249042 547849293840 deprecation.py:506] From /home/odroid/.local/lib/python3.6/site-packages/tensorflow/python/ops/init_ops.py:97: calling Zeros.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
W0803 12:07:41.254381 547849293840 deprecation.py:506] From /home/odroid/.local/lib/python3.6/site-packages/tensorflow/python/ops/init_ops.py:97: calling Ones.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
W0803 12:07:41.269337 547849293840 deprecation.py:506] From /home/odroid/.local/lib/python3.6/site-packages/tensorflow/python/ops/init_ops.py:97: calling VarianceScaling.__init__ (from tensorflow.python.ops.init_ops) with dtype is deprecated and will be removed in a future version.
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
###Markdown
Selecting one video and processing it into RGB frames and optical flow frames
Optical flow frames are computed using TV-L1, which is nowhere near real time on a CPU and might take a few minutes for long videos (I process them on the CPU since the GPU version requires building OpenCV from source and doing some nasty things that are not helpful for a short demo).
###Code
!ls
os.chdir("two-stream-action-recognition")
!ls
# select a random video
from cv2 import __version__
print(__version__)
#video_name = random.choice(os.listdir("testing video samples"))
video_name = 'v_BabyCrawling_g19_c01.avi'
print(os.path.join("testing video samples",video_name))
selected_video=os.path.join("testing video samples",video_name)
#selected_video="v_HammerThrow_g10_c03.avi"
print("selected_video:",selected_video)
vidcap = cv2.VideoCapture(selected_video)
print("frame rate for demo:",vidcap.get(cv2.CAP_PROP_FPS))
success, image = vidcap.read()
# make the rgb frames
original_rgb_frames = []
while success:
original_rgb_frames.append(image)
success, image = vidcap.read()
print("frames count in video", len(original_rgb_frames))
# make the optical flow frames
original_v_frames = []
original_u_frames = []
frames = list(map(lambda frame: cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY).astype(np.float32) / 255.0, original_rgb_frames))
#for opencv 4
optical_flow = cv2.optflow.DualTVL1OpticalFlow_create()
#for opencv 3
#optical_flow = cv2.DualTVL1OpticalFlow_create()
for frame_index in range(len(frames) - 1):
if frame_index % 10 == 0:
print("processing tvl flow of frame ",frame_index)
flow = optical_flow.calc(frames[frame_index], frames[frame_index + 1], None)
u_frame = convert_to_image(flow[..., 0])
v_frame = convert_to_image(flow[..., 1])
original_v_frames.append(v_frame)
original_u_frames.append(u_frame)
print("original_rgb_frames:", len(original_rgb_frames), "original_u_frames:", len(original_u_frames), "original_v_frames:", len(original_v_frames))
# generate spatial batch as done in the dataloader
spatial_batch = []
for image in original_rgb_frames:
spatial_batch.append(
cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
)
spatial_batch = np.array(augmenter.augment_images(spatial_batch), dtype=np.float32) / 255.0
# generate motion batch as done in the dataloader
motion_batch = []
for first_optical_frame_id in range(len(original_u_frames) - stacked_frames):
motion_batch.append( # append one sample which is (h,w,stacked*2)
stack_opticalflow(start_frame_index=first_optical_frame_id, stacked_frames=stacked_frames)
)
motion_batch = np.array(augmenter.augment_images(motion_batch), dtype=np.float32) / 255.0
###Output
processing tvl flow of frame 0
processing tvl flow of frame 10
processing tvl flow of frame 20
processing tvl flow of frame 30
processing tvl flow of frame 40
processing tvl flow of frame 50
processing tvl flow of frame 60
processing tvl flow of frame 70
processing tvl flow of frame 80
processing tvl flow of frame 90
original_rgb_frames: 94 original_u_frames: 93 original_v_frames: 93
###Markdown
Predict the output for each frame, organized as a batch
###Code
import time
"""
predict spatial stream output
"""
print(">>>>> spatial <<<<<")
startTime = time.time()
print("start time: ", startTime)
spatial_pred = spatial_model_restored.predict(spatial_batch)
endTime= time.time()
print("end time: ", endTime)
print("total time: ", endTime - startTime)
spatial_classes = np.argsort(spatial_pred,axis=1)[:,:-6:-1]
spatial_scores = np.sort(spatial_pred,axis=1)[:,:-6:-1]
"""
predict motion stream output
"""
print(">>>>> motion <<<<<")
startTime = time.time()
print("start time: ", startTime)
motion_pred = motion_model_restored.predict(motion_batch)
endTime= time.time()
print("end time: ", endTime)
print("total time: ", endTime - startTime)
motion_classes = np.argsort(motion_pred,axis=1)[:,:-6:-1]
motion_scores = np.sort(motion_pred,axis=1)[:,:-6:-1]
"""
get the average output prediction
"""
average_pred = motion_pred + spatial_pred[:motion_pred.shape[0],]
average_classes = np.argsort(average_pred,axis=1)[:,:-6:-1]
average_scores = np.sort(average_pred,axis=1)[:,:-6:-1]
def make_bar_chart(classes,scores):
height = scores.tolist()
bars = [action_names[class_index] for class_index in classes]
y_pos = np.arange(len(bars))
bar = plt.bar(y_pos, height, color=['yellow', 'red', 'green', 'blue', 'cyan'])
# plt.xticks(y_pos, bars, rotation=90) this will draw them below
# plt.tick_params(axis="x",labelsize=10,direction="in", pad=-15)
plt.ylim(top=1)
plt.ylim(bottom=0)
for bar_id,rect in enumerate(bar):
plt.text(rect.get_x() + rect.get_width()/2.0, .5, bars[bar_id], ha='center', va='center', rotation=75,fontdict={'fontsize': 13 if high_resolution_video else 10})
# Define the codec and create VideoWriter object.The output is stored in 'demo.mp4' file.
writer = skvideo.io.FFmpegWriter("demo.mp4", inputdict={
'-r': '16',
})
gs = gridspec.GridSpec(2, 3,
width_ratios=[1, 1,1],
height_ratios=[1.5, 1]
)
gs.update(wspace=0.2,hspace=0)
# generating output video
for frame_index in range(motion_classes.shape[0]):
if high_resolution_video :
fig = plt.figure(figsize=(16, 12))
fig.suptitle("Demo for {}".format(video_name), fontsize=24)
fig.text(.125,0.91,"Average Prediction from spatial stream: {}".format(action_names[np.mean(spatial_pred,axis = 0).argmax()]),color='r', fontsize=18)
fig.text(.125,.87,"Average Prediction from motion stream: {}".format(action_names[np.mean(motion_pred,axis = 0).argmax()]),color='g',fontsize=18)
fig.text(.125,.83,"Average Prediction from both streams: {}".format(action_names[np.mean(average_pred,axis = 0).argmax()]),color='b', fontsize=18)
else :
fig = plt.figure(figsize=(9, 6))
fig.suptitle("Demo for {}".format(video_name), fontsize=16)
fig.text(.125,0.91,"Average Prediction from spatial stream: {}".format(action_names[np.mean(spatial_pred,axis = 0).argmax()]),color='r', fontsize=13)
fig.text(.125,.87,"Average Prediction from motion stream: {}".format(action_names[np.mean(motion_pred,axis = 0).argmax()]),color='g',fontsize=13)
fig.text(.125,.83,"Average Prediction from both streams: {}".format(action_names[np.mean(average_pred,axis = 0).argmax()]),color='b', fontsize=13)
if frame_index % 10 == 0:
print("processing frame ",frame_index)
##########################################################
# rgb frame
ax = plt.subplot(gs[0])
ax.set_title("RGB frame", fontsize=16 if high_resolution_video else 13)
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
ax.imshow(cv2.cvtColor(original_rgb_frames[frame_index],cv2.COLOR_RGB2BGR))
##########################################################
# optical flow frame
ax = plt.subplot(gs[1])
ax.set_title("TVL1 Optical flow u-frame", fontsize=16 if high_resolution_video else 13)
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
ax.imshow(original_u_frames[frame_index],cmap="inferno") # viridis,inferno,plasma,magma
##########################################################
# optical flow frame
ax = plt.subplot(gs[2])
ax.set_title("TVL1 Optical flow v-frame", fontsize= 16 if high_resolution_video else 13)
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
ax.imshow(original_v_frames[frame_index],cmap="inferno") # viridis,inferno,plasma,magma
##########################################################
# prediction scores
ax = plt.subplot(gs[3])
ax.set_title("Spatial Stream Output scores",fontsize= 16 if high_resolution_video else 13)
make_bar_chart(spatial_classes[frame_index],spatial_scores[frame_index])
##########################################################
# prediction scores
ax = plt.subplot(gs[4])
ax.set_title("Motion Stream Output scores",fontsize= 16 if high_resolution_video else 13)
make_bar_chart(motion_classes[frame_index],motion_scores[frame_index])
##########################################################
# prediction scores
ax = plt.subplot(gs[5])
ax.set_title("Average Output scores",fontsize= 16 if high_resolution_video else 13)
make_bar_chart(average_classes[frame_index],average_scores[frame_index])
##########################################################
fig.tight_layout( pad=0, h_pad=0, w_pad=0)
writer.writeFrame(get_image_from_fig(fig))
plt.close(fig)
writer.close()
###Output
processing frame 0
nbs/04_feature_finding.ipynb | ###Markdown
Feature Finding
> Functions related to feature finding
This part describes the implementation of the feature-finding algorithm. The core of the algorithm is described in the [MaxQuant paper](https://www.nature.com/articles/nbt.1511). The supplementary material explains the underlying methodology in great detail and is the foundation of the theoretical background described here. A refined version of the algorithm was presented with [Dinosaur](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4933939/), which was also used as a reference for the Python implementation.
For the algorithm, we need several modules:
1. Connecting Centroids to Hills
2. Refinement of Hills
3. Calculating Hill Statistics
4. Combining Hills to Isotope Patterns
5. Deconvolution of Isotope Patterns
Loading Data
From the `IO` library, we already have an `*.ms_data.hdf` container that contains centroided data. To use it in feature finding, we directly load the data.
Connecting Centroids to Hills
> Note: Feature finding relies heavily on the performance function decorator from the performance notebook: `@alphapept.performance.performance_function`. Part of this is that the functions will not have return values, to be GPU compatible. Please check out this notebook for further information.
Connecting centroids
Feature finding starts with connecting centroids. For this we look at subsequent scans and compare peaks that are within a defined mass tolerance (`centroid_tol`). Imagine you have three scans with the following centroids:
* Scan 0: 10, 20, 30
* Scan 1: 10.2, 40.1
* Scan 2: 40, 50, 60
When comparing consecutive scans and defining the maximum delta mass to be 0.5, we find the following connections: (Scan No, Centroid No) -> (Scan No, Centroid No). As we cannot easily store tuples in the matrix, we convert the tuple containing the position of the connected centroid to an integer.
* (0,0) -> (1,0) -> (3): 10 & 10.2 -> delta = 0.2
* (1,1) -> (2,0) -> (6): 40.1 & 40 -> delta = 0.1
Finally, we store this in the `results` matrix:
$\begin{bmatrix}3 & -1 & -1 \\ -1 & 6 & -1 \\ -1 & -1 & -1 \end{bmatrix}$
The corresponding `scores` matrix will look as follows:
$\begin{bmatrix}0.2 & -1 & -1 \\ -1 & 0.1 & -1 \\ -1 & -1 & -1 \end{bmatrix}$
This allows us to not only easily store connections between centroids but also perform a quick lookup for the delta of an existing connection. Note that it also only stores the best connection for each centroid. To extract the connected centroids, we can use `np.where(results >= 0)`. This implementation allows getting millions of connections within seconds. As we also allow gaps, meaning that we can have connections between Scan 0 and Scan 2, we make the aforementioned matrix multidimensional, so that e.g. the first matrix stores the connections for no gap and the second matrix the connections with a gap of 1.
The functionality for this step is implemented in `connect_centroids_unidirection` and the wrapper `find_centroid_connections`.
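To make the bookkeeping concrete, the cell below is a small, pure-numpy sketch of the toy example above. It covers only the no-gap case, uses the absolute tolerance of 0.5 from the example instead of the ppm-based `centroid_tol`, and marks missing connections with `-1` in both matrices (the real implementation initializes the score matrix with `np.inf`):
###Code
# Illustrative toy version of the connection bookkeeping described above.
# The real functions below work on flat arrays, score in ppm and add an extra gap dimension.
import numpy as np

scans = [np.array([10.0, 20.0, 30.0]),   # Scan 0
         np.array([10.2, 40.1]),         # Scan 1
         np.array([40.0, 50.0, 60.0])]   # Scan 2

max_centroids = max(len(s) for s in scans)
results = np.full((len(scans), max_centroids), -1, dtype=np.int64)
scores = np.full((len(scans), max_centroids), -1.0)

tol = 0.5  # maximum allowed delta mass in this toy example
for scan_no in range(len(scans) - 1):
    for i, mz in enumerate(scans[scan_no]):
        deltas = np.abs(scans[scan_no + 1] - mz)
        j = int(np.argmin(deltas))
        if deltas[j] < tol:
            # encode (scan_no + 1, j) as a single integer, exactly as described above
            results[scan_no, i] = (scan_no + 1) * max_centroids + j
            scores[scan_no, i] = deltas[j]

print(results)  # [[ 3 -1 -1] [-1  6 -1] [-1 -1 -1]]
print(scores)   # approximately [[0.2 -1 -1] [-1 0.1 -1] [-1 -1 -1]]

# decoding an entry back into (scan, centroid), as find_centroid_connections does:
assert (6 // max_centroids, 6 % max_centroids) == (2, 0)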
###Code
#export
import numpy as np
import alphapept.performance
#This function is tested by being called from find_centroid_connections
@alphapept.performance.performance_function
def connect_centroids_unidirection(x:np.ndarray, row_borders:np.ndarray, connections:np.ndarray, scores:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Connect centroids.
Args:
x (np.ndarray): Index to datapoint. Note that this using the performance_function, so one passes an ndarray.
row_borders (np.ndarray): Row borders of the centroids array.
connections (np.ndarray): Connections matrix to store the connections
scores (np.ndarray): Score matrix to store the connections
centroids (np.ndarray): 1D Array containing the masses of the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
for gap in range(max_gap + 1):
y = x + gap + 1
if y >= row_borders.shape[0]:
return
start_index_f = 0
if x > 0:
start_index_f = row_borders[x - 1]
centroids_1 = centroids[start_index_f: row_borders[x]]
start_index_b = row_borders[y - 1]
centroids_2 = centroids[start_index_b: row_borders[y]]
i = 0
j = 0
while (i < len(centroids_1)) & (j < len(centroids_2)):
mz1, mz2 = centroids_1[i], centroids_2[j]
diff = mz1 - mz2
mz_sum = mz1 + mz2
delta = 2 * 1e6 * abs(diff) / mz_sum
if delta < centroid_tol:
if scores[x, i, gap] > delta:
scores[x, i, gap] = delta
connections[x, i, gap] = (connections.shape[1] * y) + j
if diff > 0:
j += 1
else:
i += 1
def find_centroid_connections(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Wrapper function to call connect_centroids_unidirection
Args:
rowwise_peaks (np.ndarray): Length of centroids with respect to the row borders.
row_borders (np.ndarray): Row borders of the centroids array.
centroids (np.ndarray): Array containing the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
max_centroids = int(cupy.max(rowwise_peaks))
spectra_cnt = len(row_borders) - 1
connections = cupy.full((spectra_cnt, max_centroids, max_gap + 1), -1, dtype=np.int32)
score = cupy.full((spectra_cnt, max_centroids, max_gap + 1), np.inf)
connect_centroids_unidirection(range(len(row_borders)),
row_borders,
connections,
score,
centroids,
max_gap,
centroid_tol)
score = score[cupy.where(score < np.inf)]
score_median = cupy.median(score)
score_std = cupy.std(score)
del score, max_centroids, spectra_cnt
c_shape = connections.shape
from_r, from_c, from_g = cupy.where(connections >= 0)
to_r = connections[from_r, from_c, from_g] // c_shape[1]
to_c = connections[from_r, from_c, from_g] - to_r * c_shape[1]
del connections, from_g
return from_r, from_c, to_r, to_c, score_median, score_std
#hide
def test_find_centroid_connections():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 40.1, 40, 50, 60])
centroid_tol = 0.5*1e6
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_r, np.array([0, 0, 1, 1])) #e.g. 0,0 is connected to 0,1 -> 10 to 10.2
assert np.allclose(from_c, np.array([0, 2, 1, 2]))
assert np.allclose(to_r, np.array([1, 1, 2, 2]))
assert np.allclose(to_c, np.array([0, 1, 0, 0]))
test_find_centroid_connections()
###Output
_____no_output_____
###Markdown
We wrap the centroid connections in the function `connect_centroids`. This function converts the connections into a usable array.
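A minimal sketch of the index conversion, assuming the toy layout above with `row_borders = [3, 6, 9]`: a centroid at position (row, col) maps to the flat position `row_borders[row - 1] + col` in the concatenated centroid array, or simply `col` for the first row.

```python
import numpy as np

row_borders = np.array([3, 6, 9])

def flat_index(row, col):
    # flat position of centroid `col` of scan `row` in the concatenated centroid array
    return col if row == 0 else row_borders[row - 1] + col

print(flat_index(0, 0), flat_index(1, 0), flat_index(2, 1))  # 0 3 7
```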
###Code
#export
#the performance functions are tested with the wrapper function connect_centroids
@alphapept.performance.performance_function
def convert_connections_to_array(x:np.ndarray, from_r:np.ndarray, from_c:np.ndarray, to_r:np.ndarray, to_c:np.ndarray, row_borders:np.ndarray, out_from_idx:np.ndarray, out_to_idx:np.ndarray):
"""Convert integer indices of a matrix to coordinates.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_r (np.ndarray): From array with row coordinates.
from_c (np.ndarray): From array with column coordinates.
to_r (np.ndarray): To array with row coordinates.
to_c (np.ndarray): To array with column coordinates.
row_borders (np.ndarray): Row borders (for indexing).
out_from_idx (np.ndarray): Reporting array: 1D index from.
out_to_idx (np.ndarray): Reporting array: 1D index to.
"""
row = from_r[x]
col = from_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_from_idx[x] = start_index_f + col
row = to_r[x]
col = to_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_to_idx[x] = start_index_f + col
@alphapept.performance.performance_function
def eliminate_overarching_vertex(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray):
"""Eliminate overacrhing vertex.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
"""
if x == 0:
return
if from_idx[x - 1] == from_idx[x]:
to_idx[x] = -1
def connect_centroids(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, float, float):
"""Function to connect centroids.
Args:
rowwise_peaks (np.ndarray): Indexes for centroids.
row_borders (np.ndarray): Row borders (for indexing).
centroids (np.ndarray): Centroid data.
max_gap: Maximum gap.
centroid_tol: Centroid tol for matching centroids.
Returns:
np.ndarray: From index.
np.ndarray: To index.
float: Median score.
float: Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks,
row_borders,
centroids,
max_gap,
centroid_tol)
from_idx = cupy.zeros(len(from_r), np.int32)
to_idx = cupy.zeros(len(from_r), np.int32)
convert_connections_to_array(range(len(from_r)),
from_r,
from_c,
to_r,
to_c,
row_borders,
from_idx,
to_idx)
eliminate_overarching_vertex(range(len(from_idx)), from_idx, to_idx)
relavent_idx = cupy.where(to_idx >= 0)
from_idx = cupy.take(from_idx, relavent_idx)[0]
to_idx = cupy.take(to_idx, relavent_idx)[0]
del from_r, from_c, to_r, to_c, relavent_idx
return from_idx, to_idx, score_median, score_std
#Sample snippet to show centroid connections
import matplotlib.pyplot as plt
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
scan_no = np.array([0, 0, 0, 1, 1, 2, 2, 2])
plt.figure(figsize=(5,5))
for i, _ in enumerate(row_borders):
ctrd = centroids[_-rowwise_peaks[i]:_]
plt.plot(ctrd, np.ones_like(ctrd)*i, 'o')
for i, _ in enumerate(from_idx):
from_ = _
to_ = to_idx[i]
plt.plot([centroids[from_], centroids[to_]], [scan_no[from_], scan_no[to_]], 'k:')
plt.ylabel('scan')
plt.xlabel('m/z')
plt.ylim(len(row_borders)+0.5, -1.5)
plt.title('Peak connections')
plt.show()
#hide
def test_connect_centroids():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_idx, np.array([0, 1, 2]))
assert np.allclose(to_idx, np.array([3, 4, 6]))
test_connect_centroids()
###Output
_____no_output_____
###Markdown
Extracting hills.To extract hills, we extract connected components from the connections.
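Before looking at the decorated implementation, here is a minimal plain-NumPy sketch of the path bookkeeping (hypothetical `from_idx`/`to_idx` values): every centroid stores at most one forward and one backward link, a path starts wherever only a forward link exists, and following the forward links yields the hills.

```python
import numpy as np

# Hypothetical connections: centroids 0 -> 3 -> 6 form one hill, 1 -> 4 another
from_idx = np.array([0, 1, 3])
to_idx = np.array([3, 4, 6])
n_centroids = 8

forward = np.full(n_centroids, -1)
backward = np.full(n_centroids, -1)
forward[from_idx] = to_idx
backward[to_idx] = from_idx

# a path starts at a centroid that has a forward link but no backward link
starts = np.where((forward > -1) & (backward == -1))[0]
hills = []
for s in starts:
    path = [int(s)]
    while forward[path[-1]] > -1:
        path.append(int(forward[path[-1]]))
    hills.append(path)
print(hills)  # [[0, 3, 6], [1, 4]]
```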
###Code
#export
@alphapept.performance.performance_function
def path_finder(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, forward:np.ndarray, backward:np.ndarray):
"""Extracts path information and writes to path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): Array containing from indices.
to_idx (np.ndarray): Array containing to indices.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
"""
fr = from_idx[x]
to = to_idx[x]
forward[fr] = to
backward[to] = fr
@alphapept.performance.performance_function
def find_path_start(x:np.ndarray, forward:np.ndarray, backward:np.ndarray, path_starts:np.ndarray):
"""Function to find the start of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
path_starts (np.ndarray): Array to report path starts.
"""
if forward[x] > -1 and backward[x] == -1:
path_starts[x] = 0
@alphapept.performance.performance_function
def find_path_length(x:np.ndarray, path_starts:np.ndarray, forward:np.ndarray, path_cnt:np.ndarray):
"""Function to extract the length of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forward (np.ndarray): Array that stores forward information.
path_cnt (np.ndarray): Reporting array to count the paths.
"""
ctr = 1
idx = path_starts[x]
while forward[idx] > -1:
ctr += 1
idx = forward[idx]
path_cnt[x] = ctr
@alphapept.performance.performance_function
def fill_path_matrix(x:np.ndarray, path_start:np.ndarray, forwards:np.ndarray, out_hill_data:np.ndarray, out_hill_ptr:np.ndarray):
"""Function to fill the path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forwards (np.ndarray): Forward array.
out_hill_data (np.ndarray): Array containing the indices to hills.
out_hill_ptr (np.ndarray): Array containing the bounds to out_hill_data.
"""
path_position = 0
idx = path_start[x]
while idx > -1:
out_hill_data[out_hill_ptr[x] + path_position] = idx
idx = forwards[idx]
path_position += 1
def get_hills(centroids:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, hill_length_min:int=3)-> (np.ndarray, np.ndarray, int):
"""Function to get hills from centroid connections.
Args:
centroids (np.ndarray): 1D Array containing the masses of the centroids.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
hill_length_min (int): Minimum hill length:
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
forward = cupy.full(centroids.shape[0], -1)
backward = cupy.full(centroids.shape[0], -1)
path_starts = cupy.full(centroids.shape[0], -1)
path_finder(range(len(from_idx)), from_idx, to_idx, forward, backward)
find_path_start(range(len(forward)), forward, backward, path_starts)
    # path_starts will now contain the first index of all connected centroids
path_starts = cupy.where(path_starts == 0)[0]
path_node_cnt = cupy.full(path_starts.shape[0], -1)
find_path_length(range(len(path_starts)), path_starts, forward, path_node_cnt)
relavant_path_node = cupy.where(path_node_cnt >= hill_length_min)[0]
path_starts = cupy.take(path_starts, relavant_path_node)
path_node_cnt = cupy.take(path_node_cnt, relavant_path_node)
del relavant_path_node
    # Generate the hill matrix index pointer data
hill_ptrs = cupy.empty((path_starts.shape[0] + 1), dtype=cupy.int32)
hill_ptrs[0] = 0
hill_ptrs[1:] = path_node_cnt.cumsum()
hill_data = cupy.empty((int(hill_ptrs[-1])), np.int32)
fill_path_matrix(range(len(path_starts)), path_starts, forward, hill_data, hill_ptrs)
del from_idx, to_idx, path_starts, forward, backward
return hill_ptrs, hill_data, path_node_cnt
def extract_hills(query_data:dict, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, int, float, float):
"""[summary]
Args:
query_data (dict): Data structure containing the query data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
score_median (float): Median score.
score_std (float): Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
indices = cupy.array(query_data['indices_ms1'])
mass_data = cupy.array(query_data['mass_list_ms1'])
rowwise_peaks = indices[1:] - indices[:-1]
row_borders = indices[1:]
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, mass_data, max_gap, centroid_tol)
hill_ptrs, hill_data, path_node_cnt = get_hills(mass_data, from_idx, to_idx)
del mass_data
del indices
if cupy.__name__ != 'numpy':
hill_ptrs = hill_ptrs.get()
hill_data = hill_data.get()
path_node_cnt = path_node_cnt.get()
score_median = score_median.get()
score_std = score_std.get()
return hill_ptrs, hill_data, path_node_cnt, score_median, score_std
from numba import njit
@njit
def remove_duplicate_hills(hill_ptrs, hill_data, path_node_cnt):
"""
Removes hills that share datapoints. Starts from the largest hills.
"""
taken_points = np.zeros(hill_data.max()+1)
c = 0
current_idx = 0
hill_ptrs_new = np.zeros_like(hill_ptrs)
hill_data_new = np.zeros_like(hill_data)
for i, _ in enumerate(np.argsort(path_node_cnt)[::-1]):
s, e = hill_ptrs[_], hill_ptrs[_+1]
point_idx = hill_data[s:e]
hill_pts = taken_points[point_idx]
if hill_pts.sum() == 0:
hill_data_new[current_idx:current_idx+len(hill_pts)] = point_idx
current_idx += len(hill_pts)
hill_ptrs_new[c+1] = current_idx
c +=1
taken_points[point_idx] +=1
hill_data_new = hill_data_new[:current_idx]
    hill_ptrs_new = hill_ptrs_new[:c+1] #keep the c hills plus the final boundary pointer
return hill_ptrs_new, hill_data_new
###Output
_____no_output_____
###Markdown
Hill SplittingWhen a hill has two or more maxima, we would like to split it at the minimum position. For this, we use a recursive approach: first, the minima of a hill are detected. A hill is split at a minimum if the smaller of the surrounding maxima is at least the factor `hill_split_level` larger than the minimum. For each split, the process is repeated.
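A minimal sketch of the splitting criterion on a toy smoothed intensity trace, assuming a hypothetical `hill_split_level` of 1.3: find the local minima and split where the smaller of the neighbouring maxima is sufficiently larger than the minimum.

```python
import numpy as np

int_trace = np.array([1., 5., 9., 5., 2., 6., 10., 6., 1.])
hill_split_level = 1.3

# local minima (strictly lower than both neighbours)
minima = [i for i in range(1, len(int_trace) - 1)
          if int_trace[i - 1] > int_trace[i] and int_trace[i + 1] > int_trace[i]]

for m in minima:
    left_max, right_max = int_trace[:m].max(), int_trace[m:].max()
    if min(left_max, right_max) / int_trace[m] > hill_split_level:
        print(f"split hill at index {m}")  # split hill at index 4
```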
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def fast_minima(y:np.ndarray)->np.ndarray:
"""Function to calculate the local minimas of an array.
Args:
y (np.ndarray): Input array.
Returns:
np.ndarray: Array containing minima positions.
"""
minima = np.zeros(len(y))
start = 0
end = len(y)
for i in range(start + 2, end - 2):
if ((y[i - 1] > y[i]) & (y[i + 1] > y[i])) \
or ((y[i - 1] > y[i]) & (y[i + 1] == y[i]) & (y[i + 2] > y[i])) \
or ((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] > y[i])) \
or (((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] == y[i]) & \
(y[i + 2] > y[i]))):
minima[i] = 1
minima = minima.nonzero()[0]
return minima
#hide
def test_fast_minima():
assert fast_minima(np.array([3,2,1,0,1,2,3])) == 3
assert fast_minima(np.array([4,3,2,1,0,1,2])) == 4
assert len(fast_minima(np.array([5,4,3,2,1,0,1]))) == 0
assert len(fast_minima(np.array([6,5,4,3,2,1,0]))) == 0
test_fast_minima()
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def split(k:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_data:np.ndarray, splits:np.ndarray, hill_split_level:float, window:int):
"""Function to split hills.
Args:
k (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_data (np.ndarray): Array containing the indices to hills.
splits (np.ndarray): Array containing splits.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
"""
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_trace = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.median(int_trace[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.mean(int_trace[min_index:max_index])
#minima = (np.diff(np.sign(np.diff(int_trace))) > 0).nonzero()[0] + 1 #This works also but is slower
minima = fast_minima(int_trace)
sorted_minima = np.argsort(int_trace[minima])
minima = minima[sorted_minima]
for min_ in minima:
minval = int_trace[min_]
left_max = max(int_trace[:min_])
right_max = max(int_trace[min_:])
min_max = min(left_max, right_max)
if (minval == 0) or ((min_max / minval) > hill_split_level):
splits[k] = start+min_
break # Split only once per iteration
def split_hills(hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, hill_split_level:float, window:int)->np.ndarray:
"""Wrapper function to split hills
Args:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
Returns:
np.ndarray: Array containing the bounds to the hill_data with splits.
"""
splits = np.zeros(len(int_data), dtype=np.int32)
to_check = np.arange(len(hill_ptrs)-1)
while len(to_check) > 0:
split(to_check, hill_ptrs, int_data, hill_data, splits, hill_split_level, window)
splitpoints = splits.nonzero()[0]
to_check = np.zeros(len(hill_ptrs))
to_check[splitpoints] = 1
to_check = np.insert(to_check, splitpoints+1, np.ones(len(splitpoints))).nonzero()[0] #array, index, what
hill_ptrs = np.insert(hill_ptrs, splitpoints+1, splits[splitpoints]) #array, index, what
splits = np.zeros(len(hill_ptrs), dtype=np.int32) #was cupy np.int32
return hill_ptrs
###Output
_____no_output_____
###Markdown
Filter HillsTo filter hills, we define a length threshold (`hill_check_large` in the code below). All hills shorter than this threshold are accepted as is. For longer hills, the smoothed intensity at the start and the end is compared to the maximum raw intensity. If the ratio of the maximum raw intensity to the smoothed intensity at the beginning and at the end is larger than `hill_peak_factor`, the hill is accepted.
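A minimal sketch of the acceptance criterion for a long hill (hypothetical intensities, `hill_peak_factor = 2`, `window = 1`), using a simple median-then-mean smoothing in the spirit of `check_large_hills`:

```python
import numpy as np

int_raw = np.array([2., 3., 4., 8., 15., 9., 4., 3., 2.])
hill_peak_factor, window = 2, 1

# median pass followed by a mean pass over a +/- window neighbourhood
med = np.array([np.median(int_raw[max(0, i - window):i + window + 1]) for i in range(len(int_raw))])
int_smooth = np.array([np.mean(med[max(0, i - window):i + window + 1]) for i in range(len(med))])

# keep the hill if the raw maximum rises well above the smoothed intensity at both ends
keep = (int_raw.max() / int_smooth[0] > hill_peak_factor) and (int_raw.max() / int_smooth[-1] > hill_peak_factor)
print(keep)  # True
```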
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def check_large_hills(idx:np.ndarray, large_peaks:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, to_remove:np.ndarray, large_peak:int = 40, hill_peak_factor:float = 2, window:int=1):
"""Function to check large hills and flag them for removal.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
large_peaks (np.ndarray): Array containing large peaks.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
to_remove (np.ndarray): Array with indexes to remove.
large_peak (int, optional): Length criterion when a peak is large. Defaults to 40.
hill_peak_factor (float, optional): Hill maximum criterion. Defaults to 2.
window (int, optional): Smoothing window.. Defaults to 1.
"""
k = large_peaks[idx]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_smooth_ = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.median(int_smooth_[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.mean(int_smooth_[min_index:max_index])
int_ = int_data[int_idx]
max_ = np.max(int_)
if (max_ / int_smooth_[0] > hill_peak_factor) & (max_ / int_smooth_[-1] > hill_peak_factor):
to_remove[idx] = 0
def filter_hills(hill_data:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_check_large:int =40, window:int = 1) -> (np.ndarray, np.ndarray):
"""Filters large hills.
Args:
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_check_large (int, optional): Length criterion when a hill is considered large.. Defaults to 40.
window (int, optional): Smoothing window. Defaults to 1.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
"""
large_peaks = np.where(np.diff(hill_ptrs)>=hill_check_large)[0]
to_remove = np.ones(len(large_peaks), dtype=np.int32)
    check_large_hills(range(len(large_peaks)), large_peaks, hill_ptrs, hill_data, int_data, to_remove, hill_check_large, 2, window) #large_peak, hill_peak_factor and window passed positionally
idx_ = np.ones(len(hill_data), dtype = np.int32)
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
to_remove = to_remove.nonzero()[0]
for _ in to_remove:
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_
###Output
_____no_output_____
###Markdown
Calculating Hill StatisticsNext, we calculate summary statistics for the connected centroids. We can obtain a high precision mass estimate for each hill by taking the average of the masses and weighting this by their intensities:$$\overline{m} = \frac{\sum_{j=1}^{n}m_jI_j}{\sum_{j=1}^{n}I_j}$$Since the mass estimate $\overline{m}$ in the equation above is more complicated than just an average of the $m_j$, a standard deviation based estimate of the error would not be appropriate. Therefore, we calculate the error as a bootstrap estimate over $B=150$ bootstrap replications:$$\Delta \overline{m} = \sqrt{\frac{\sum_{b=1}^{B}(\overline{m}_b - \overline{m})^2}{B-1}}$$The calculation of hill statistics for a single hill is implemented in `hill_stats`. To calculate the hill stats for a list of hills, we can call the wrapper `get_hill_data`.
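A minimal sketch of both estimates for a toy hill (hypothetical masses and intensities, $B=150$ bootstrap replications as above):

```python
import numpy as np

np.random.seed(42)
mz_ = np.array([500.0010, 500.0020, 500.0015, 500.0025, 500.0020])
int_ = np.array([1e4, 5e4, 8e4, 4e4, 1e4])

# intensity-weighted mass estimate
average_mz = np.sum(mz_ * int_) / np.sum(int_)

# bootstrap estimate of the mass error
B = 150
boot_means = np.zeros(B)
for b in range(B):
    idx = np.random.choice(len(int_), len(int_), replace=True)
    boot_means[b] = np.sum(mz_[idx] * int_[idx]) / np.sum(int_[idx])
delta_m = np.sqrt(np.sum((boot_means - average_mz) ** 2) / (B - 1))
print(average_mz, delta_m)
```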
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def hill_stats(idx:np.ndarray, hill_range:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, mass_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, stats:np.ndarray, hill_nboot_max:int, hill_nboot:int):
"""Function to calculate hill stats.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_range (np.ndarray): Hill range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
mass_data (np.ndarray): Array containing mass data.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
"""
np.random.seed(42)
start = hill_ptrs[idx]
end = hill_ptrs[idx + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
mz_ = mass_data[idx_]
ms1_int_apex = np.max(int_)
ms1_int_area = np.abs(np.trapz(int_, rt_[rt_idx[idx_]])) #Area
rt_min = rt_[rt_idx[idx_]].min()
rt_max = rt_[rt_idx[idx_]].max()
if len(idx_) > hill_nboot_max:
bootsize = hill_nboot_max
else:
bootsize = len(idx_)
averages = np.zeros(hill_nboot)
average = 0
for i in range(hill_nboot):
boot = np.random.choice(len(int_), bootsize, replace=True)
boot_mz = np.sum((mz_[boot] * int_[boot])) / np.sum(int_[boot])
averages[i] = boot_mz
average += boot_mz
average_mz = average/hill_nboot
delta = 0
for i in range(hill_nboot):
delta += (average_mz - averages[i]) ** 2 #maybe easier?
delta_m = np.sqrt(delta / (hill_nboot - 1))
stats[idx,0] = average_mz
stats[idx,1] = delta_m
stats[idx,2] = ms1_int_area
stats[idx,3] = ms1_int_apex
stats[idx,4] = rt_min
stats[idx,5] = rt_max
def remove_duplicates(stats:np.ndarray, hill_data:np.ndarray, hill_ptrs:np.ndarray)-> (np.ndarray, np.ndarray, np.ndarray):
"""Remove duplicate hills.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
np.ndarray: Filtered hill stats.
"""
dups = pd.DataFrame(stats).duplicated() #all duplicated hills
idx_ = np.ones(len(hill_data), dtype = np.int32) #keep all
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
for _ in np.arange(len(stats))[dups]: #duplicates will be assigned zeros
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_, stats[~dups]
def get_hill_data(query_data:dict, hill_ptrs:np.ndarray, hill_data:np.ndarray, hill_nboot_max:int = 300, hill_nboot:int = 150) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to get the hill data.
Args:
query_data (dict): Data structure containing the query data.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
Returns:
np.ndarray: Hill stats.
np.ndarray: Sortindex.
np.ndarray: Upper index.
np.ndarray: Scan index.
np.ndarray: Hill data.
np.ndarray: Hill points.
"""
indices_ = np.array(query_data['indices_ms1'])
rt_ = np.array(query_data['rt_list_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
scan_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
int_data = np.array(query_data['int_list_ms1'])
    stats = np.zeros((len(hill_ptrs)-1, 6)) #average_mz, delta_m, int_area, int_apex, rt_min, rt_max
hill_stats(range(len(hill_ptrs)-1), np.arange(len(hill_ptrs)-1), hill_ptrs, hill_data, int_data, mass_data, rt_, scan_idx, stats, hill_nboot_max, hill_nboot)
# sort the stats
sortindex = np.argsort(stats[:,4]) #Sorted by rt_min
stats = stats[sortindex,:]
idxs_upper = stats[:,4].searchsorted(stats[:,5], side="right")
sortindex_ = np.arange(len(sortindex))[sortindex]
return stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs
###Output
_____no_output_____
###Markdown
Combining Hills to Isotope PatternsAfter obtaining summary statistics of hills, the next step is to check whether they belong together to form an isotope pattern. For this, we check whether it is possible that they are neighbors in an isotope pattern, e.g. one having a 12C atom that has been replaced by a 13C version. The detailed criterion for the check is implemented in `check_isotope_pattern` and is as follows:$$\left | \Delta m-\frac{\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m_{1}}^{2} +\Delta {m_{2}}^{2}}$$The left side contains $\Delta m$, being the delta of the precise mass estimates from the summary statistics, and $\Delta M = 1.00286864$, which is the mass difference between the 13C peak and the monoisotopic peak in an averagine molecule of 1500 Da mass, divided by the charge $z$.The right side contains $\Delta S = 0.0109135$, which is the maximum shift that a sulphur atom can cause ($\Delta S = 2m(^{13}C) - 2m(^{12}C) - m(^{34}S) + m(^{32}S)$), and $\Delta {m_{1}}$ and $\Delta {m_{2}}$, which are the bootstrapped mass standard deviations.
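As a small worked example (hypothetical hill masses and bootstrapped errors; the constants are the values quoted above), the criterion can be evaluated directly:

```python
import numpy as np

DELTA_M, DELTA_S = 1.00286864, 0.0109135  # values quoted above

def could_pair(mass1, mass2, dm1, dm2, charge, iso_mass_range=5):
    # same inequality as check_isotope_pattern below
    dm1, dm2 = dm1 * iso_mass_range, dm2 * iso_mass_range
    left_side = np.abs(np.abs(mass1 - mass2) - DELTA_M / charge)
    right_side = np.sqrt((DELTA_S / charge) ** 2 + dm1 ** 2 + dm2 ** 2)
    return left_side <= right_side

# two hills roughly one 13C spacing apart at charge 2 vs. a clearly incompatible spacing
print(could_pair(500.000, 500.502, 0.001, 0.001, charge=2))  # True
print(could_pair(500.000, 500.700, 0.001, 0.001, charge=2))  # False
```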
###Code
#export
from alphapept.constants import mass_dict
DELTA_M = mass_dict['delta_M']
DELTA_S = mass_dict['delta_S']
maximum_offset = DELTA_M + DELTA_S
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, iso_mass_range:int = 5)-> bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
iso_mass_range (int, optional): Mass range. Defaults to 5.
Returns:
bool: Flag to see if pattern belongs to the same pattern.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
delta_mass = np.abs(mass1 - mass2)
left_side = np.abs(delta_mass - DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
#hide
def test_check_isotope_pattern():
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == True
mass2, delta_mass2 = 102.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == False
test_check_isotope_pattern()
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
mass2, delta_mass2 = 102.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
###Output
True
False
###Markdown
Cosine Correlation of two hills An additional criterion that is being checked is that the intensity profiles have sufficient overlap in retention time. This is validated by ensuring that two hills have a cosine correlation of at least 0.6.$$\frac{\sum_{s=s_{min}}^{s_{max}}I_sJ_s}{\sqrt{\sum_{s=s_{min}}^{s_{max}}I_s^{2} \sum_{s=s_{min}}^{s_{max}}J_s^{2}}} \geq 0.6$$The intensities of two hills are only compared if both have an intensity value in a particular scan. Otherwise, the intensity is set to zero. Additionally, an overlap of at least three elements is required.
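A minimal sketch of this correlation for two toy hills that share scans (hypothetical scan numbers and intensities):

```python
import numpy as np

scans_1, int_1 = np.array([10, 11, 12, 13]), np.array([1., 4., 6., 3.])
scans_2, int_2 = np.array([11, 12, 13, 14]), np.array([2., 5., 4., 1.])

# place both intensity profiles on a common scan axis; missing scans stay at zero
min_s = min(scans_1[0], scans_2[0])
max_s = max(scans_1[-1], scans_2[-1])
a = np.zeros(max_s - min_s + 1)
b = np.zeros(max_s - min_s + 1)
a[scans_1 - min_s] = int_1
b[scans_2 - min_s] = int_2

cos = np.sum(a * b) / np.sqrt(np.sum(a ** 2) * np.sum(b ** 2))
print(round(cos, 3), cos > 0.6)  # ~0.936, True
```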
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def correlate(scans_:np.ndarray, scans_2:np.ndarray, int_:np.ndarray, int_2:np.ndarray)->float:
"""Correlate two scans.
Args:
scans_ (np.ndarray): Masses of the first scan.
scans_2 (np.ndarray): Masses of the second scan.
int_ (np.ndarray): Intensity of the first scan.
int_2 (np.ndarray): Intensity of the second scan.
Returns:
float: Correlation.
"""
min_one, max_one = scans_[0], scans_[-1]
min_two, max_two = scans_2[0], scans_2[-1]
if min_one + 3 > max_two: # at least an overlap of 3 elements
corr = 0
elif min_two + 3 > max_one:
corr = 0
else:
min_s = min(min_one, min_two)
max_s = max(max_one, max_two)
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[scans_ - min_s] = int_
int_two_scaled[scans_2 - min_s] = int_2
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
###Output
_____no_output_____
###Markdown
Extracting pre-Isotope PatternsNow having two criteria to check whether hills could, in principle, belong together, we define `extract_edge` and the wrapper `get_pre_isotope_patterns` to extract the connected hills. To minimize the number of comparisons we need to perform, we only compare the hills that overlap in time (i.e., the start of one hill `rt_min` needs to be before the end of the other hill `rt_max`) and are less than the sum of $\Delta M$ and $\Delta S$ apart. To extract all hills that belong together, we again rely on the `NetworkX`-package to extract the connected components.
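A minimal sketch of the final grouping step with `NetworkX`, assuming a hypothetical list of edges (pairs of hill indices) that passed both checks:

```python
import networkx as nx

edges = [(0, 1), (1, 2), (5, 6)]  # hypothetical hill pairs that passed the mass and correlation checks

G = nx.Graph()
G.add_edges_from(edges)
pre_isotope_patterns = [sorted(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)]
print(pre_isotope_patterns)  # [[0, 1, 2], [5, 6]]
```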
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def extract_edge(stats:np.ndarray, idxs_upper:np.ndarray, runner:int, max_index:int, maximum_offset:float, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:int=5)->list:
"""Extract edges.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparing.
runner (int): Index.
max_index (int): Unused.
maximum_offset (float): Maximum offset when comparing edges.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
Returns:
list: List of edges.
"""
edges = []
mass1 = stats[runner, 0]
delta_mass1 = stats[runner, 1]
for j in range(runner+1, idxs_upper[runner]):
mass2 = stats[j, 0]
if np.abs(mass2 - mass1) <= maximum_offset:
delta_mass2 = stats[j, 1]
for charge in range(iso_charge_min, iso_charge_max + 1):
if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
edges.append((runner, j))
break
return edges
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def edge_correlation(idx:np.ndarray, to_keep:np.ndarray, sortindex_:np.ndarray, pre_edges:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float):
"""Correlates two edges and flag them it they should be kept.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
to_keep (np.ndarray): Array with indices which edges should be kept.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
pre_edges (np.ndarray): Array with pre edges.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
"""
edge = pre_edges[idx,:]
y = sortindex_[edge[0]]
start = hill_ptrs[y]
end = hill_ptrs[y + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
con = sortindex_[edge[1]]
start = hill_ptrs[con]
end = hill_ptrs[con + 1]
idx_2 = hill_data[start:end]
int_2 = int_data[idx_2]
scans_2 = scan_idx[idx_2]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
to_keep[idx] = 1
#export
import networkx as nx
def get_pre_isotope_patterns(stats:np.ndarray, idxs_upper:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, maximum_offset:float, iso_charge_min:int=1, iso_charge_max:int=6, iso_mass_range:float=5, cc_cutoff:float=0.6)->list:
"""Function to extract pre isotope patterns.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparison.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
maximum_offset (float): Maximum offset when matching.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
cc_cutoff (float, optional): Correlation cutoff. Defaults to 0.6.
Returns:
list: List of pre isotope patterns.
"""
pre_edges = []
# Step 1
for runner in range(len(stats)):
pre_edges.extend(extract_edge(stats, idxs_upper, runner, idxs_upper[runner], maximum_offset, iso_charge_min, iso_charge_max, iso_mass_range))
to_keep = np.zeros(len(pre_edges), dtype='int')
pre_edges = np.array(pre_edges)
edge_correlation(range(len(to_keep)), to_keep, sortindex_, pre_edges, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
edges = pre_edges[to_keep.nonzero()]
G2 = nx.Graph()
for i in range(len(edges)):
G2.add_edge(edges[i][0], edges[i][1])
pre_isotope_patterns = [
sorted(list(c))
for c in sorted(nx.connected_components(G2), key=len, reverse=True)
]
return pre_isotope_patterns
###Output
_____no_output_____
###Markdown
Extracting Isotope PatternsThe extracted pre-isotope patterns may not be consistent because their pair-wise mass differences may not correspond to the same charge. To extract isotope patterns from pre-isotope patterns, we need to ensure that they are consistent for a single charge. To do this, we start with the 100 most intense peaks from a pre-isotope pattern as seeds. For each seed and charge we then try to extract the longest consistent isotope pattern. To check whether a hill is consistent with the seed we employ a modified checking criterion (`check_isotope_pattern_directed`) as follows:$$\left | m-m_j-\frac{j\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m}^{2} +\Delta {m_{j}}^{2}}$$Here $m$ is the mass of a seed peak, and $m_{j}$ refers to a peak relative to the seed. $j$ refers to the peaks to the left or right (negative or positive index) within the pattern. $j$ needs to run over consecutive values so that gaps are not allowed. Besides this consistency check, two hills are also checked to have a cosine correlation of at least 0.6.Programmatically, this is implemented in `grow_trail` and `grow`. These functions add matching hills to the seed on the left and right side until no more hills can be added.
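As a small worked example of the directed criterion (same constants as above, hypothetical hills): in the sign convention of the formula, a peak two isotope spacings above the seed at charge 1 satisfies the criterion with $j = -2$, because then $m - m_j \approx -2\,\Delta M / z$.

```python
import numpy as np

DELTA_M, DELTA_S = 1.00286864, 0.0109135  # values quoted above

def consistent(mass_seed, mass_j, dm_seed, dm_j, charge, j, iso_mass_range=5):
    # same inequality as check_isotope_pattern_directed below
    dm_seed, dm_j = dm_seed * iso_mass_range, dm_j * iso_mass_range
    left_side = np.abs(mass_seed - mass_j - j * DELTA_M / charge)
    right_side = np.sqrt((DELTA_S / charge) ** 2 + dm_seed ** 2 + dm_j ** 2)
    return left_side <= right_side

# a hill two isotope spacings above the seed (charge 1) is consistent for j = -2 ...
print(consistent(500.0, 502.006, 0.001, 0.001, charge=1, j=-2))  # True
# ... but not for j = -1, i.e. skipping an isotope position is not allowed
print(consistent(500.0, 502.006, 0.001, 0.001, charge=1, j=-1))  # False
```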
###Code
#export
from numba.typed import List
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern_directed(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, index:int, iso_mass_range:float)->bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
index (int): Index (unused).
iso_mass_range (float): Isotope mass ranges.
Returns:
bool: Flag if two isotope patterns belong together.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
left_side = np.abs(mass1 - mass2 - index * DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
@alphapept.performance.compile_function(compilation_mode="numba")
def grow(trail:List, seed:int, direction:int, relative_pos:int, index:int, stats:np.ndarray, pattern:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Grows isotope pattern based on a seed and direction.
Args:
trail (List): List of hills belonging to a pattern.
seed (int): Seed position.
direction (int): Direction in which to grow the trail
relative_pos (int): Relative position.
index (int): Index.
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: List of hills belonging to a pattern.
"""
x = pattern[seed] # This is the seed
mass1 = stats[x,0]
delta_mass1 = stats[x,1]
k = sortindex_[x]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
growing = True
while growing:
if direction == 1:
if seed + relative_pos == len(pattern):
growing = False
break
else:
if seed + relative_pos < 0:
growing = False
break
y = pattern[seed + relative_pos] # This is a reference peak
l = sortindex_[y]
mass2 = stats[y,0]
delta_mass2 = stats[y,1]
start = hill_ptrs[l]
end = hill_ptrs[l + 1]
idx_ = hill_data[start:end]
int_2 = int_data[idx_]
scans_2 = scan_idx[idx_]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
if check_isotope_pattern_directed(mass1, mass2, delta_mass1, delta_mass2, charge, -direction * index, iso_mass_range):
if direction == 1:
trail.append(y)
else:
trail.insert(0, y)
index += (
1
) # Greedy matching: Only one edge for a specific distance, will not affect the following matches
delta_mass = np.abs(mass1 - mass2)
if (delta_mass > (DELTA_M+DELTA_S) * index): # the pattern is sorted so there is a maximum to look back
break
relative_pos += direction
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def grow_trail(seed:int, pattern:np.ndarray, stats:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to grow an isotope pattern to the left and right side.
Args:
seed (int): Seed position.
pattern (np.ndarray): Isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Isotope pattern.
"""
x = pattern[seed]
trail = List()
trail.append(x)
trail = grow(trail, seed, -1, -1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trail = grow(trail, seed, 1, 1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def get_trails(seed:int, pattern:np.ndarray, stats:np.ndarray, charge_range:List, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to extract trails for a given charge range.
Args:
seed (int): Seed index.
pattern (np.ndarray): Pre isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge_range (List): Charge range.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Trail of consistent hills.
"""
trails = []
for charge in charge_range:
trail = grow_trail(seed, pattern, stats, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trails.append(trail)
return trails
#export
def plot_pattern(pattern:np.ndarray, sorted_hills:np.ndarray, centroids:np.ndarray, hill_data:np.ndarray):
"""Helper function to plot a pattern.
Args:
pattern (np.ndarray): Pre isotope pattern.
sorted_hills (np.ndarray): Hills, sorted.
centroids (np.ndarray): 1D Array containing the masses of the centroids.
hill_data (np.ndarray): Array containing the indices to hills.
"""
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10,10))
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
mzs = []
rts = []
ints = []
for entry in pattern:
hill = sorted_hills[entry]
hill_data = np.array([centroids[_[0]][_[1]] for _ in hill], dtype=centroid_dtype)
int_profile = hill_data["int"]
ax1.plot(hill_data["rt"], hill_data["int"])
ax2.scatter(hill_data["rt"], hill_data["mz"], s = hill_data["int"]/5e5 )
ax1.set_title('Pattern')
ax1.set_xlabel('RT (min)')
ax1.set_ylabel('Intensity')
ax2.set_xlabel('RT (min)')
ax2.set_ylabel('m/z')
plt.show()
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def get_minpos(y:np.ndarray, iso_split_level:float)->List:
"""Function to get a list of minima in a trace.
    A minimum is returned if the ratio of the lower of the surrounding maxima to the minimum is larger than the splitting factor.
Args:
y (np.ndarray): Input array.
iso_split_level (float): Isotope split level.
Returns:
List: List with min positions.
"""
minima = get_local_minima(y)
minima_list = List()
for minpos in minima:
minval = y[minpos]
left_max = (y[:minpos]).max()
right_max = (y[minpos:]).max()
minimum_max = min(left_max, right_max)
if minimum_max / minval >= iso_split_level:
minima_list.append(minpos)
return minima_list
@alphapept.performance.compile_function(compilation_mode="numba")
def get_local_minima(y:np.ndarray)->List:
"""Function to return all local minima of a array
Args:
y (np.ndarray): Input array.
Returns:
List: List with indices to minima.
"""
minima = List()
for i in range(1, len(y) - 1):
if is_local_minima(y, i):
minima.append(i)
return minima
@alphapept.performance.compile_function(compilation_mode="numba")
def is_local_minima(y:np.ndarray, i:int)->bool:
"""Check if position is a local minima.
Args:
y (np.ndarray): Input array.
i (int): Position to check.
Returns:
bool: Flag if position is minima or not.
"""
return (y[i - 1] > y[i]) & (y[i + 1] > y[i])
@alphapept.performance.compile_function(compilation_mode="numba")
def truncate(array:np.ndarray, intensity_profile:np.ndarray, seedpos:int, iso_split_level:float)->np.ndarray:
"""Function to truncate an intensity profile around its seedposition.
Args:
array (np.ndarray): Input array.
intensity_profile (np.ndarray): Intensities for the input array.
seedpos (int): Seedposition.
iso_split_level (float): Split level.
Returns:
np.ndarray: Truncated array.
"""
minima = int_list_to_array(get_minpos(intensity_profile, iso_split_level))
if len(minima) > 0:
left_minima = minima[minima < seedpos]
right_minima = minima[minima > seedpos]
# If the minimum is smaller than the seed
if len(left_minima) > 0:
minpos = left_minima[-1]
else:
minpos = 0
if len(right_minima) > 0:
maxpos = right_minima[0]
else:
maxpos = len(array)
array = array[minpos:maxpos+1]
return array
#hide
def test_get_minpos():
"""
Generate an intensity profile with local minima
Check that the minima are found
"""
intensity_profile = np.ones(20) * 10
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
minima = get_minpos(intensity_profile, 2)
minima_list = [_ for _ in minima]
assert minima_list == minima_ref
test_get_minpos()
###Output
_____no_output_____
###Markdown
Isolating Isotope PatternsThe extraction of the longest consistent isotope pattern is implemented in `isolate_isotope_pattern`. Here, three additional checks for an isotope pattern are implemented. The first one is `truncate`: starting from the seed position, we check whether there is a qualifying minimum to its left or right side. If a minimum is found, the isotope pattern is cut off at this position.The second one is a mass filter. If the seed has a mass smaller than 1000, the intensity maximum is detected, and all smaller masses are discarded. This reflects the averagine distribution for small masses, where no minimum on the left side can be found.The third one is `check_averagine`, which relies on `pattern_to_mz` and `cosine_averagine`. It is used to ensure that the extracted isotope pattern has a cosine correlation of at least 0.6 with the averagine isotope pattern of the same mass.After the longest consistent isotope pattern is found, the hills are removed from the pre-isotope pattern, and the process is repeated until no more isotope patterns can be extracted from the pre-isotope patterns.
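A minimal sketch of the truncation step on a toy intensity profile (hypothetical values, `iso_split_level = 1.3`): the pattern is cut at the nearest qualifying minima around the seed.

```python
import numpy as np

pattern = np.arange(8)  # hill indices of a hypothetical pre-pattern, sorted by mass
intensity = np.array([5., 20., 3., 15., 30., 18., 2., 10.])
seedpos, iso_split_level = 4, 1.3

# minima where the lower of the surrounding maxima is >= iso_split_level * minimum
minima = [i for i in range(1, len(intensity) - 1)
          if intensity[i - 1] > intensity[i] and intensity[i + 1] > intensity[i]
          and min(intensity[:i].max(), intensity[i:].max()) / intensity[i] >= iso_split_level]

left = [m for m in minima if m < seedpos]
right = [m for m in minima if m > seedpos]
lo = left[-1] if left else 0
hi = right[0] if right else len(pattern)
print(pattern[lo:hi + 1])  # [2 3 4 5 6]: truncated around the seed
```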
###Code
#export
from alphapept.chem import mass_to_dist
from alphapept.constants import averagine_aa, isotopes, Isotope
from numba.typed import Dict
@alphapept.performance.compile_function(compilation_mode="numba")
def check_averagine(stats:np.ndarray, pattern:np.ndarray, charge:int, averagine_aa:Dict, isotopes:Dict)->float:
"""Function to compare a pattern to an averagine model.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
Returns:
float: Averagine correlation.
"""
masses, intensity = pattern_to_mz(stats, pattern, charge)
spec_one = np.floor(masses).astype(np.int64)
int_one = intensity
spec_two, int_two = mass_to_dist(np.min(masses), averagine_aa, isotopes) # maybe change to no rounded version
spec_two = np.floor(spec_two).astype(np.int64)
return cosine_averagine(int_one, int_two, spec_one, spec_two)
@alphapept.performance.compile_function(compilation_mode="numba")
def pattern_to_mz(stats:np.ndarray, pattern:np.ndarray, charge:int)-> (np.ndarray, np.ndarray):
"""Function to calculate masses and intensities from pattern for a given charge.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge of the pattern.
Returns:
np.ndarray: masses
np.ndarray: intensity
"""
mzs = np.zeros(len(pattern))
ints = np.zeros(len(pattern))
for i in range(len(pattern)):
entry = pattern[i]
mzs[i] = mz_to_mass(stats[entry,0], charge)
ints[i] = stats[entry,2]
sortindex = np.argsort(mzs)
masses = mzs[sortindex]
intensity = ints[sortindex]
return masses, intensity
@alphapept.performance.compile_function(compilation_mode="numba")
def cosine_averagine(int_one:np.ndarray, int_two:np.ndarray, spec_one:np.ndarray, spec_two:np.ndarray)-> float:
"""Calculate the cosine correlation of two hills.
Args:
int_one (np.ndarray): Intensity of the first hill.
int_two (np.ndarray): Intensity of the second hill.
spec_one (np.ndarray): Scan numbers of the first hill.
spec_two (np.ndarray): Scan numbers of the second hill.
Returns:
float: Cosine
"""
min_one, max_one = spec_one[0], spec_one[-1]
min_two, max_two = spec_two[0], spec_two[-1]
min_s = np.min(np.array([min_one, min_two]))
max_s = np.max(np.array([max_one, max_two]))
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[spec_one - min_s] = int_one
int_two_scaled[spec_two - min_s] = int_two
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
@alphapept.performance.compile_function(compilation_mode="numba")
def int_list_to_array(numba_list:List)->np.ndarray:
"""Numba compatbilte function to convert a numba list with integers to a numpy array
Args:
numba_list (List): Input numba-typed List.
Returns:
np.ndarray: Output numpy array.
"""
array = np.zeros(len(numba_list), dtype=np.int64)
for i in range(len(array)):
array[i] = numba_list[i]
return array
M_PROTON = mass_dict['Proton']
@alphapept.performance.compile_function(compilation_mode="numba")
def mz_to_mass(mz:float, charge:int)->float:
"""Function to calculate the mass from a mz value.
Args:
mz (float): M/z
charge (int): Charge.
Raises:
NotImplementedError: When a negative charge is used.
Returns:
float: mass
"""
if charge < 0:
raise NotImplementedError("Negative Charges not implemented.")
mass = mz * charge - charge * M_PROTON
return mass
#hide
if False:
def test_truncate():
"""
Generate an intensity profile with local minima
        Check whether the profile is correctly truncated with respect to the seed
"""
array = np.arange(0, 20)
intensity_profile = np.ones(20) * 10
iso_split_level = 1.3
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
seedpos = 5
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([3, 4, 5, 6, 7]))
seedpos = 0
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([0, 1, 2, 3]))
seedpos = len(array)
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([17, 18, 19]))
test_truncate()
###Output
_____no_output_____
###Markdown
Isotope PatternsThe wrapper function `get_isotope_patterns` iterates over all pre_isotope_patterns.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def isolate_isotope_pattern(pre_pattern:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, iso_mass_range:float, charge_range:List, averagine_aa:Dict, isotopes:Dict, iso_n_seeds:int, cc_cutoff:float, iso_split_level:float)->(np.ndarray, int):
"""Isolate isotope patterns.
Args:
pre_pattern (np.ndarray): Pre isotope pattern.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
iso_mass_range (float): Mass range for checking isotope patterns.
charge_range (List): Charge range.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_n_seeds (int): Number of seeds.
cc_cutoff (float): Cutoff value for what is considered correlating.
iso_split_level (float): Split level when isotopes are split.
Returns:
np.ndarray: Array with the best pattern.
int: Charge of the best pattern.
"""
longest_trace = 0
champion_trace = None
champion_charge = 0
champion_intensity = 0
# Sort patterns by mass
    sortindex = np.argsort(stats[pre_pattern][:,0]) #mass
sorted_pattern = pre_pattern[sortindex]
massindex = np.argsort(stats[sorted_pattern][:,2])[::-1][:iso_n_seeds]
# Use all the elements in the pre_pattern as seed
for seed in massindex: # Loop through all seeds
seed_global = sorted_pattern[seed]
trails = get_trails(seed, sorted_pattern, stats, charge_range, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
for index, trail in enumerate(trails):
if len(trail) >= longest_trace: # Needs to be longer than the current champion
arr = int_list_to_array(trail)
intensity_profile = stats[arr][:,2]
seedpos = np.nonzero(arr==seed_global)[0][0]
# truncate around the seed...
arr = truncate(arr, intensity_profile, seedpos, iso_split_level)
intensity_profile = stats[arr][:,2]
# Remove lower masses:
# Take the index of the maximum and remove all masses on the left side
if charge_range[index] * stats[seed_global, 0] < 1000:
maxpos = np.argmax(intensity_profile)
arr = arr[maxpos:]
intensity_profile = stats[arr][:,2]
if (len(arr) > longest_trace) | ((len(arr) == longest_trace) & (intensity_profile.sum() > champion_intensity)):
# Averagine check
cc = check_averagine(stats, arr, charge_range[index], averagine_aa, isotopes)
if cc > 0.6:
# Update the champion
champion_trace = arr
champion_charge = charge_range[index]
longest_trace = len(arr)
champion_intensity = intensity_profile.sum()
return champion_trace, champion_charge
#hide
if False:
def test_get_isotope_patterns():
test_centroids = [
[
(300, 50, 1, 1),
(300.501, 40, 1, 1),
(301.003, 30, 1, 1),
(301.504, 20, 1, 1),
(302.006, 10, 1, 1),
],
[
(300, 50, 2, 2),
(300.501, 40, 2, 2),
(301.003, 30, 2, 2),
(301.504, 20, 2, 2),
(302.006, 10, 2, 2),
],
[
(300, 50, 3, 3),
(300.501, 40, 3, 3),
(301.003, 30, 3, 3),
(301.504, 20, 3, 3),
(302.006, 10, 3, 3),
],
[
(300, 50, 4, 4),
(300.501, 40, 4, 4),
(301.003, 30, 4, 4),
(301.504, 20, 4, 4),
(302.006, 10, 4, 4),
],
[
(300, 50, 5, 5),
(300.501, 40, 5, 5),
(301.003, 30, 5, 5),
(301.504, 20, 5, 5),
(302.006, 10, 5, 5),
],
[(400, 10, 6, 6), (401, 10, 6, 6), (402, 10, 6, 6)],
[(400, 10, 7, 7), (401, 10, 7, 7), (402, 10, 7, 7)],
[(400, 10, 8, 8), (401, 10, 8, 8), (402, 10, 8, 8)],
[(400, 10, 9, 9), (401, 10, 9, 9), (402, 10, 9, 9)],
]
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
test_centroids_tmp = [np.array(_, dtype=centroid_dtype) for _ in test_centroids]
test_centroids = List([_ for _ in test_centroids_tmp])
test_hills = get_hills(test_centroids)
sorted_hills, stats, data, hill_data, hill_ptrs = get_hill_data(test_hills, test_centroids)
pre_patterns = get_edges(stats, data)
isotope_patterns, isotope_charges = get_isotope_patterns(pre_patterns, stats, data, averagine_aa, isotopes)
assert np.all(isotope_patterns[0] == np.array([0, 1, 2, 3, 4]))
assert isotope_charges[0] == 2
assert np.all(isotope_patterns[1] == np.array([5,6,7]))
assert isotope_charges[1] == 1
test_get_isotope_patterns()
#export
from numba.typed import List
from typing import Callable, Union
def get_isotope_patterns(pre_isotope_patterns:list, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, averagine_aa:Dict, isotopes:Dict, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:float = 5, iso_n_seeds:int = 100, cc_cutoff:float=0.6, iso_split_level:float = 1.3, callback:Union[Callable, None]=None) -> (np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to iterate over pre_isotope_patterns.
Args:
pre_isotope_patterns (list): List of pre-isotope patterns.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
iso_n_seeds (int, optional): Number of isotope seeds. Defaults to 100.
        cc_cutoff (float, optional): Cutoff for correlation. Defaults to 0.6.
        iso_split_level (float, optional): Isotope split level. Defaults to 1.3.
callback (Union[Callable, None], optional): Callback function for progress. Defaults to None.
Returns:
list: List of isotope patterns.
np.ndarray: Iso idx.
np.ndarray: Array containing isotope charges.
"""
isotope_patterns = []
isotope_charges = []
charge_range = List()
for i in range(iso_charge_min, iso_charge_max + 1):
charge_range.append(i)
isotope_patterns = []
isotope_charges = []
for idx, pre_pattern in enumerate(pre_isotope_patterns):
extract = True
while extract:
isotope_pattern, isotope_charge = isolate_isotope_pattern(np.array(pre_pattern), hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, iso_mass_range, charge_range, averagine_aa, isotopes, iso_n_seeds, cc_cutoff, iso_split_level)
if isotope_pattern is None:
length = 0
else:
length = len(isotope_pattern)
if length > 1:
isotope_charges.append(isotope_charge)
isotope_patterns.append(isotope_pattern)
pre_pattern = [_ for _ in pre_pattern if _ not in isotope_pattern]
if len(pre_pattern) <= 1:
extract = False
else:
extract = False
if callback:
callback((idx+1)/len(pre_isotope_patterns))
iso_patterns = np.zeros(sum([len(_) for _ in isotope_patterns]), dtype=np.int64)
iso_idx = np.zeros(len(isotope_patterns)+1, dtype='int')
start = 0
for idx, _ in enumerate(isotope_patterns):
iso_patterns[start:start+len(_)] = _
start += len(_)
iso_idx[idx+1] = start
return iso_patterns, iso_idx, np.array(isotope_charges)
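# Sample snippet (illustration only): reading isotope patterns back from the flattened
# arrays returned above. iso_patterns stores the hill indices of all patterns back-to-back
# and iso_idx stores the boundaries, so pattern i is iso_patterns[iso_idx[i]:iso_idx[i+1]].
# The values below are made-up toy data.
iso_patterns_demo = np.array([0, 1, 2, 5, 6, 7], dtype=np.int64)
iso_idx_demo = np.array([0, 3, 6])
patterns_demo = [iso_patterns_demo[iso_idx_demo[i]:iso_idx_demo[i+1]] for i in range(len(iso_idx_demo) - 1)]
# patterns_demo -> [array([0, 1, 2]), array([5, 6, 7])]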
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def report_(idx:np.ndarray, isotope_charges:list, isotope_patterns:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, results:np.ndarray, lookup_idx:np.ndarray):
"""Function to extract summary statstics from a list of isotope patterns and charges.
MS1 feature intensity estimation. For each isotope envelope we interpolate the signal over the retention time
range. All isotope enevelopes are summed up together to estimate the peak sahpe
Lastly, we report three estimates for the intensity:
- ms1_int_sum_apex: The intensity at the peak of the summed signal.
- ms1_int_sum_area: The area of the summed signal
- ms1_int_max_apex: The intensity at the peak of the most intense isotope envelope
- ms1_int_max_area: The area of the the most intense isotope envelope
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
results (np.ndarray): Recordarray with isotope pattern summary statistics.
lookup_idx (np.ndarray): Lookup array for each centroid.
"""
pattern = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
isotope_data = stats[pattern]
mz = np.min(isotope_data[:, 0])
mz_std = np.mean(isotope_data[:, 1])
charge = isotope_charges[idx]
mass = mz_to_mass(mz, charge)
int_max_idx = np.argmax(isotope_data[:, 2])
mz_most_abundant = isotope_data[:, 0][int_max_idx]
int_max = isotope_data[:,2][int_max_idx]
rt_start = isotope_data[int_max_idx, 4] # This is the start of the most abundant trace
rt_end = isotope_data[int_max_idx, 5]
rt_min_ = min(isotope_data[:, 4])
rt_max_ = max(isotope_data[:, 5])
rt_range = np.linspace(rt_min_, rt_max_, 100) #TODO this is a fixed value - is there an optimum?
trace_sum = np.zeros_like(rt_range)
most_intense_pattern = -np.inf
for i, k in enumerate(pattern):
x = sortindex_[k]
start = hill_ptrs[x]
end = hill_ptrs[x + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
rts = rt_[rt_idx[idx_]]
lookup_idx[idx_, 0] = idx
lookup_idx[idx_, 1] = i
interpolation = np.interp(rt_range, rts, int_)
#Filter
interpolation[:(rt_range < rts[0]).sum()] = 0
right_cut = (rt_range > rts[-1]).sum()
if right_cut > 0:
interpolation[-right_cut:]= 0
trace_sum += interpolation
if int_.sum() > most_intense_pattern:
most_intense_pattern = int_.sum()
ms1_int_max_apex = int_.max()
ms1_int_max_area = np.trapz(int_, rts)
rt_apex_idx = trace_sum.argmax()
rt_apex = rt_range[rt_apex_idx]
trace = trace_sum
half_max = trace.max()/2
if rt_apex_idx == 0:
left_apex = 0
else:
left_apex = np.abs(trace[:rt_apex_idx]-half_max).argmin()
right_apex = np.abs(trace[rt_apex_idx:]-half_max).argmin()+rt_apex_idx
ms1_int_sum_apex = trace_sum[rt_apex_idx]
fwhm = rt_range[right_apex] - rt_range[left_apex]
n_isotopes = len(pattern)
rt_cutoff = 0.95 #5%
if rt_apex_idx == 0:
rt_min_idx = 0
else:
rt_min_idx = np.abs(trace[:rt_apex_idx]-trace.max()*(1-rt_cutoff)).argmin()
rt_max_idx = np.abs(trace[rt_apex_idx:]-trace.max()*(1-rt_cutoff)).argmin()+rt_apex_idx
#plt.xlabel('rt')
#plt.ylabel('int')
#plt.show()
#plt.plot(rt_range, trace_sum)
#plt.plot([rt_range[left_apex], rt_range[right_apex]], [(trace[left_apex] + trace[right_apex])/2]*2, 'k:')
#plt.plot(rt_range[rt_apex_idx], trace[rt_apex_idx], 'k*')
#plt.plot(rt_range[rt_min_idx], trace[rt_min_idx], 'k*')
#plt.plot(rt_range[rt_max_idx], trace[rt_max_idx], 'k*')
#plt.show()
rt_start = rt_range[rt_min_idx]
rt_end = rt_range[rt_max_idx]
ms1_int_sum_area = np.trapz(trace_sum[rt_min_idx:rt_max_idx], rt_range[rt_min_idx:rt_max_idx])
results[idx,:] = np.array([mz, mz_std, mz_most_abundant, charge, rt_start, rt_apex, rt_end, fwhm, n_isotopes, mass, ms1_int_sum_apex, ms1_int_sum_area, ms1_int_max_apex, ms1_int_max_area])
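# Sample snippet (illustration only): how the apex and area estimates in report_ relate.
# A toy hill trace is interpolated onto a common rt grid; the apex is the maximum of the
# (summed) trace and the area is its trapezoidal integral. report_ additionally restricts
# the integration range around the apex.
rts_demo = np.array([10.0, 10.1, 10.2, 10.3, 10.4])
ints_demo = np.array([0.0, 50.0, 100.0, 40.0, 0.0])
rt_grid_demo = np.linspace(rts_demo[0], rts_demo[-1], 100)
sum_trace_demo = np.interp(rt_grid_demo, rts_demo, ints_demo)
apex_demo = sum_trace_demo.max() # analogous to ms1_int_sum_apex
area_demo = np.trapz(sum_trace_demo, rt_grid_demo) # analogous to ms1_int_sum_area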
#export
import pandas as pd
def feature_finder_report(query_data:dict, isotope_patterns:list, isotope_charges:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray)->pd.DataFrame:
"""Creates a report dataframe with summary statistics of the found isotope patterns.
Args:
query_data (dict): Data structure containing the query data.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to the isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
    Returns:
        pd.DataFrame: DataFrame with isotope pattern summary statistics.
        np.ndarray: Lookup array matching each centroid to an isotope pattern and hill.
"""
rt_ = np.array(query_data['rt_list_ms1'])
indices_ = np.array(query_data['indices_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
rt_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
    lookup_idx = np.zeros((len(mass_data), 2), dtype=np.int64) - 1 # np.int is deprecated; use a fixed-width integer type
int_data = np.array(query_data['int_list_ms1'])
results = np.zeros((len(isotope_charges), 14))
report_(range(len(isotope_charges)), isotope_charges, isotope_patterns, iso_idx, stats, sortindex_, hill_ptrs, hill_data, int_data, rt_, rt_idx, results, lookup_idx)
df = pd.DataFrame(results, columns = ['mz','mz_std','mz_most_abundant','charge','rt_start','rt_apex','rt_end','fwhm','n_isotopes','mass', 'ms1_int_sum_apex', 'ms1_int_sum_area', 'ms1_int_max_apex', 'ms1_int_max_area'])
df.sort_values(['rt_start','mz'])
return df, lookup_idx
###Output
_____no_output_____
###Markdown
Data Output
For each feature that is found we extract summary statistics and put it in tabular form to be used as a pandas dataframe.
Plotting
For quality control reasons we also employ a function to plot a feature in its local environment.
External Feature Finder
To utilize the command-line Feature Finder from Bruker `4DFF-3.13` - `uff-cmdline2.exe`, we call it via a subprocess and wait until completion.
###Code
#export
import subprocess
import os
import platform
def extract_bruker(file:str, base_dir:str = "ext/bruker/FF", config:str = "proteomics_4d.config"):
"""Call Bruker Feautre Finder via subprocess.
Args:
file (str): Filename for feature finding.
base_dir (str, optional): Base dir where the feature finder is stored.. Defaults to "ext/bruker/FF".
config (str, optional): Config file for feature finder. Defaults to "proteomics_4d.config".
Raises:
NotImplementedError: Unsupported operating system.
FileNotFoundError: Feature finder not found.
FileNotFoundError: Config file not found.
FileNotFoundError: Feature file not found.
"""
feature_path = file + '/'+ os.path.split(file)[-1] + '.features'
base_dir = os.path.join(os.path.dirname(__file__), base_dir)
operating_system = platform.system()
if operating_system == 'Linux':
ff_dir = os.path.join(base_dir, 'linux64','uff-cmdline2')
logging.info('Using Linux FF')
elif operating_system == 'Windows':
ff_dir = os.path.join(base_dir, 'win64','uff-cmdline2.exe')
logging.info('Using Windows FF')
else:
raise NotImplementedError(f"System {operating_system} not supported.")
if os.path.exists(feature_path):
return feature_path
else:
if not os.path.isfile(ff_dir):
raise FileNotFoundError(f'Bruker feature finder cmd not found here {ff_dir}.')
config_path = base_dir + '/'+ config
if not os.path.isfile(config_path):
raise FileNotFoundError(f'Config file not found here {config_path}.')
if operating_system == 'Windows':
FF_parameters = [ff_dir,'--ff 4d',f'--readconfig "{config_path}"', f'--analysisDirectory "{file}"']
process = subprocess.Popen(' '.join(FF_parameters), stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, b''):
logtxt = line.decode('utf8')
logging.info(logtxt[48:].rstrip()) #Remove logging info from FF
elif operating_system == 'Linux':
FF_parameters = [
ff_dir,
'--ff',
'4d',
'--readconfig',
config_path,
'--analysisDirectory',
file
]
process = subprocess.run(FF_parameters, stdout=subprocess.PIPE)
if os.path.exists(feature_path):
return feature_path
else:
raise FileNotFoundError(f"Feature file {feature_path} does not exist.")
import sqlalchemy as db
def convert_bruker(feature_path:str)->pd.DataFrame:
"""Reads feature table and converts to feature table to be used with AlphaPept.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
    Returns:
        pd.DataFrame: DataFrame containing feature information.
        pd.DataFrame: DataFrame containing the feature cluster mapping.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
feature_table = pd.read_sql_table('LcTimsMsFeature', engine_featurefile)
feature_cluster_mapping = pd.read_sql_table('FeatureClusterMapping', engine_featurefile)
from alphapept.constants import mass_dict
M_PROTON = mass_dict['Proton']
feature_table['Mass'] = feature_table['MZ'].values * feature_table['Charge'].values - feature_table['Charge'].values*M_PROTON
feature_table = feature_table.rename(columns={"MZ": "mz","Mass": "mass", "RT": "rt_apex", "RT_lower":"rt_start", "RT_upper":"rt_end", "Mobility": "mobility", "Mobility_lower": "mobility_lower", "Mobility_upper": "mobility_upper", "Charge":"charge","Intensity":'ms1_int_sum_apex',"ClusterCount":'n_isotopes'})
feature_table['rt_apex'] = feature_table['rt_apex']/60
feature_table['rt_start'] = feature_table['rt_start']/60
feature_table['rt_end'] = feature_table['rt_end']/60
feature_cluster_mapping = feature_cluster_mapping.rename(columns={"FeatureId": "feature_id", "ClusterId": "cluster_id", "Monoisotopic": "monoisotopic", "Intensity": "ms1_int_sum_apex"})
return feature_table, feature_cluster_mapping
def map_bruker(feature_path:str, feature_table:pd.DataFrame, query_data:dict)->pd.DataFrame:
"""Map Ms1 to Ms2 via Table FeaturePrecursorMapping from Bruker FF.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
feature_table (pd.DataFrame): Pandas DataFrame containing the features.
query_data (dict): Data structure containing the query data.
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
mapping = pd.read_sql_table('FeaturePrecursorMapping', engine_featurefile)
mapping = mapping.set_index('PrecursorId')
feature_table= feature_table.set_index('Id')
query_prec_id = query_data['prec_id']
#Now look up the feature for each precursor
mass_matched = []
mz_matched = []
rt_matched = []
query_idx = []
f_idx = []
for idx, prec_id in tqdm(enumerate(query_prec_id)):
try:
f_id = mapping.loc[prec_id]['FeatureId']
all_matches = feature_table.loc[f_id]
if type(f_id) == np.int64:
match = all_matches
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
else:
for k in range(len(all_matches)):
match = all_matches.iloc[k]
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
except KeyError:
pass
features = pd.DataFrame(np.array([mass_matched, mz_matched, rt_matched, query_idx, f_idx]).T, columns = ['mass_matched', 'mz_matched', 'rt_matched', 'query_idx', 'feature_idx'])
features['query_idx'] = features['query_idx'].astype('int')
return features
###Output
_____no_output_____
###Markdown
Isotope Export
###Code
#export
def get_stats(isotope_patterns, iso_idx, stats):
    """Map the hill summary statistics of each isotope pattern to its feature id and return them as a DataFrame."""
    columns = ['mz_average','delta_m','int_sum','int_area','rt_min','rt_max']
stats_idx = np.zeros(iso_idx[-1], dtype=np.int64)
stats_map = np.zeros(iso_idx[-1], dtype=np.int64)
start_ = 0
end_ = 0
for idx in range(len(iso_idx)-1):
k = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
end_ += len(k)
stats_idx[start_:end_] = k
stats_map[start_:end_] = idx
start_ = end_
k = pd.DataFrame(stats[stats_idx], columns=columns)
k['feature_id'] = stats_map
return k
###Output
_____no_output_____
###Markdown
Wrapper
###Code
#export
import numpy as np
import logging
import os
from alphapept.search import query_data_to_features
import alphapept.io
import functools
def find_features(to_process:tuple, callback:Union[Callable, None] = None, parallel:bool = False)-> Union[str, bool]:
"""Wrapper for feature finding.
Args:
        to_process (tuple): to_process tuple, to be used from a process pool.
        callback (Union[Callable, None], optional): Optional callback function. Defaults to None.
        parallel (bool, optional): Flag to use parallel processing. Currently unused. Defaults to False.
    Raises:
        NotImplementedError: Error if the file extension is not understood.
    Returns:
        Union[str, bool]: Returns True if the function was successful, otherwise the exception as a string.
"""
try:
index, settings = to_process
file_name = settings['experiment']['file_paths'][index]
base, ext = os.path.splitext(file_name)
if ext.lower() == '.raw':
datatype='thermo'
elif ext.lower() == '.d':
datatype='bruker'
elif ext.lower() == '.mzml':
datatype='mzml'
else:
raise NotImplementedError('File extension {} not understood.'.format(ext))
out_file = f"{base}.ms_data.hdf"
skip = True
if os.path.isfile(out_file):
try:
alphapept.io.MS_Data_File(
out_file
).read(dataset_name="features")
logging.info(
'Found *.hdf with features for {}'.format(out_file)
)
except KeyError:
logging.info(
'No *.hdf file with features found for {}. Adding to feature finding list.'.format(out_file)
)
skip = False
if not skip:
ms_file = alphapept.io.MS_Data_File(out_file, is_read_only=False)
query_data = ms_file.read_DDA_query_data()
feature_cluster_mapping = pd.DataFrame()
if not settings['workflow']["find_features"]:
features = query_data_to_features(query_data)
else:
if datatype in ['thermo','mzml']:
from alphapept.constants import averagine_aa, isotopes
f_settings = settings['features']
max_gap = f_settings['max_gap']
centroid_tol = f_settings['centroid_tol']
hill_split_level = f_settings['hill_split_level']
iso_split_level = f_settings['iso_split_level']
#Cleanup if
int_data = np.array(query_data['int_list_ms1'])
window = f_settings['hill_smoothing']
hill_check_large = f_settings['hill_check_large']
iso_charge_min = f_settings['iso_charge_min']
iso_charge_max = f_settings['iso_charge_max']
iso_n_seeds = f_settings['iso_n_seeds']
hill_nboot_max = f_settings['hill_nboot_max']
hill_nboot = f_settings['hill_nboot']
iso_mass_range = f_settings['iso_mass_range']
iso_corr_min = f_settings['iso_corr_min']
logging.info('Feature finding on {}'.format(file_name))
logging.info(f'Hill extraction with centroid_tol {centroid_tol} and max_gap {max_gap}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, centroid_tol)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
logging.info(f'Repeating hill extraction with centroid_tol {score_median+score_std*3:.2f}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, score_median+score_std*3)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
hill_ptrs, hill_data = remove_duplicate_hills(hill_ptrs, hill_data, path_node_cnt)
logging.info(f'After duplicate removal of hills {len(hill_ptrs):,}')
                    hill_ptrs = split_hills(hill_ptrs, hill_data, int_data, hill_split_level=hill_split_level, window = window) # hill length is in there already
logging.info(f'After split hill_ptrs {len(hill_ptrs):,}')
hill_data, hill_ptrs = filter_hills(hill_data, hill_ptrs, int_data, hill_check_large = hill_check_large, window=window)
logging.info(f'After filter hill_ptrs {len(hill_ptrs):,}')
stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs = get_hill_data(query_data, hill_ptrs, hill_data, hill_nboot_max = hill_nboot_max, hill_nboot = hill_nboot)
logging.info('Extracting hill stats complete')
pre_isotope_patterns = get_pre_isotope_patterns(stats, idxs_upper, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, maximum_offset, iso_charge_min=iso_charge_min, iso_charge_max=iso_charge_max, iso_mass_range=iso_mass_range, cc_cutoff=iso_corr_min)
logging.info('Found {:,} pre isotope patterns.'.format(len(pre_isotope_patterns)))
isotope_patterns, iso_idx, isotope_charges = get_isotope_patterns(pre_isotope_patterns, hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, averagine_aa, isotopes, iso_charge_min = iso_charge_min, iso_charge_max = iso_charge_max, iso_mass_range = iso_mass_range, iso_n_seeds = iso_n_seeds, cc_cutoff = iso_corr_min, iso_split_level=iso_split_level, callback=None)
logging.info('Extracted {:,} isotope patterns.'.format(len(isotope_charges)))
feature_table, lookup_idx = feature_finder_report(query_data, isotope_patterns, isotope_charges, iso_idx, stats, sortindex_, hill_ptrs, hill_data)
lookup_idx_df = pd.DataFrame(lookup_idx, columns = ['isotope_pattern', 'isotope_pattern_hill'])
ms_file.write(lookup_idx_df, dataset_name="feature_table_idx")
feature_cluster_mapping = get_stats(isotope_patterns, iso_idx, stats)
logging.info('Report complete.')
elif datatype == 'bruker':
logging.info('Feature finding on {}'.format(file_name))
feature_path = extract_bruker(file_name)
feature_table, feature_cluster_mapping = convert_bruker(feature_path)
                logging.info('Bruker feature finder complete. Extracted {:,} features.'.format(len(feature_table)))
# Calculate additional params
feature_table['rt_length'] = feature_table['rt_end'] - feature_table['rt_start']
feature_table['rt_right'] = feature_table['rt_end'] - feature_table['rt_apex']
feature_table['rt_left'] = feature_table['rt_apex'] - feature_table['rt_start']
feature_table['rt_tail'] = feature_table['rt_right'] / feature_table['rt_left']
logging.info('Matching features to query data.')
if 'mono_mzs2' not in query_data.keys():
logging.info('No MS2-data to match.')
features = pd.DataFrame()
else:
features = map_ms2(feature_table, query_data, **settings['features'])
ms_file.write(feature_cluster_mapping, dataset_name="feature_cluster_mapping")
logging.info('Saving feature table.')
ms_file.write(feature_table, dataset_name="feature_table")
logging.info('Feature table saved to {}'.format(out_file))
logging.info('Saving features.')
ms_file.write(features, dataset_name="features")
logging.info(f'Feature finding of file {file_name} complete.')
return True
except Exception as e:
logging.error(f'Feature finding of file {file_name} failed. Exception {e}')
return f"{e}" #Can't return exception object, cast as string
###Output
_____no_output_____
###Markdown
Mapping
Mapping MS1 to MS2
###Code
#export
from sklearn.neighbors import KDTree
import pandas as pd
import numpy as np
def replace_infs(array:np.ndarray)->np.ndarray:
"""Replace nans and infs with 0
Args:
array (np.ndarray): Input array.
Returns:
np.ndarray: Output array without nans and infs.
"""
array[array == -np.inf] = 0
array[array == np.inf] = 0
array[np.isnan(array)] = 0
return array
def map_ms2(feature_table:pd.DataFrame, query_data:dict, map_mz_range:float = 1, map_rt_range:float = 0.5, map_mob_range:float = 0.3, map_n_neighbors:int=5, search_unidentified:bool = False, **kwargs)->pd.DataFrame:
"""Map MS1 features to MS2 based on rt and mz.
If ccs is included also add.
Args:
feature_table (pd.DataFrame): Pandas DataFrame with features.
query_data (dict): Data structure containing the query data.
map_mz_range (float, optional): Mapping range for mz (Da). Defaults to 1.
map_rt_range (float, optional): Mapping range for rt (min). Defaults to 0.5.
map_mob_range (float, optional): Mapping range for mobility (%). Defaults to 0.3.
map_n_neighbors (int, optional): Maximum number of neighbors to be extracted. Defaults to 5.
search_unidentified (bool, optional): Flag to perform search on features that have no isotope pattern. Defaults to False.
Returns:
pd.DataFrame: Table with features.
"""
feature_table['rt'] = feature_table['rt_apex']
range_dict = {}
range_dict['mz'] = ('mono_mzs2', map_mz_range)
range_dict['rt'] = ('rt_list_ms2', map_rt_range)
range_dict['mobility'] = ('mobility', map_mob_range)
query_dict = {}
query_dict['rt'] = 'rt_list_ms2'
query_dict['mass'] = 'prec_mass_list2'
query_dict['mz'] = 'mono_mzs2'
query_dict['charge'] = 'charge2'
query_dict['mobility'] = 'mobility'
if 'mobility' not in feature_table.columns:
del range_dict['mobility']
del query_dict['mobility']
use_mob = False
else:
use_mob = True
tree_points = feature_table[list(range_dict.keys())].values
for i, key in enumerate(range_dict):
tree_points[:,i] = tree_points[:,i]/range_dict[key][1]
matching_tree = KDTree(tree_points, metric="euclidean")
ref_points = np.array([query_data[range_dict[_][0]] / range_dict[_][1] for _ in range_dict]).T
ref_points = replace_infs(ref_points)
dist, idx = matching_tree.query(ref_points, k=map_n_neighbors)
ref_matched = np.zeros(ref_points.shape[0], dtype=np.bool_)
all_df = []
for neighbor in range(map_n_neighbors):
ref_df = pd.DataFrame(np.array([query_data[query_dict[_]] for _ in query_dict]).T, columns = query_dict.keys())
for _ in query_dict:
ref_df[_+'_matched'] = feature_table.iloc[idx[:,neighbor]][_].values
ref_df[_+'_offset'] = ref_df[_+'_matched'] - ref_df[_]
ref_df['query_idx'] = ref_df.index
ref_df['feature_idx'] = idx[:,neighbor]
for field in ['ms1_int_sum_area','ms1_int_sum_apex','ms1_int_max_area','ms1_int_max_apex','rt_start','rt_apex','rt_end','fwhm','mobility_lower','mobility_upper']:
if field in feature_table.keys():
ref_df[field] = feature_table.iloc[idx[:,neighbor]][field].values
rt_check = (ref_df['rt_start'] <= ref_df['rt']) & (ref_df['rt'] <= ref_df['rt_end'])
# check isolation window (win=3)
mass_check = np.abs(ref_df['mz_offset'].values) <= 3
_check = rt_check & mass_check
if use_mob:
mob_check = (ref_df['mobility_lower'] <= ref_df['mobility']) & (ref_df['mobility'] <= ref_df['mobility_upper'])
_check &= mob_check
ref_matched |= _check
ref_df['feature_dist'] = dist[:,neighbor]
ref_df = ref_df[_check]
all_df.append(ref_df)
if search_unidentified:
if use_mob:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2'], query_data['mobility']]).T, columns=['rt', 'mass', 'mz', 'charge','mobility'])
else:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2']]).T, columns=['rt', 'mass', 'mz', 'charge'])
unmatched_ref = unmatched_ref[~ref_matched]
unmatched_ref['mass_matched'] = unmatched_ref['mass']
unmatched_ref['mass_offset'] = 0
unmatched_ref['rt_matched'] = unmatched_ref['rt']
unmatched_ref['rt_offset'] = 0
unmatched_ref['mz_matched'] = unmatched_ref['mz']
unmatched_ref['mz_offset'] = 0
unmatched_ref['charge_matched'] = unmatched_ref['charge']
unmatched_ref['query_idx'] = unmatched_ref.index
unmatched_ref['feature_idx'] = np.nan
if use_mob:
            unmatched_ref['mobility_matched'] = unmatched_ref['mobility'] # fill the unmatched frame, consistent with the other _matched columns
            unmatched_ref['mobility_offset'] = np.nan
for field in ['ms1_int_sum_area','ms1_int_sum_apex','ms1_int_max_area','ms1_int_max_apex','rt_start','rt_apex','rt_end','fwhm']:
if field in feature_table.keys():
unmatched_ref[field] = np.nan
unmatched_ref['feature_dist'] = np.nan
all_df.append(unmatched_ref)
features = pd.concat(all_df)
features = features.sort_values('mass_matched', ascending=True)
features = features.reset_index(drop=True)
return features
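# Sample snippet (illustration only): the core matching idea in map_ms2. Each dimension is
# divided by its allowed tolerance so that a plain Euclidean KDTree query treats all
# dimensions on the same scale. Toy values below.
features_demo = np.array([[500.0, 10.0], [600.0, 20.0]]) # (mz, rt) of two MS1 features
precursors_demo = np.array([[500.2, 10.1]]) # one MS2 precursor
tolerances_demo = np.array([1.0, 0.5]) # map_mz_range, map_rt_range
tree_demo = KDTree(features_demo / tolerances_demo, metric="euclidean")
dist_demo, idx_demo = tree_demo.query(precursors_demo / tolerances_demo, k=1)
# idx_demo -> [[0]]: the precursor is matched to the first feature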
#hide
from nbdev.showdoc import *
#hide
from nbdev.export import *
notebook2script()
###Output
Converted 00_settings.ipynb.
Converted 01_chem.ipynb.
Converted 02_io.ipynb.
Converted 03_fasta.ipynb.
Converted 04_feature_finding.ipynb.
Converted 05_search.ipynb.
Converted 06_score.ipynb.
Converted 07_recalibration.ipynb.
Converted 08_quantification.ipynb.
Converted 09_matching.ipynb.
Converted 10_constants.ipynb.
Converted 11_interface.ipynb.
Converted 12_performance.ipynb.
Converted 13_export.ipynb.
Converted 14_display.ipynb.
Converted 15_label.ipynb.
Converted additional_code.ipynb.
Converted contributing.ipynb.
Converted file_formats.ipynb.
Converted index.ipynb.
###Markdown
Feature Finding
> Functions related to feature finding
This part describes the implementation of the feature-finding algorithm. The core of the algorithm is described in the [MaxQuant-Paper](https://www.nature.com/articles/nbt.1511). The supplementary material explains the underlying methodology in great detail and is the foundation of the theoretical background that is described here. A refined version of the algorithm was presented with [Dinosaur](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4933939/), which was also used as a reference for the Python implementation.
For the algorithm, we need several modules:
1. Connecting Centroids to Hills
2. Refinement of Hills
3. Calculating Hill Statistics
4. Combining Hills to Isotope Patterns
5. Deconvolution of Isotope Patterns
Loading Data
From the `IO` library, we already have an `*.ms_data.hdf` container that contains centroided data. To use it in feature finding, we directly load the data.
Connecting Centroids to Hills
> Note: Feature finding relies heavily on the performance function decorator from the performance notebook: `@alphapept.performance.performance_function`. Part of this is that the functions will not have return values to be GPU compatible. Please check out this notebook for further information.
Connecting centroids
Feature finding starts with connecting centroids. For this we look at subsequent scans and compare peaks that are within a defined mass tolerance (`centroid_tol`). Imagine you have three scans with the following centroids:
* Scan 0: 10, 20, 30
* Scan 1: 10.2, 40.1
* Scan 2: 40, 50, 60
When comparing consecutive scans and defining the maximum delta mass to be 0.5, we find the following connections: (Scan No, Centroid No) -> (Scan No, Centroid No). As we cannot easily store tuples in the matrix, we convert the tuple containing the position of the connected centroid to an integer.
* (0,0) -> (1,0) -> (3): 10 & 10.2 -> delta = 0.2
* (1,1) -> (2,0) -> (6): 40.1 & 40 -> delta = 0.1
Finally, we store this in the `results` matrix:
$\begin{bmatrix}3 & -1 & -1 \\ -1 & 6 & -1\\ -1 & -1 & -1 \end{bmatrix}$
The corresponding `scores` matrix will look as follows:
$\begin{bmatrix}0.2 & -1 & -1 \\ -1 & 0.1 & -1\\ -1 & -1 & -1 \end{bmatrix}$
This allows us not only to easily store connections between centroids but also to perform a quick lookup for the delta of an existing connection. Note that it also only stores the best connection for each centroid. To extract the connected centroids, we can use `np.where(results >= 0)`. This implementation allows getting millions of connections within seconds.
As we also allow gaps, meaning that we can have connections between Scan 0 and Scan 2, we make the aforementioned matrix multidimensional, so that e.g. a first matrix stores the connections for no gap and the second matrix the connections with a gap of 1.
The functionality for this step is implemented in `connect_centroids_unidirection` and the wrapper `find_centroid_connections`.
###Code
#export
import numpy as np
import alphapept.performance
#This function is tested by being called from find_centroid_connections
@alphapept.performance.performance_function
def connect_centroids_unidirection(x:np.ndarray, row_borders:np.ndarray, connections:np.ndarray, scores:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Connect centroids.
Args:
x (np.ndarray): Index to datapoint. Note that this using the performance_function, so one passes an ndarray.
row_borders (np.ndarray): Row borders of the centroids array.
connections (np.ndarray): Connections matrix to store the connections
scores (np.ndarray): Score matrix to store the connections
centroids (np.ndarray): 1D Array containing the masses of the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
for gap in range(max_gap + 1):
y = x + gap + 1
if y >= row_borders.shape[0]:
return
start_index_f = 0
if x > 0:
start_index_f = row_borders[x - 1]
centroids_1 = centroids[start_index_f: row_borders[x]]
start_index_b = row_borders[y - 1]
centroids_2 = centroids[start_index_b: row_borders[y]]
i = 0
j = 0
while (i < len(centroids_1)) & (j < len(centroids_2)):
mz1, mz2 = centroids_1[i], centroids_2[j]
diff = mz1 - mz2
mz_sum = mz1 + mz2
delta = 2 * 1e6 * abs(diff) / mz_sum
if delta < centroid_tol:
if scores[x, i, gap] > delta:
scores[x, i, gap] = delta
connections[x, i, gap] = (connections.shape[1] * y) + j
if diff > 0:
j += 1
else:
i += 1
def find_centroid_connections(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Wrapper function to call connect_centroids_unidirection
Args:
rowwise_peaks (np.ndarray): Length of centroids with respect to the row borders.
row_borders (np.ndarray): Row borders of the centroids array.
centroids (np.ndarray): Array containing the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
max_centroids = int(cupy.max(rowwise_peaks))
spectra_cnt = len(row_borders) - 1
connections = cupy.full((spectra_cnt, max_centroids, max_gap + 1), -1, dtype=np.int32)
score = cupy.full((spectra_cnt, max_centroids, max_gap + 1), np.inf)
connect_centroids_unidirection(range(len(row_borders)),
row_borders,
connections,
score,
centroids,
max_gap,
centroid_tol)
score = score[cupy.where(score < np.inf)]
score_median = cupy.median(score)
score_std = cupy.std(score)
del score, max_centroids, spectra_cnt
c_shape = connections.shape
from_r, from_c, from_g = cupy.where(connections >= 0)
to_r = connections[from_r, from_c, from_g] // c_shape[1]
to_c = connections[from_r, from_c, from_g] - to_r * c_shape[1]
del connections, from_g
return from_r, from_c, to_r, to_c, score_median, score_std
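# Sample snippet (illustration only): how a (scan, centroid) position is packed into a
# single integer inside the connections matrix and unpacked again above. With 3 centroids
# per scan (connections.shape[1] == 3), (scan 1, centroid 0) becomes 3, as in the markdown
# example above.
n_cols_demo = 3
scan_demo, centroid_demo = 1, 0
code_demo = n_cols_demo * scan_demo + centroid_demo # -> 3
to_scan_demo = code_demo // n_cols_demo # -> 1
to_centroid_demo = code_demo - to_scan_demo * n_cols_demo # -> 0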
#hide
def test_find_centroid_connections():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 40.1, 40, 50, 60])
centroid_tol = 0.5*1e6
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_r, np.array([0, 0, 1, 1])) #e.g. 0,0 is connected to 0,1 -> 10 to 10.2
assert np.allclose(from_c, np.array([0, 2, 1, 2]))
assert np.allclose(to_r, np.array([1, 1, 2, 2]))
assert np.allclose(to_c, np.array([0, 1, 0, 0]))
test_find_centroid_connections()
###Output
_____no_output_____
###Markdown
We wrap the centroid connections in the function `connect_centroids`. This function converts the connections into a usable array.
###Code
#export
#the performance functions are tested with the wrapper function connect_centroids
@alphapept.performance.performance_function
def convert_connections_to_array(x:np.ndarray, from_r:np.ndarray, from_c:np.ndarray, to_r:np.ndarray, to_c:np.ndarray, row_borders:np.ndarray, out_from_idx:np.ndarray, out_to_idx:np.ndarray):
"""Convert integer indices of a matrix to coordinates.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_r (np.ndarray): From array with row coordinates.
from_c (np.ndarray): From array with column coordinates.
to_r (np.ndarray): To array with row coordinates.
to_c (np.ndarray): To array with column coordinates.
row_borders (np.ndarray): Row borders (for indexing).
out_from_idx (np.ndarray): Reporting array: 1D index from.
out_to_idx (np.ndarray): Reporting array: 1D index to.
"""
row = from_r[x]
col = from_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_from_idx[x] = start_index_f + col
row = to_r[x]
col = to_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_to_idx[x] = start_index_f + col
@alphapept.performance.performance_function
def eliminate_overarching_vertex(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray):
"""Eliminate overacrhing vertex.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
"""
if x == 0:
return
if from_idx[x - 1] == from_idx[x]:
to_idx[x] = -1
def connect_centroids(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, float, float):
"""Function to connect centroids.
Args:
rowwise_peaks (np.ndarray): Indexes for centroids.
row_borders (np.ndarray): Row borders (for indexing).
centroids (np.ndarray): Centroid data.
max_gap: Maximum gap.
centroid_tol: Centroid tol for matching centroids.
Returns:
np.ndarray: From index.
np.ndarray: To index.
float: Median score.
float: Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks,
row_borders,
centroids,
max_gap,
centroid_tol)
from_idx = cupy.zeros(len(from_r), np.int32)
to_idx = cupy.zeros(len(from_r), np.int32)
convert_connections_to_array(range(len(from_r)),
from_r,
from_c,
to_r,
to_c,
row_borders,
from_idx,
to_idx)
eliminate_overarching_vertex(range(len(from_idx)), from_idx, to_idx)
relavent_idx = cupy.where(to_idx >= 0)
from_idx = cupy.take(from_idx, relavent_idx)[0]
to_idx = cupy.take(to_idx, relavent_idx)[0]
del from_r, from_c, to_r, to_c, relavent_idx
return from_idx, to_idx, score_median, score_std
# Sample snippet to show centroid connections
import matplotlib.pyplot as plt
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
scan_no = np.array([0, 0, 0, 1, 1, 2, 2, 2])
plt.figure(figsize=(5,5))
for i, _ in enumerate(row_borders):
ctrd = centroids[_-rowwise_peaks[i]:_]
plt.plot(ctrd, np.ones_like(ctrd)*i, 'o')
for i, _ in enumerate(from_idx):
from_ = _
to_ = to_idx[i]
plt.plot([centroids[from_], centroids[to_]], [scan_no[from_], scan_no[to_]], 'k:')
plt.ylabel('scan')
plt.xlabel('m/z')
plt.ylim(len(row_borders)+0.5, -1.5)
plt.title('Peak connections')
plt.show()
#hide
def test_connect_centroids():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_idx, np.array([0, 1, 2]))
assert np.allclose(to_idx, np.array([3, 4, 6]))
test_connect_centroids()
###Output
_____no_output_____
###Markdown
Extracting hills
To extract hills, we extract the connected components from the connections.
###Code
#export
@alphapept.performance.performance_function
def path_finder(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, forward:np.ndarray, backward:np.ndarray):
"""Extracts path information and writes to path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): Array containing from indices.
to_idx (np.ndarray): Array containing to indices.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
"""
fr = from_idx[x]
to = to_idx[x]
forward[fr] = to
backward[to] = fr
@alphapept.performance.performance_function
def find_path_start(x:np.ndarray, forward:np.ndarray, backward:np.ndarray, path_starts:np.ndarray):
"""Function to find the start of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
path_starts (np.ndarray): Array to report path starts.
"""
if forward[x] > -1 and backward[x] == -1:
path_starts[x] = 0
@alphapept.performance.performance_function
def find_path_length(x:np.ndarray, path_starts:np.ndarray, forward:np.ndarray, path_cnt:np.ndarray):
"""Function to extract the length of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forward (np.ndarray): Array that stores forward information.
path_cnt (np.ndarray): Reporting array to count the paths.
"""
ctr = 1
idx = path_starts[x]
while forward[idx] > -1:
ctr += 1
idx = forward[idx]
path_cnt[x] = ctr
@alphapept.performance.performance_function
def fill_path_matrix(x:np.ndarray, path_start:np.ndarray, forwards:np.ndarray, out_hill_data:np.ndarray, out_hill_ptr:np.ndarray):
"""Function to fill the path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forwards (np.ndarray): Forward array.
out_hill_data (np.ndarray): Array containing the indices to hills.
out_hill_ptr (np.ndarray): Array containing the bounds to out_hill_data.
"""
path_position = 0
idx = path_start[x]
while idx > -1:
out_hill_data[out_hill_ptr[x] + path_position] = idx
idx = forwards[idx]
path_position += 1
def get_hills(centroids:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, hill_length_min:int=3)-> (np.ndarray, np.ndarray, int):
"""Function to get hills from centroid connections.
Args:
centroids (np.ndarray): 1D Array containing the masses of the centroids.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
hill_length_min (int): Minimum hill length:
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
forward = cupy.full(centroids.shape[0], -1)
backward = cupy.full(centroids.shape[0], -1)
path_starts = cupy.full(centroids.shape[0], -1)
path_finder(range(len(from_idx)), from_idx, to_idx, forward, backward)
find_path_start(range(len(forward)), forward, backward, path_starts)
    # path_starts will now contain the first index of all connected centroids
path_starts = cupy.where(path_starts == 0)[0]
path_node_cnt = cupy.full(path_starts.shape[0], -1)
find_path_length(range(len(path_starts)), path_starts, forward, path_node_cnt)
relavant_path_node = cupy.where(path_node_cnt >= hill_length_min)[0]
path_starts = cupy.take(path_starts, relavant_path_node)
path_node_cnt = cupy.take(path_node_cnt, relavant_path_node)
del relavant_path_node
    # Generate the hill matrix index ptr data
hill_ptrs = cupy.empty((path_starts.shape[0] + 1), dtype=cupy.int32)
hill_ptrs[0] = 0
hill_ptrs[1:] = path_node_cnt.cumsum()
hill_data = cupy.empty((int(hill_ptrs[-1])), np.int32)
fill_path_matrix(range(len(path_starts)), path_starts, forward, hill_data, hill_ptrs)
del from_idx, to_idx, path_starts, forward, backward
return hill_ptrs, hill_data, path_node_cnt
def extract_hills(query_data:dict, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, int, float, float):
"""[summary]
Args:
query_data (dict): Data structure containing the query data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
score_median (float): Median score.
score_std (float): Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
indices = cupy.array(query_data['indices_ms1'])
mass_data = cupy.array(query_data['mass_list_ms1'])
rowwise_peaks = indices[1:] - indices[:-1]
row_borders = indices[1:]
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, mass_data, max_gap, centroid_tol)
hill_ptrs, hill_data, path_node_cnt = get_hills(mass_data, from_idx, to_idx)
del mass_data
del indices
if cupy.__name__ != 'numpy':
hill_ptrs = hill_ptrs.get()
hill_data = hill_data.get()
path_node_cnt = path_node_cnt.get()
score_median = score_median.get()
score_std = score_std.get()
return hill_ptrs, hill_data, path_node_cnt, score_median, score_std
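# Sample snippet (illustration only): how the forward / backward arrays used above encode
# hill paths. For the toy connections 0 -> 3 -> 6, index 0 is a path start (it has a
# forward link but no backward link) and the path is walked by following forward until -1.
from_idx_demo = np.array([0, 3])
to_idx_demo = np.array([3, 6])
forward_demo = np.full(7, -1)
backward_demo = np.full(7, -1)
forward_demo[from_idx_demo] = to_idx_demo
backward_demo[to_idx_demo] = from_idx_demo
path_demo = []
node_demo = 0 # a start: forward_demo[0] > -1 and backward_demo[0] == -1
while node_demo > -1:
    path_demo.append(node_demo)
    node_demo = forward_demo[node_demo]
# path_demo -> [0, 3, 6]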
###Output
_____no_output_____
###Markdown
Hill Splitting
When a hill has two or more maxima, we would like to split it at the minimum position. For this, we use a recursive approach. First, the minimum of a hill is detected. A hill is split at this minimum if the smaller of the surrounding maxima is at least a factor of `hill_split_level` larger than the minimum. For each split, the process is repeated.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def fast_minima(y:np.ndarray)->np.ndarray:
"""Function to calculate the local minimas of an array.
Args:
y (np.ndarray): Input array.
Returns:
np.ndarray: Array containing minima positions.
"""
minima = np.zeros(len(y))
start = 0
end = len(y)
for i in range(start + 2, end - 2):
if ((y[i - 1] > y[i]) & (y[i + 1] > y[i])) \
or ((y[i - 1] > y[i]) & (y[i + 1] == y[i]) & (y[i + 2] > y[i])) \
or ((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] > y[i])) \
or (((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] == y[i]) & \
(y[i + 2] > y[i]))):
minima[i] = 1
minima = minima.nonzero()[0]
return minima
#hide
def test_fast_minima():
assert fast_minima(np.array([3,2,1,0,1,2,3])) == 3
assert fast_minima(np.array([4,3,2,1,0,1,2])) == 4
assert len(fast_minima(np.array([5,4,3,2,1,0,1]))) == 0
assert len(fast_minima(np.array([6,5,4,3,2,1,0]))) == 0
test_fast_minima()
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def split(k:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_data:np.ndarray, splits:np.ndarray, hill_split_level:float, window:int):
"""Function to split hills.
Args:
k (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_data (np.ndarray): Array containing the indices to hills.
splits (np.ndarray): Array containing splits.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
"""
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_trace = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.median(int_trace[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.mean(int_trace[min_index:max_index])
#minima = (np.diff(np.sign(np.diff(int_trace))) > 0).nonzero()[0] + 1 #This works also but is slower
minima = fast_minima(int_trace)
sorted_minima = np.argsort(int_trace[minima])
minima = minima[sorted_minima]
for min_ in minima:
minval = int_trace[min_]
left_max = max(int_trace[:min_])
right_max = max(int_trace[min_:])
min_max = min(left_max, right_max)
if (minval == 0) or ((min_max / minval) > hill_split_level):
splits[k] = start+min_
break # Split only once per iteration
def split_hills(hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, hill_split_level:float, window:int)->np.ndarray:
"""Wrapper function to split hills
Args:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
Returns:
np.ndarray: Array containing the bounds to the hill_data with splits.
"""
splits = np.zeros(len(int_data), dtype=np.int32)
to_check = np.arange(len(hill_ptrs)-1)
while len(to_check) > 0:
split(to_check, hill_ptrs, int_data, hill_data, splits, hill_split_level, window)
splitpoints = splits.nonzero()[0]
to_check = np.zeros(len(hill_ptrs))
to_check[splitpoints] = 1
to_check = np.insert(to_check, splitpoints+1, np.ones(len(splitpoints))).nonzero()[0] #array, index, what
hill_ptrs = np.insert(hill_ptrs, splitpoints+1, splits[splitpoints]) #array, index, what
splits = np.zeros(len(hill_ptrs), dtype=np.int32) #was cupy np.int32
return hill_ptrs
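# Sample snippet (illustration only): the splitting criterion on a toy smoothed trace.
# The trace has two maxima (5 and 8) separated by a minimum of 1; the smaller maximum
# divided by the minimum is 5, which exceeds a typical hill_split_level of 1.3, so the
# hill would be split at the position of the minimum (index 3).
int_trace_demo = np.array([1., 3., 5., 1., 6., 8., 2.])
min_pos_demo = fast_minima(int_trace_demo)[0] # -> 3
min_max_demo = min(int_trace_demo[:min_pos_demo].max(), int_trace_demo[min_pos_demo:].max()) # -> 5.0
split_demo = min_max_demo / int_trace_demo[min_pos_demo] > 1.3 # -> True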
###Output
_____no_output_____
###Markdown
Filter Hills
To filter hills, we define a minimum length `hill_min_length`. All peaks below the threshold `hill_peak_min_length` are accepted as is. For longer hills, the intensity at the start and the end is compared to the maximum intensity. If the ratio of the maximum raw intensity to the smoothed intensity at the beginning and end is larger than `hill_peak_factor`, the hills are accepted.
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def check_large_hills(idx:np.ndarray, large_peaks:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, to_remove:np.ndarray, large_peak:int = 40, hill_peak_factor:float = 2, window:int=1):
"""Function to check large hills and flag them for removal.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
large_peaks (np.ndarray): Array containing large peaks.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
to_remove (np.ndarray): Array with indexes to remove.
large_peak (int, optional): Length criterion when a peak is large. Defaults to 40.
hill_peak_factor (float, optional): Hill maximum criterion. Defaults to 2.
        window (int, optional): Smoothing window. Defaults to 1.
"""
k = large_peaks[idx]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_smooth_ = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.median(int_smooth_[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.mean(int_smooth_[min_index:max_index])
int_ = int_data[int_idx]
max_ = np.max(int_)
if (max_ / int_smooth_[0] > hill_peak_factor) & (max_ / int_smooth_[-1] > hill_peak_factor):
to_remove[idx] = 0
def filter_hills(hill_data:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_check_large:int =40, window:int = 1) -> (np.ndarray, np.ndarray):
"""Filters large hills.
Args:
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
        hill_check_large (int, optional): Length criterion when a hill is considered large. Defaults to 40.
window (int, optional): Smoothing window. Defaults to 1.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
"""
large_peaks = np.where(np.diff(hill_ptrs)>=hill_check_large)[0]
to_remove = np.ones(len(large_peaks), dtype=np.int32)
check_large_hills(range(len(large_peaks)), large_peaks, hill_ptrs, hill_data, int_data, to_remove, window)
idx_ = np.ones(len(hill_data), dtype = np.int32)
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
to_remove = to_remove.nonzero()[0]
for _ in to_remove:
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_
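# Sample snippet (illustration only): the keep criterion used in check_large_hills.
# A long hill is kept (not removed) if its maximum raw intensity exceeds the smoothed
# intensity at both the first and the last position by more than hill_peak_factor (2).
smoothed_demo = np.array([10., 30., 90., 100., 80., 25., 12.])
max_raw_demo = 100.0
keep_demo = (max_raw_demo / smoothed_demo[0] > 2) & (max_raw_demo / smoothed_demo[-1] > 2) # -> True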
###Output
_____no_output_____
###Markdown
Calculating Hill Statistics
Next, we calculate summary statistics for the connected centroids. We can obtain a high precision mass estimate for each hill by taking the average of the masses and weighting this by their intensities:
$$\overline{m} = \frac{\sum_{j=1}^{n}m_jI_j}{\sum_{j=1}^{n}I_j}$$
Since the mass estimate $\overline{m}$ in the equation above is more complicated than just an average of the $m_j$, a standard deviation based estimate of the error would not be appropriate. Therefore, we calculate the error as a bootstrap estimate over $B=150$ bootstrap replications:
$$\Delta \overline{m} = \sqrt{\frac{\sum_{b=1}^{B}(\overline{m}_b - \overline{m})^2}{B-1}}$$
The calculation of hill statistics for a single hill is implemented in `hill_stats`. To calculate the hill stats for a list of hills, we can call the wrapper `get_hill_data`.
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def hill_stats(idx:np.ndarray, hill_range:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, mass_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, stats:np.ndarray, hill_nboot_max:int, hill_nboot:int):
"""Function to calculate hill stats.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_range (np.ndarray): Hill range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
mass_data (np.ndarray): Array containing mass data.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
"""
np.random.seed(42)
start = hill_ptrs[idx]
end = hill_ptrs[idx + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
mz_ = mass_data[idx_]
int_sum = np.sum(int_)
int_area = np.abs(np.trapz(rt_[rt_idx[idx_]], int_)) #Area
rt_min = rt_[rt_idx[idx_]].min()
rt_max = rt_[rt_idx[idx_]].max()
if len(idx_) > hill_nboot_max:
bootsize = hill_nboot_max
else:
bootsize = len(idx_)
averages = np.zeros(hill_nboot)
average = 0
for i in range(hill_nboot):
boot = np.random.choice(len(int_), bootsize, replace=True)
boot_mz = np.sum((mz_[boot] * int_[boot])) / np.sum(int_[boot])
averages[i] = boot_mz
average += boot_mz
average_mz = average/hill_nboot
delta = 0
for i in range(hill_nboot):
delta += (average_mz - averages[i]) ** 2 #maybe easier?
delta_m = np.sqrt(delta / (hill_nboot - 1))
stats[idx,0] = average_mz
stats[idx,1] = delta_m
stats[idx,2] = int_sum
stats[idx,3] = int_area
stats[idx,4] = rt_min
stats[idx,5] = rt_max
def remove_duplicates(stats:np.ndarray, hill_data:np.ndarray, hill_ptrs:np.ndarray)-> (np.ndarray, np.ndarray, np.ndarray):
"""Remove duplicate hills.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
np.ndarray: Filtered hill stats.
"""
dups = pd.DataFrame(stats).duplicated() #all duplicated hills
idx_ = np.ones(len(hill_data), dtype = np.int32) #keep all
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
for _ in np.arange(len(stats))[dups]: #duplicates will be assigned zeros
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_, stats[~dups]
def get_hill_data(query_data:dict, hill_ptrs:np.ndarray, hill_data:np.ndarray, hill_nboot_max:int = 300, hill_nboot:int = 150) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to get the hill data.
Args:
query_data (dict): Data structure containing the query data.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
Returns:
np.ndarray: Hill stats.
np.ndarray: Sortindex.
np.ndarray: Upper index.
np.ndarray: Scan index.
np.ndarray: Hill data.
np.ndarray: Hill points.
"""
indices_ = np.array(query_data['indices_ms1'])
rt_ = np.array(query_data['rt_list_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
scan_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
int_data = np.array(query_data['int_list_ms1'])
stats = np.zeros((len(hill_ptrs)-1, 6)) #average_mz, delta_m, int_sum, int_area, rt_min, rt_max
hill_stats(range(len(hill_ptrs)-1), np.arange(len(hill_ptrs)-1), hill_ptrs, hill_data, int_data, mass_data, rt_, scan_idx, stats, hill_nboot_max, hill_nboot)
# sort the stats
sortindex = np.argsort(stats[:,4]) #Sorted by rt_min
stats = stats[sortindex,:]
idxs_upper = stats[:,4].searchsorted(stats[:,5], side="right")
sortindex_ = np.arange(len(sortindex))[sortindex]
return stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs
###Output
_____no_output_____
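###Markdown
The cell below is a small illustrative sketch, not part of the exported module: it recomputes the intensity-weighted mass and its bootstrap error for a single toy hill with plain NumPy, mirroring what `hill_stats` does per hill. The variable names and values (`toy_mz`, `toy_int`) are made up for this example.
###Code
toy_mz = np.array([500.0008, 500.0011, 500.0015, 500.0021, 500.0012])
toy_int = np.array([1e4, 3e4, 5e4, 3e4, 1e4])
B = 150  # number of bootstrap replications
boot_means = np.zeros(B)
for b in range(B):
    sample = np.random.choice(len(toy_mz), len(toy_mz), replace=True)
    boot_means[b] = np.sum(toy_mz[sample] * toy_int[sample]) / np.sum(toy_int[sample])
weighted_mass = boot_means.mean()  # hill_stats averages the bootstrapped weighted means
mass_error = np.sqrt(np.sum((boot_means - weighted_mass) ** 2) / (B - 1))
print(weighted_mass, mass_error)
###Output
_____no_output_____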
###Markdown
Combining Hills to Isotope PatternsAfter obtaining summary statistics of hills, the next step is to check whether they belong together to form an isotope pattern. For this, we check whether it is possible that they are neighbors in an isotope pattern, e.g. one having a 12C atom that has been replaced by a 13C version. The detailed criterion for the check is implemented in `check_isotope_pattern` and is as follows:$$\left | \Delta m-\frac{\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m_{1}}^{2} +\Delta {m_{2}}^{2}}$$The left side contains $\Delta m$, being the delta of the precise mass estimates from the summary statistics, and $\Delta M = 1.00286864$, which is the mass difference between the 13C peak and the monoisotopic peak in an averagine molecule of 1500 Da mass, divided by the charge $z$.The right side contains $\Delta S = 0.0109135$, which is the maximum shift that a sulphur atom can cause ($\Delta S = 2m(^{13}C) - 2m(^{12}C) - m(^{34}S) + m(^{32}S)$), and $\Delta {m_{1}}$ and $\Delta {m_{2}}$, which are the bootstrapped mass standard deviations.
###Code
#export
from alphapept.constants import mass_dict
DELTA_M = mass_dict['delta_M']
DELTA_S = mass_dict['delta_S']
maximum_offset = DELTA_M + DELTA_S
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, iso_mass_range:int = 5)-> bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
iso_mass_range (int, optional): Mass range. Defaults to 5.
Returns:
bool: Flag indicating whether the two masses could belong to the same isotope pattern.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
delta_mass = np.abs(mass1 - mass2)
left_side = np.abs(delta_mass - DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
#hide
def test_check_isotope_pattern():
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == True
mass2, delta_mass2 = 102.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == False
test_check_isotope_pattern()
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
mass2, delta_mass2 = 102.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
###Output
True
False
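###Markdown
To make the criterion more tangible, the cell below (illustrative values only) spells out the two sides of the inequality for the same toy masses used above, exactly as `check_isotope_pattern` computes them from `DELTA_M` and `DELTA_S`.
###Code
charge = 1
iso_mass_range = 5
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
left_side = np.abs(np.abs(mass1 - mass2) - DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + (delta_mass1 * iso_mass_range) ** 2 + (delta_mass2 * iso_mass_range) ** 2)
print(left_side, right_side, left_side <= right_side)
###Output
_____no_output_____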
###Markdown
Cosine Correlation of two hills An additional criterion that is checked is that the intensity profiles have sufficient overlap in retention time. This is validated by ensuring that two hills have a cosine correlation of at least 0.6.$$\frac{\sum_{s=s_{min}}^{s_{max}}I_sJ_s}{\sqrt{\sum_{s=s_{min}}^{s_{max}}I_s^{2} \sum_{s=s_{min}}^{s_{max}}J_s^{2}}} \geq 0.6$$The intensities of two hills are only compared if both have an intensity value in a particular scan; otherwise, the intensity is set to zero. Additionally, an overlap of at least three elements is required.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def correlate(scans_:np.ndarray, scans_2:np.ndarray, int_:np.ndarray, int_2:np.ndarray)->float:
"""Correlate two scans.
Args:
scans_ (np.ndarray): Scan numbers of the first hill.
scans_2 (np.ndarray): Scan numbers of the second hill.
int_ (np.ndarray): Intensities of the first hill.
int_2 (np.ndarray): Intensities of the second hill.
Returns:
float: Correlation.
"""
min_one, max_one = scans_[0], scans_[-1]
min_two, max_two = scans_2[0], scans_2[-1]
if min_one + 3 > max_two: # at least an overlap of 3 elements
corr = 0
elif min_two + 3 > max_one:
corr = 0
else:
min_s = min(min_one, min_two)
max_s = max(max_one, max_two)
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[scans_ - min_s] = int_
int_two_scaled[scans_2 - min_s] = int_2
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
###Output
_____no_output_____
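###Markdown
The cell below is a quick illustrative call with made-up values: two partially overlapping toy hills are compared on a common scan axis and their cosine correlation is printed.
###Code
toy_scans_one = np.arange(10, 20)
toy_scans_two = np.arange(11, 21)
toy_int_one = np.array([1., 3., 8., 15., 20., 15., 8., 3., 1., 0.5])
toy_int_two = np.array([2., 6., 16., 30., 40., 30., 16., 6., 2., 1.])
print(correlate(toy_scans_one, toy_scans_two, toy_int_one, toy_int_two))
###Output
_____no_output_____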
###Markdown
Extracting pre-Isotope PatternsNow having two criteria to check whether hills could, in principle, belong together, we define the functions `extract_edge` and `edge_correlation`, together with the wrapper `get_pre_isotope_patterns`, to extract the connected hills. To minimize the number of comparisons we need to perform, we only compare the hills that overlap in time (i.e., the start of one hill `rt_min` needs to be before the end of the other hill `rt_max`) and are less than the sum of $\Delta M$ and $\Delta S$ apart. To extract all hills that belong together, we again rely on the `NetworkX` package to extract the connected components.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def extract_edge(stats:np.ndarray, idxs_upper:np.ndarray, runner:int, max_index:int, maximum_offset:float, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:int=5)->list:
"""Extract edges.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparing.
runner (int): Index.
max_index (int): Unused.
maximum_offset (float): Maximum offset when comparing edges.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
Returns:
list: List of edges.
"""
edges = []
mass1 = stats[runner, 0]
delta_mass1 = stats[runner, 1]
for j in range(runner+1, idxs_upper[runner]):
mass2 = stats[j, 0]
if np.abs(mass2 - mass1) <= maximum_offset:
delta_mass2 = stats[j, 1]
for charge in range(iso_charge_min, iso_charge_max + 1):
if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
edges.append((runner, j))
break
return edges
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def edge_correlation(idx:np.ndarray, to_keep:np.ndarray, sortindex_:np.ndarray, pre_edges:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float):
"""Correlates two edges and flag them it they should be kept.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
to_keep (np.ndarray): Array with indices which edges should be kept.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
pre_edges (np.ndarray): Array with pre edges.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
"""
edge = pre_edges[idx,:]
y = sortindex_[edge[0]]
start = hill_ptrs[y]
end = hill_ptrs[y + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
con = sortindex_[edge[1]]
start = hill_ptrs[con]
end = hill_ptrs[con + 1]
idx_2 = hill_data[start:end]
int_2 = int_data[idx_2]
scans_2 = scan_idx[idx_2]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
to_keep[idx] = 1
#export
import networkx as nx
def get_pre_isotope_patterns(stats:np.ndarray, idxs_upper:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, maximum_offset:float, iso_charge_min:int=1, iso_charge_max:int=6, iso_mass_range:float=5, cc_cutoff:float=0.6)->list:
"""Function to extract pre isotope patterns.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparison.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
maximum_offset (float): Maximum offset when matching.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
cc_cutoff (float, optional): Correlation cutoff. Defaults to 0.6.
Returns:
list: List of pre isotope patterns.
"""
pre_edges = []
# Step 1
for runner in range(len(stats)):
pre_edges.extend(extract_edge(stats, idxs_upper, runner, idxs_upper[runner], maximum_offset, iso_charge_min, iso_charge_max, iso_mass_range))
to_keep = np.zeros(len(pre_edges), dtype='int')
pre_edges = np.array(pre_edges)
edge_correlation(range(len(to_keep)), to_keep, sortindex_, pre_edges, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
edges = pre_edges[to_keep.nonzero()]
G2 = nx.Graph()
for i in range(len(edges)):
G2.add_edge(edges[i][0], edges[i][1])
pre_isotope_patterns = [
sorted(list(c))
for c in sorted(nx.connected_components(G2), key=len, reverse=True)
]
return pre_isotope_patterns
###Output
_____no_output_____
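###Markdown
The last step above relies on `NetworkX` to group the correlating edges into connected components. The cell below is a minimal, self-contained illustration with a made-up edge list: hills 0, 1 and 2 end up in one pre-isotope pattern, hills 5 and 6 in another.
###Code
import networkx as nx
toy_edges = [(0, 1), (1, 2), (5, 6)]
G = nx.Graph()
G.add_edges_from(toy_edges)
print([sorted(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)])
###Output
_____no_output_____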
###Markdown
Extracting Isotope PatternsThe extracted pre-isotope patterns may not be consistent because their pair-wise mass differences may not correspond to the same charge. To extract isotope patterns from pre-isotope patterns, we need to ensure that they are consistent for a single charge. To do this, we start with the 100 most intense peaks from a pre-isotope pattern to be used as seeds. For each seed and charge we then try to extract the longest consistent isotope pattern. To check whether a hill is consistent with the seed, we employ a modified checking criterion (`check_isotope_pattern_directed`) to be as follows:$$\left | m-m_j-\frac{j\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m}^{2} +\Delta {m_{j}}^{2}}$$Here $m$ is the mass of a seed peak, and $m_{j}$ refers to a peak relative to the seed. $j$ refers to the peaks to the left or right (negative or positive index) within the pattern. $j$ needs to run over consecutive values so that gaps are not allowed. Besides this consistency check, two hills are also checked to have a cosine correlation of at least 0.6.Programmatically, this is implemented in `grow_trail` and `grow`. These functions add matching hills to the seed on the left and right side until no more hills can be added.
###Code
#export
from numba.typed import List
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern_directed(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, index:int, iso_mass_range:float)->bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
index (int): Signed number of isotope steps between the second hill and the seed.
iso_mass_range (float): Isotope mass ranges.
Returns:
bool: Flag if two isotope patterns belong together.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
left_side = np.abs(mass1 - mass2 - index * DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
@alphapept.performance.compile_function(compilation_mode="numba")
def grow(trail:List, seed:int, direction:int, relative_pos:int, index:int, stats:np.ndarray, pattern:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Grows isotope pattern based on a seed and direction.
Args:
trail (List): List of hills belonging to a pattern.
seed (int): Seed position.
direction (int): Direction in which to grow the trail
relative_pos (int): Relative position.
index (int): Index.
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: List of hills belonging to a pattern.
"""
x = pattern[seed] # This is the seed
mass1 = stats[x,0]
delta_mass1 = stats[x,1]
k = sortindex_[x]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
growing = True
while growing:
if direction == 1:
if seed + relative_pos == len(pattern):
growing = False
break
else:
if seed + relative_pos < 0:
growing = False
break
y = pattern[seed + relative_pos] # This is a reference peak
l = sortindex_[y]
mass2 = stats[y,0]
delta_mass2 = stats[y,1]
start = hill_ptrs[l]
end = hill_ptrs[l + 1]
idx_ = hill_data[start:end]
int_2 = int_data[idx_]
scans_2 = scan_idx[idx_]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
if check_isotope_pattern_directed(mass1, mass2, delta_mass1, delta_mass2, charge, -direction * index, iso_mass_range):
if direction == 1:
trail.append(y)
else:
trail.insert(0, y)
index += (
1
) # Greedy matching: Only one edge for a specific distance, will not affect the following matches
delta_mass = np.abs(mass1 - mass2)
if (delta_mass > (DELTA_M+DELTA_S) * index): # the pattern is sorted so there is a maximum to look back
break
relative_pos += direction
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def grow_trail(seed:int, pattern:np.ndarray, stats:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to grow an isotope pattern to the left and right side.
Args:
seed (int): Seed position.
pattern (np.ndarray): Isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Isotope pattern.
"""
x = pattern[seed]
trail = List()
trail.append(x)
trail = grow(trail, seed, -1, -1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trail = grow(trail, seed, 1, 1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def get_trails(seed:int, pattern:np.ndarray, stats:np.ndarray, charge_range:List, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to extract trails for a given charge range.
Args:
seed (int): Seed index.
pattern (np.ndarray): Pre isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge_range (List): Charge range.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Trail of consistent hills.
"""
trails = []
for charge in charge_range:
trail = grow_trail(seed, pattern, stats, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trails.append(trail)
return trails
#export
import matplotlib.pyplot as plt

def plot_pattern(pattern:np.ndarray, sorted_hills:np.ndarray, centroids:np.ndarray, hill_data:np.ndarray):
"""Helper function to plot a pattern.
Args:
pattern (np.ndarray): Pre isotope pattern.
sorted_hills (np.ndarray): Hills, sorted.
centroids (np.ndarray): 1D Array containing the masses of the centroids.
hill_data (np.ndarray): Array containing the indices to hills.
"""
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10,10))
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
mzs = []
rts = []
ints = []
for entry in pattern:
hill = sorted_hills[entry]
hill_data = np.array([centroids[_[0]][_[1]] for _ in hill], dtype=centroid_dtype)
int_profile = hill_data["int"]
ax1.plot(hill_data["rt"], hill_data["int"])
ax2.scatter(hill_data["rt"], hill_data["mz"], s = hill_data["int"]/5e5 )
ax1.set_title('Pattern')
ax1.set_xlabel('RT (min)')
ax1.set_ylabel('Intensity')
ax2.set_xlabel('RT (min)')
ax2.set_ylabel('m/z')
plt.show()
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def get_minpos(y:np.ndarray, iso_split_level:float)->List:
"""Function to get a list of minima in a trace.
A minimum is returned if the ratio of the lower of the two surrounding maxima to the minimum is at least the splitting factor.
Args:
y (np.ndarray): Input array.
iso_split_level (float): Isotope split level.
Returns:
List: List with min positions.
"""
minima = get_local_minima(y)
minima_list = List()
for minpos in minima:
minval = y[minpos]
left_max = (y[:minpos]).max()
right_max = (y[minpos:]).max()
minimum_max = min(left_max, right_max)
if minimum_max / minval >= iso_split_level:
minima_list.append(minpos)
return minima_list
@alphapept.performance.compile_function(compilation_mode="numba")
def get_local_minima(y:np.ndarray)->List:
"""Function to return all local minima of a array
Args:
y (np.ndarray): Input array.
Returns:
List: List with indices to minima.
"""
minima = List()
for i in range(1, len(y) - 1):
if is_local_minima(y, i):
minima.append(i)
return minima
@alphapept.performance.compile_function(compilation_mode="numba")
def is_local_minima(y:np.ndarray, i:int)->bool:
"""Check if position is a local minima.
Args:
y (np.ndarray): Input array.
i (int): Position to check.
Returns:
bool: Flag whether the position is a local minimum.
"""
return (y[i - 1] > y[i]) & (y[i + 1] > y[i])
@alphapept.performance.compile_function(compilation_mode="numba")
def truncate(array:np.ndarray, intensity_profile:np.ndarray, seedpos:int, iso_split_level:float)->np.ndarray:
"""Function to truncate an intensity profile around its seedposition.
Args:
array (np.ndarray): Input array.
intensity_profile (np.ndarray): Intensities for the input array.
seedpos (int): Seed position.
iso_split_level (float): Split level.
Returns:
np.ndarray: Truncated array.
"""
minima = int_list_to_array(get_minpos(intensity_profile, iso_split_level))
if len(minima) > 0:
left_minima = minima[minima < seedpos]
right_minima = minima[minima > seedpos]
# Use the closest minima to the left and right of the seed (if any) as truncation bounds
if len(left_minima) > 0:
minpos = left_minima[-1]
else:
minpos = 0
if len(right_minima) > 0:
maxpos = right_minima[0]
else:
maxpos = len(array)
array = array[minpos:maxpos+1]
return array
#hide
def test_get_minpos():
"""
Generate an intensity profile with local minima
Check that the minima are found
"""
intensity_profile = np.ones(20) * 10
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
minima = get_minpos(intensity_profile, 2)
minima_list = [_ for _ in minima]
assert minima_list == minima_ref
test_get_minpos()
###Output
_____no_output_____
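###Markdown
The split level controls how deep a valley has to be before it counts as a minimum. The toy profile below (illustrative values only) contains a deep and a shallow valley; both pass a permissive split level, but only the deep one passes a stricter one.
###Code
toy_profile = np.ones(9) * 10
toy_profile[2] = 2   # deep valley: the surrounding maxima are 5x higher
toy_profile[6] = 8   # shallow valley: the surrounding maxima are only 1.25x higher
print([_ for _ in get_minpos(toy_profile, 1.2)])  # both minima are returned
print([_ for _ in get_minpos(toy_profile, 2.0)])  # only the deep valley is returned
###Output
_____no_output_____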
###Markdown
Isolating Isotope PatternsThe extraction of the longest consistent isotope pattern is implemented in `isolate_isotope_pattern`. Here, three additional checks for an isotope pattern are implemented. The first one is `truncate`: the seed position is checked for a minimum to its left or right side, and if a minimum is found, the isotope pattern is cut off at this position.The second one is a mass filter. If the seed has a mass smaller than 1000, the intensity maximum is detected, and all smaller masses are discarded. This reflects the averagine distribution for small masses, where no minimum on the left side can be found.The third one is `check_averagine`, which relies on `pattern_to_mz` and `cosine_averagine`. It is used to ensure that the extracted isotope pattern has a cosine correlation with the averagine isotope pattern of the same mass of at least 0.6.After the longest consistent isotope pattern is found, the hills are removed from the pre-isotope pattern, and the process is repeated until no more isotope patterns can be extracted from the pre-isotope patterns.
###Code
#export
from alphapept.chem import mass_to_dist
from alphapept.constants import averagine_aa, isotopes, Isotope
from numba.typed import Dict
@alphapept.performance.compile_function(compilation_mode="numba")
def check_averagine(stats:np.ndarray, pattern:np.ndarray, charge:int, averagine_aa:Dict, isotopes:Dict)->float:
"""Function to compare a pattern to an averagine model.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
Returns:
float: Averagine correlation.
"""
masses, intensity = pattern_to_mz(stats, pattern, charge)
spec_one = np.floor(masses).astype(np.int64)
int_one = intensity
spec_two, int_two = mass_to_dist(np.min(masses), averagine_aa, isotopes) # maybe change to no rounded version
spec_two = np.floor(spec_two).astype(np.int64)
return cosine_averagine(int_one, int_two, spec_one, spec_two)
@alphapept.performance.compile_function(compilation_mode="numba")
def pattern_to_mz(stats:np.ndarray, pattern:np.ndarray, charge:int)-> (np.ndarray, np.ndarray):
"""Function to calculate masses and intensities from pattern for a given charge.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge of the pattern.
Returns:
np.ndarray: masses
np.ndarray: intensity
"""
mzs = np.zeros(len(pattern))
ints = np.zeros(len(pattern))
for i in range(len(pattern)):
entry = pattern[i]
mzs[i] = mz_to_mass(stats[entry,0], charge)
ints[i] = stats[entry,2]
sortindex = np.argsort(mzs)
masses = mzs[sortindex]
intensity = ints[sortindex]
return masses, intensity
@alphapept.performance.compile_function(compilation_mode="numba")
def cosine_averagine(int_one:np.ndarray, int_two:np.ndarray, spec_one:np.ndarray, spec_two:np.ndarray)-> float:
"""Calculate the cosine correlation of two hills.
Args:
int_one (np.ndarray): Intensity of the first hill.
int_two (np.ndarray): Intensity of the second hill.
spec_one (np.ndarray): Scan numbers of the first hill.
spec_two (np.ndarray): Scan numbers of the second hill.
Returns:
float: Cosine
"""
min_one, max_one = spec_one[0], spec_one[-1]
min_two, max_two = spec_two[0], spec_two[-1]
min_s = np.min(np.array([min_one, min_two]))
max_s = np.max(np.array([max_one, max_two]))
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[spec_one - min_s] = int_one
int_two_scaled[spec_two - min_s] = int_two
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
@alphapept.performance.compile_function(compilation_mode="numba")
def int_list_to_array(numba_list:List)->np.ndarray:
"""Numba compatbilte function to convert a numba list with integers to a numpy array
Args:
numba_list (List): Input numba-typed List.
Returns:
np.ndarray: Output numpy array.
"""
array = np.zeros(len(numba_list), dtype=np.int64)
for i in range(len(array)):
array[i] = numba_list[i]
return array
M_PROTON = mass_dict['Proton']
@alphapept.performance.compile_function(compilation_mode="numba")
def mz_to_mass(mz:float, charge:int)->float:
"""Function to calculate the mass from a mz value.
Args:
mz (float): M/z
charge (int): Charge.
Raises:
NotImplementedError: When a negative charge is used.
Returns:
float: mass
"""
if charge < 0:
raise NotImplementedError("Negative Charges not implemented.")
mass = mz * charge - charge * M_PROTON
return mass
#hide
if False:
def test_truncate():
"""
Generate an intensity profile with local minima
Check whether the profile is correctly truncated with respect to the seed
"""
array = np.arange(0, 20)
intensity_profile = np.ones(20) * 10
iso_split_level = 1.3
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
seedpos = 5
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([3, 4, 5, 6, 7]))
seedpos = 0
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([0, 1, 2, 3]))
seedpos = len(array)
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([17, 18, 19]))
test_truncate()
###Output
_____no_output_____
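###Markdown
Two quick illustrative calls with made-up values: `mz_to_mass` converts the m/z of a doubly charged ion back to a neutral mass, and `cosine_averagine` compares two isotope envelopes that have been placed on an integer mass grid, as done in `check_averagine`.
###Code
print(mz_to_mass(500.5, 2))  # neutral mass of a doubly charged ion at m/z 500.5
toy_spec_one = np.array([1000, 1001, 1002])
toy_int_one = np.array([1.0, 0.6, 0.2])
toy_spec_two = np.array([1000, 1001, 1002, 1003])
toy_int_two = np.array([1.0, 0.55, 0.25, 0.08])
print(cosine_averagine(toy_int_one, toy_int_two, toy_spec_one, toy_spec_two))
###Output
_____no_output_____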
###Markdown
Isotope PatternsThe wrapper function `get_isotope_patterns` iterates over all pre_isotope_patterns.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def isolate_isotope_pattern(pre_pattern:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, iso_mass_range:float, charge_range:List, averagine_aa:Dict, isotopes:Dict, iso_n_seeds:int, cc_cutoff:float, iso_split_level:float)->(np.ndarray, int):
"""Isolate isotope patterns.
Args:
pre_pattern (np.ndarray): Pre isotope pattern.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
iso_mass_range (float): Mass range for checking isotope patterns.
charge_range (List): Charge range.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_n_seeds (int): Number of seeds.
cc_cutoff (float): Cutoff value for what is considered correlating.
iso_split_level (float): Split level when isotopes are split.
Returns:
np.ndarray: Array with the best pattern.
int: Charge of the best pattern.
"""
longest_trace = 0
champion_trace = None
champion_charge = 0
champion_intensity = 0
# Sort the pattern by m/z
sortindex = np.argsort(stats[pre_pattern][:,0]) #mass
sorted_pattern = pre_pattern[sortindex]
massindex = np.argsort(stats[sorted_pattern][:,2])[::-1][:iso_n_seeds] #most intense hills first
# Use the most intense elements of the pre_pattern as seeds
for seed in massindex: # Loop through all seeds
seed_global = sorted_pattern[seed]
trails = get_trails(seed, sorted_pattern, stats, charge_range, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
for index, trail in enumerate(trails):
if len(trail) >= longest_trace: # Needs to be at least as long as the current champion
arr = int_list_to_array(trail)
intensity_profile = stats[arr][:,2]
seedpos = np.nonzero(arr==seed_global)[0][0]
# truncate around the seed...
arr = truncate(arr, intensity_profile, seedpos, iso_split_level)
intensity_profile = stats[arr][:,2]
# Remove lower masses:
# Take the index of the maximum and remove all masses on the left side
if charge_range[index] * stats[seed_global, 0] < 1000:
maxpos = np.argmax(intensity_profile)
arr = arr[maxpos:]
intensity_profile = stats[arr][:,2]
if (len(arr) > longest_trace) | ((len(arr) == longest_trace) & (intensity_profile.sum() > champion_intensity)):
# Averagine check
cc = check_averagine(stats, arr, charge_range[index], averagine_aa, isotopes)
if cc > 0.6:
# Update the champion
champion_trace = arr
champion_charge = charge_range[index]
longest_trace = len(arr)
champion_intensity = intensity_profile.sum()
return champion_trace, champion_charge
if False:
def test_get_isotope_patterns():
test_centroids = [
[
(300, 50, 1, 1),
(300.501, 40, 1, 1),
(301.003, 30, 1, 1),
(301.504, 20, 1, 1),
(302.006, 10, 1, 1),
],
[
(300, 50, 2, 2),
(300.501, 40, 2, 2),
(301.003, 30, 2, 2),
(301.504, 20, 2, 2),
(302.006, 10, 2, 2),
],
[
(300, 50, 3, 3),
(300.501, 40, 3, 3),
(301.003, 30, 3, 3),
(301.504, 20, 3, 3),
(302.006, 10, 3, 3),
],
[
(300, 50, 4, 4),
(300.501, 40, 4, 4),
(301.003, 30, 4, 4),
(301.504, 20, 4, 4),
(302.006, 10, 4, 4),
],
[
(300, 50, 5, 5),
(300.501, 40, 5, 5),
(301.003, 30, 5, 5),
(301.504, 20, 5, 5),
(302.006, 10, 5, 5),
],
[(400, 10, 6, 6), (401, 10, 6, 6), (402, 10, 6, 6)],
[(400, 10, 7, 7), (401, 10, 7, 7), (402, 10, 7, 7)],
[(400, 10, 8, 8), (401, 10, 8, 8), (402, 10, 8, 8)],
[(400, 10, 9, 9), (401, 10, 9, 9), (402, 10, 9, 9)],
]
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
test_centroids_tmp = [np.array(_, dtype=centroid_dtype) for _ in test_centroids]
test_centroids = List([_ for _ in test_centroids_tmp])
test_hills = get_hills(test_centroids)
sorted_hills, stats, data, hill_data, hill_ptrs = get_hill_data(test_hills, test_centroids)
pre_patterns = get_edges(stats, data)
isotope_patterns, isotope_charges = get_isotope_patterns(pre_patterns, stats, data, averagine_aa, isotopes)
assert np.all(isotope_patterns[0] == np.array([0, 1, 2, 3, 4]))
assert isotope_charges[0] == 2
assert np.all(isotope_patterns[1] == np.array([5,6,7]))
assert isotope_charges[1] == 1
test_get_isotope_patterns()
#export
from numba.typed import List
from typing import Callable, Union
def get_isotope_patterns(pre_isotope_patterns:list, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, averagine_aa:Dict, isotopes:Dict, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:float = 5, iso_n_seeds:int = 100, cc_cutoff:float=0.6, iso_split_level:float = 1.3, callback:Union[Callable, None]=None) -> (np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to iterate over pre_isotope_patterns.
Args:
pre_isotope_patterns (list): List of pre-isotope patterns.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
iso_n_seeds (int, optional): Number of isotope seeds. Defaults to 100.
cc_cutoff (float, optional): Cutoff for correlation. Defaults to 0.6.
iso_split_level (float, optional): Isotope split level. Defaults to 1.3.
callback (Union[Callable, None], optional): Callback function for progress. Defaults to None.
Returns:
list: List of isotope patterns.
np.ndarray: Iso idx.
np.ndarray: Array containing isotope charges.
"""
isotope_patterns = []
isotope_charges = []
charge_range = List()
for i in range(iso_charge_min, iso_charge_max + 1):
charge_range.append(i)
for idx, pre_pattern in enumerate(pre_isotope_patterns):
extract = True
while extract:
isotope_pattern, isotope_charge = isolate_isotope_pattern(np.array(pre_pattern), hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, iso_mass_range, charge_range, averagine_aa, isotopes, iso_n_seeds, cc_cutoff, iso_split_level)
if isotope_pattern is None:
length = 0
else:
length = len(isotope_pattern)
if length > 1:
isotope_charges.append(isotope_charge)
isotope_patterns.append(isotope_pattern)
pre_pattern = [_ for _ in pre_pattern if _ not in isotope_pattern]
if len(pre_pattern) <= 1:
extract = False
else:
extract = False
if callback:
callback((idx+1)/len(pre_isotope_patterns))
iso_patterns = np.zeros(sum([len(_) for _ in isotope_patterns]), dtype=np.int64)
iso_idx = np.zeros(len(isotope_patterns)+1, dtype='int')
start = 0
for idx, _ in enumerate(isotope_patterns):
iso_patterns[start:start+len(_)] = _
start += len(_)
iso_idx[idx+1] = start
return iso_patterns, iso_idx, np.array(isotope_charges)
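# Illustrative note with toy values (not part of the algorithm): the patterns are returned
# as one flat array plus a pointer array, mirroring the hill_data/hill_ptrs layout, so
# pattern k is recovered as iso_patterns[iso_idx[k]:iso_idx[k+1]], which is exactly how
# report_ accesses the patterns below.
toy_iso_patterns = np.array([4, 5, 6, 11, 12])
toy_iso_idx = np.array([0, 3, 5])
print(toy_iso_patterns[toy_iso_idx[0]:toy_iso_idx[1]])  # first pattern: hills 4, 5 and 6
print(toy_iso_patterns[toy_iso_idx[1]:toy_iso_idx[2]])  # second pattern: hills 11 and 12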
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def report_(idx:np.ndarray, isotope_charges:list, isotope_patterns:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, results:np.ndarray):
"""Function to extract summary statstics from a list of isotope patterns and charges.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
results (np.ndarray): Recordarray with isotope pattern summary statistics.
"""
pattern = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
isotope_data = stats[pattern]
mz = np.min(isotope_data[:, 0])
mz_std = np.mean(isotope_data[:, 1])
charge = isotope_charges[idx]
mass = mz_to_mass(mz, charge)
int_max_idx = np.argmax(isotope_data[:, 2])
mz_most_abundant = isotope_data[:, 0][int_max_idx]
int_max = isotope_data[:,2][int_max_idx]
rt_start = isotope_data[int_max_idx, 4] # This is the start of the most abundant trace
rt_end = isotope_data[int_max_idx, 5]
# better measurement of the peak with interpolation
rt_min_ = min(isotope_data[:, 4])
rt_max_ = max(isotope_data[:, 5])
rt_range = np.linspace(rt_min_, rt_max_, 100)
trace_sum = np.zeros_like(rt_range)
for k in pattern:
x = sortindex_[k]
start = hill_ptrs[x]
end = hill_ptrs[x + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
rts = rt_[rt_idx[idx_]]
interpolation = np.interp(rt_range, rts, int_)
#Filter
interpolation[:(rt_range < rts[0]).sum()] = 0
right_cut = (rt_range > rts[-1]).sum()
if right_cut > 0:
interpolation[-right_cut:]= 0
trace_sum += interpolation
rt_apex_idx = trace_sum.argmax()
rt_apex = rt_range[rt_apex_idx]
trace = trace_sum
half_max = trace.max()/2
if rt_apex_idx == 0:
left_apex = 0
else:
left_apex = np.abs(trace[:rt_apex_idx]-half_max).argmin()
right_apex = np.abs(trace[rt_apex_idx:]-half_max).argmin()+rt_apex_idx
int_apex = trace_sum[rt_apex_idx]
fwhm = rt_range[right_apex] - rt_range[left_apex]
n_isotopes = len(pattern)
rt_cutoff = 0.95 # peak boundaries are set where the trace drops to 5% of its maximum
if rt_apex_idx == 0:
rt_min_idx = 0
else:
rt_min_idx = np.abs(trace[:rt_apex_idx]-trace.max()*(1-rt_cutoff)).argmin()
rt_max_idx = np.abs(trace[rt_apex_idx:]-trace.max()*(1-rt_cutoff)).argmin()+rt_apex_idx
#plt.xlabel('rt')
#plt.ylabel('int')
#plt.show()
#plt.plot(rt_range, trace_sum)
#plt.plot([rt_range[left_apex], rt_range[right_apex]], [(trace[left_apex] + trace[right_apex])/2]*2, 'k:')
#plt.plot(rt_range[rt_apex_idx], trace[rt_apex_idx], 'k*')
#plt.plot(rt_range[rt_min_idx], trace[rt_min_idx], 'k*')
#plt.plot(rt_range[rt_max_idx], trace[rt_max_idx], 'k*')
#plt.show()
rt_start = rt_range[rt_min_idx]
rt_end = rt_range[rt_max_idx]
int_area = np.abs(np.trapz(trace_sum[rt_min_idx:rt_max_idx], rt_range[rt_min_idx:rt_max_idx]))
int_sum = trace_sum.sum()
results[idx,:] = np.array([mz, mz_std, mz_most_abundant, charge, rt_start, rt_apex, rt_end, fwhm, n_isotopes, mass, int_apex, int_area, int_sum])
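# Toy sketch with illustrative values of the width estimate used above: the summed trace is
# evaluated on a common rt grid and the FWHM is read off at the half-maximum crossings.
toy_rt = np.linspace(0, 1, 100)
toy_trace = np.exp(-((toy_rt - 0.5) ** 2) / (2 * 0.05 ** 2))  # Gaussian peak, sigma = 0.05
toy_apex = toy_trace.argmax()
toy_half = toy_trace.max() / 2
toy_left = np.abs(toy_trace[:toy_apex] - toy_half).argmin()
toy_right = np.abs(toy_trace[toy_apex:] - toy_half).argmin() + toy_apex
print(toy_rt[toy_right] - toy_rt[toy_left])  # close to 2.355 * sigma for a Gaussian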
#export
import pandas as pd
def feature_finder_report(query_data:dict, isotope_patterns:list, isotope_charges:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray)->pd.DataFrame:
"""Creates a report dataframe with summary statistics of the found isotope patterns.
Args:
query_data (dict): Data structure containing the query data.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to the isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
Returns:
pd.DataFrame: DataFrame with isotope pattern summary statistics.
"""
rt_ = np.array(query_data['rt_list_ms1'])
indices_ = np.array(query_data['indices_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
rt_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
int_data = np.array(query_data['int_list_ms1'])
results = np.zeros((len(isotope_charges), 13))
report_(range(len(isotope_charges)), isotope_charges, isotope_patterns, iso_idx, stats, sortindex_, hill_ptrs, hill_data, int_data, rt_, rt_idx, results)
df = pd.DataFrame(results, columns = ['mz','mz_std','mz_most_abundant','charge','rt_start','rt_apex','rt_end','fwhm','n_isotopes','mass','int_apex','int_area', 'int_sum'])
df = df.sort_values(['rt_start','mz'])
return df
###Output
_____no_output_____
###Markdown
Data OutputFor each feature that is found, we extract summary statistics and put them in tabular form to be used as a pandas DataFrame. PlottingFor quality control reasons, we also employ a function to plot a feature in its local environment.
###Code
#export
def plot_isotope_pattern(index:int, df:pd.DataFrame, sorted_stats:np.ndarray, centroids:np.ndarray, scan_range:int=100, mz_range:float=2, plot_hills:bool = False):
"""Plot an isotope pattern in its local environment.
Args:
index (int): Index to the pattern.
df (pd.DataFrame): Pandas DataFrame containing the patterns.
sorted_stats (np.ndarray): Stats array that contains summary statistics of hills.
centroids (np.ndarray): 1D Array containing the masses of the centroids.
scan_range (int, optional): Scan range to plot. Defaults to 100.
mz_range (float, optional): MZ range to plot. Defaults to 2.
plot_hills (bool, optional): Flag to plot hills. Defaults to False.
"""
markersize = 10
plot_offset_mz = 1
plot_offset_rt = 2
feature = df.loc[index]
scan = rt_dict[feature['rt_apex']]
start_scan = scan-scan_range
end_scan = scan+scan_range
mz_min = feature['mz']-mz_range-plot_offset_mz
mz_max = feature['mz']+mz_range+plot_offset_mz
sub_data = np.hstack(centroids[start_scan:end_scan])
selection = sub_data[(sub_data['mz']>mz_min) & (sub_data['mz']<mz_max)]
min_rt = selection['rt'].min() - plot_offset_rt
max_rt = selection['rt'].max() + plot_offset_rt
hill_selection = sorted_stats[(sorted_stats['mz_avg']>mz_min) & (sorted_stats['mz_avg']<mz_max) & (sorted_stats['rt_max']<max_rt) & (sorted_stats['rt_min']>min_rt)]
plt.style.use('dark_background')
plt.figure(figsize=(15,15))
plt.scatter(selection['rt'], selection['mz'], c= np.log(selection['int']), marker='s', s=markersize, alpha=0.9)
plt.colorbar()
plt.grid(False)
plt.xlabel('RT (min)')
plt.ylabel('m/z')
box_height = mz_range/50
if plot_hills:
for hill in hill_selection:
bbox = [hill['rt_min'], hill['mz_avg']-box_height, hill['rt_max'], hill['mz_avg']+box_height]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='w', linewidth=1, alpha = 0.3)
plt.gca().add_patch(rect)
feature_selection = df[(df['mz']>mz_min) & (df['mz']<mz_max) & (df['rt_end']<max_rt) & (df['rt_start']>min_rt)]
for f_idx in feature_selection.index:
for c_idx in range(len(sorted_stats[isotope_patterns[f_idx]])-1):
start = sorted_stats[isotope_patterns[f_idx]][c_idx]
end = sorted_stats[isotope_patterns[f_idx]][c_idx+1]
start_mass = start['mz_avg']
start_rt = (start['rt_min']+start['rt_max'])/2
end_mass = end['mz_avg']
end_rt = (end['rt_min']+end['rt_max'])/2
plt.plot([start_rt, end_rt], [start_mass, end_mass], '+', color='y')
plt.plot([start_rt, end_rt], [start_mass, end_mass], ':', color='y')
if plot_hills:
for hill_idx in isotope_patterns[f_idx]:
hill = sorted_stats[hill_idx]
bbox = [hill['rt_min'], hill['mz_avg']-box_height, hill['rt_max'], hill['mz_avg']+box_height]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=1, alpha = 0.8)
plt.gca().add_patch(rect)
plt.xlim([min_rt+plot_offset_rt, max_rt-plot_offset_rt])
plt.ylim([mz_min+plot_offset_mz, mz_max-plot_offset_mz])
plt.title('Pattern')
plt.show()
plt.style.use('ggplot')
###Output
_____no_output_____
###Markdown
External Feature FinderTo utilize the command-line Feature Finder from Bruker `4DFF-3.13` - `uff-cmdline2.exe`, we call it via a subprocess and wait until completion.
###Code
#export
import subprocess
import os
import platform
import logging
from tqdm import tqdm
def extract_bruker(file:str, base_dir:str = "ext/bruker/FF", config:str = "proteomics_4d.config"):
"""Call Bruker Feautre Finder via subprocess.
Args:
file (str): Filename for feature finding.
base_dir (str, optional): Base dir where the feature finder is stored.. Defaults to "ext/bruker/FF".
config (str, optional): Config file for feature finder. Defaults to "proteomics_4d.config".
Raises:
NotImplementedError: Unsupported operating system.
FileNotFoundError: Feature finder not found.
FileNotFoundError: Config file not found.
FileNotFoundError: Feature file not found.
"""
feature_path = file + '/'+ os.path.split(file)[-1] + '.features'
base_dir = os.path.join(os.path.dirname(__file__), base_dir)
operating_system = platform.system()
if operating_system == 'Linux':
ff_dir = os.path.join(base_dir, 'linux64','uff-cmdline2')
logging.info('Using Linux FF')
elif operating_system == 'Windows':
ff_dir = os.path.join(base_dir, 'win64','uff-cmdline2.exe')
logging.info('Using Windows FF')
else:
raise NotImplementedError(f"System {operating_system} not supported.")
if os.path.exists(feature_path):
return feature_path
else:
if not os.path.isfile(ff_dir):
raise FileNotFoundError(f'Bruker feature finder cmd not found here {ff_dir}.')
config_path = base_dir + '/'+ config
if not os.path.isfile(config_path):
raise FileNotFoundError(f'Config file not found here {config_path}.')
if operating_system == 'Windows':
FF_parameters = [ff_dir,'--ff 4d',f'--readconfig "{config_path}"', f'--analysisDirectory "{file}"']
process = subprocess.Popen(' '.join(FF_parameters), stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, b''):
logtxt = line.decode('utf8')
logging.info(logtxt[48:].rstrip()) #Remove logging info from FF
elif operating_system == 'Linux':
FF_parameters = [
ff_dir,
'--ff',
'4d',
'--readconfig',
config_path,
'--analysisDirectory',
file
]
process = subprocess.run(FF_parameters, stdout=subprocess.PIPE)
if os.path.exists(feature_path):
return feature_path
else:
raise FileNotFoundError(f"Feature file {feature_path} does not exist.")
import sqlalchemy as db
def convert_bruker(feature_path:str)->pd.DataFrame:
"""Reads feature table and converts to feature table to be used with AlphaPept.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
feature_table = pd.read_sql_table('LcTimsMsFeature', engine_featurefile)
from alphapept.constants import mass_dict
M_PROTON = mass_dict['Proton']
feature_table['Mass'] = feature_table['MZ'].values * feature_table['Charge'].values - feature_table['Charge'].values*M_PROTON
feature_table = feature_table.rename(columns={"MZ": "mz","Mass": "mass", "RT": "rt_apex", "RT_lower":"rt_start", "RT_upper":"rt_end", "Mobility": "mobility", "Mobility_lower": "mobility_lower", "Mobility_upper": "mobility_upper", "Charge":"charge","Intensity":'int_sum',"ClusterCount":'n_isotopes'})
feature_table['rt_apex'] = feature_table['rt_apex']/60
feature_table['rt_start'] = feature_table['rt_start']/60
feature_table['rt_end'] = feature_table['rt_end']/60
return feature_table
def map_bruker(feature_path:str, feature_table:pd.DataFrame, query_data:dict)->pd.DataFrame:
"""Map Ms1 to Ms2 via Table FeaturePrecursorMapping from Bruker FF.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
feature_table (pd.DataFrame): Pandas DataFrame containing the features.
query_data (dict): Data structure containing the query data.
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
mapping = pd.read_sql_table('FeaturePrecursorMapping', engine_featurefile)
mapping = mapping.set_index('PrecursorId')
feature_table= feature_table.set_index('Id')
query_prec_id = query_data['prec_id']
#Now look up the feature for each precursor
mass_matched = []
mz_matched = []
rt_matched = []
query_idx = []
f_idx = []
for idx, prec_id in tqdm(enumerate(query_prec_id)):
try:
f_id = mapping.loc[prec_id]['FeatureId']
all_matches = feature_table.loc[f_id]
if type(f_id) == np.int64:
match = all_matches
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
else:
for k in range(len(all_matches)):
match = all_matches.iloc[k]
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
except KeyError:
pass
features = pd.DataFrame(np.array([mass_matched, mz_matched, rt_matched, query_idx, f_idx]).T, columns = ['mass_matched', 'mz_matched', 'rt_matched', 'query_idx', 'feature_idx'])
features['query_idx'] = features['query_idx'].astype('int')
return features
###Output
_____no_output_____
###Markdown
Wrapper
###Code
#export
import numpy as np
import logging
import os
from alphapept.search import query_data_to_features
import alphapept.io
import functools
def find_features(to_process:tuple, callback:Union[Callable, None] = None, parallel:bool = False)-> Union[str, bool]:
"""Wrapper for feature finding.
Args:
to_process (tuple): Tuple containing the file index and the settings dictionary, to be used from a process pool.
callback (Union[Callable, None], optional): Optional callback function. Defaults to None.
parallel (bool, optional): Flag to use parallel processing. Currently unused. Defaults to False.
Raises:
NotImplementedError: Error if the file extension is not understood.
Returns:
Union[str, bool]: Returns True if the function was successful, otherwise the exception as a string.
"""
try:
index, settings = to_process
file_name = settings['experiment']['file_paths'][index]
base, ext = os.path.splitext(file_name)
if ext.lower() == '.raw':
datatype='thermo'
elif ext.lower() == '.d':
datatype='bruker'
elif ext.lower() == '.mzml':
datatype='mzml'
else:
raise NotImplementedError('File extension {} not understood.'.format(ext))
out_file = f"{base}.ms_data.hdf"
skip = True
if os.path.isfile(out_file):
try:
alphapept.io.MS_Data_File(
out_file
).read(dataset_name="features")
logging.info(
'Found *.hdf with features for {}'.format(out_file)
)
except KeyError:
logging.info(
'No *.hdf file with features found for {}. Adding to feature finding list.'.format(out_file)
)
skip = False
if not skip:
ms_file = alphapept.io.MS_Data_File(out_file, is_read_only=False)
query_data = ms_file.read_DDA_query_data()
if not settings['workflow']["find_features"]:
features = query_data_to_features(query_data)
else:
if datatype in ['thermo','mzml']:
from alphapept.constants import averagine_aa, isotopes
f_settings = settings['features']
max_gap = f_settings['max_gap']
centroid_tol = f_settings['centroid_tol']
hill_split_level = f_settings['hill_split_level']
iso_split_level = f_settings['iso_split_level']
window = f_settings['hill_smoothing']
hill_check_large = f_settings['hill_check_large']
iso_charge_min = f_settings['iso_charge_min']
iso_charge_max = f_settings['iso_charge_max']
iso_n_seeds = f_settings['iso_n_seeds']
hill_nboot_max = f_settings['hill_nboot_max']
hill_nboot = f_settings['hill_nboot']
iso_mass_range = f_settings['iso_mass_range']
iso_corr_min = f_settings['iso_corr_min']
logging.info('Feature finding on {}'.format(file_name))
logging.info(f'Hill extraction with centroid_tol {centroid_tol} and max_gap {max_gap}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, centroid_tol)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
logging.info(f'Repeating hill extraction with centroid_tol {score_median+score_std*3:.2f}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, score_median+score_std*3)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
int_data = np.array(query_data['int_list_ms1'])
hill_ptrs = split_hills(hill_ptrs, hill_data, int_data, hill_split_level=hill_split_level, window = window) #hill length is in there already
logging.info(f'After split hill_ptrs {len(hill_ptrs):,}')
hill_data, hill_ptrs = filter_hills(hill_data, hill_ptrs, int_data, hill_check_large =hill_check_large, window=window)
logging.info(f'After filter hill_ptrs {len(hill_ptrs):,}')
stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs = get_hill_data(query_data, hill_ptrs, hill_data, hill_nboot_max = hill_nboot_max, hill_nboot = hill_nboot)
logging.info('Extracting hill stats complete')
pre_isotope_patterns = get_pre_isotope_patterns(stats, idxs_upper, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, maximum_offset, iso_charge_min=iso_charge_min, iso_charge_max=iso_charge_max, iso_mass_range=iso_mass_range, cc_cutoff=iso_corr_min)
logging.info('Found {:,} pre isotope patterns.'.format(len(pre_isotope_patterns)))
isotope_patterns, iso_idx, isotope_charges = get_isotope_patterns(pre_isotope_patterns, hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, averagine_aa, isotopes, iso_charge_min = iso_charge_min, iso_charge_max = iso_charge_max, iso_mass_range = iso_mass_range, iso_n_seeds = iso_n_seeds, cc_cutoff = iso_corr_min, iso_split_level=iso_split_level, callback=None)
logging.info('Extracted {:,} isotope patterns.'.format(len(isotope_charges)))
feature_table = feature_finder_report(query_data, isotope_patterns, isotope_charges, iso_idx, stats, sortindex_, hill_ptrs, hill_data)
logging.info('Report complete.')
elif datatype == 'bruker':
logging.info('Feature finding on {}'.format(file_name))
feature_path = extract_bruker(file_name)
feature_table = convert_bruker(feature_path)
logging.info('Bruker feature finder complete. Extracted {:,} features.'.format(len(feature_table)))
# Calculate additional params
feature_table['rt_length'] = feature_table['rt_end'] - feature_table['rt_start']
feature_table['rt_right'] = feature_table['rt_end'] - feature_table['rt_apex']
feature_table['rt_left'] = feature_table['rt_apex'] - feature_table['rt_start']
feature_table['rt_tail'] = feature_table['rt_right'] / feature_table['rt_left']
logging.info('Matching features to query data.')
if 'mono_mzs2' not in query_data.keys():
logging.info('No MS2-data to match.')
features = pd.DataFrame()
else:
features = map_ms2(feature_table, query_data, **settings['features'])
logging.info('Saving feature table.')
ms_file.write(feature_table, dataset_name="feature_table")
logging.info('Feature table saved to {}'.format(out_file))
logging.info('Saving features.')
ms_file.write(features, dataset_name="features")
logging.info(f'Feature finding of file {file_name} complete.')
return True
except Exception as e:
logging.error(f'Feature finding of file {file_name} failed. Exception {e}')
return f"{e}" #Can't return exception object, cast as string
###Output
_____no_output_____
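###Markdown
`find_features` is driven by an AlphaPept settings dictionary. The sketch below only lists the keys that the wrapper reads in this notebook and uses placeholder values, so treat it as an illustration of the expected structure rather than a set of recommended defaults.
###Code
example_settings = {
    'experiment': {'file_paths': ['/path/to/run.raw']},  # placeholder path
    'workflow': {'find_features': True},
    'features': {
        'max_gap': 2, 'centroid_tol': 8,  # placeholder values
        'hill_split_level': 1.3, 'iso_split_level': 1.3,
        'hill_smoothing': 1, 'hill_check_large': 40,
        'iso_charge_min': 1, 'iso_charge_max': 6, 'iso_n_seeds': 100,
        'hill_nboot_max': 300, 'hill_nboot': 150,
        'iso_mass_range': 5, 'iso_corr_min': 0.6,
        # keys below are passed on to map_ms2 via **settings['features']
        'map_mz_range': 1, 'map_rt_range': 0.5, 'map_mob_range': 0.3,
        'map_n_neighbors': 5, 'search_unidentified': False,
    },
}
# find_features((0, example_settings))  # would run feature finding on the first file
###Output
_____no_output_____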
###Markdown
MappingMapping MS1 to MS2
###Code
#export
from sklearn.neighbors import KDTree
import pandas as pd
import numpy as np
def replace_infs(array:np.ndarray)->np.ndarray:
"""Replace nans and infs with 0
Args:
array (np.ndarray): Input array.
Returns:
np.ndarray: Output array without nans and infs.
"""
array[array == -np.inf] = 0
array[array == np.inf] = 0
array[np.isnan(array)] = 0
return array
def map_ms2(feature_table:pd.DataFrame, query_data:dict, map_mz_range:float = 1, map_rt_range:float = 0.5, map_mob_range:float = 0.3, map_n_neighbors:int=5, search_unidentified:bool = False, **kwargs)->pd.DataFrame:
"""Map MS1 features to MS2 based on rt and mz.
If mobility (ccs) information is present, it is used as an additional mapping dimension.
Args:
feature_table (pd.DataFrame): Pandas DataFrame with features.
query_data (dict): Data structure containing the query data.
map_mz_range (float, optional): Mapping range for mz (Da). Defaults to 1.
map_rt_range (float, optional): Mapping range for rt (min). Defaults to 0.5.
map_mob_range (float, optional): Mapping range for mobility (%). Defaults to 0.3.
map_n_neighbors (int, optional): Maximum number of neighbors to be extracted. Defaults to 5.
search_unidentified (bool, optional): Flag to perform search on features that have no isotope pattern. Defaults to False.
Returns:
pd.DataFrame: Table with features.
"""
feature_table['rt'] = feature_table['rt_apex']
range_dict = {}
range_dict['mz'] = ('mono_mzs2', map_mz_range)
range_dict['rt'] = ('rt_list_ms2', map_rt_range)
range_dict['mobility'] = ('mobility', map_mob_range)
query_dict = {}
query_dict['rt'] = 'rt_list_ms2'
query_dict['mass'] = 'prec_mass_list2'
query_dict['mz'] = 'mono_mzs2'
query_dict['charge'] = 'charge2'
query_dict['mobility'] = 'mobility'
if 'mobility' not in feature_table.columns:
del range_dict['mobility']
del query_dict['mobility']
use_mob = False
else:
use_mob = True
tree_points = feature_table[list(range_dict.keys())].values
for i, key in enumerate(range_dict):
tree_points[:,i] = tree_points[:,i]/range_dict[key][1]
matching_tree = KDTree(tree_points, metric="minkowski")
ref_points = np.array([query_data[range_dict[_][0]] / range_dict[_][1] for _ in range_dict]).T
ref_points = replace_infs(ref_points)
dist, idx = matching_tree.query(ref_points, k=map_n_neighbors)
ref_matched = np.zeros(ref_points.shape[0], dtype=np.bool_)
all_df = []
for neighbor in range(map_n_neighbors):
ref_df = pd.DataFrame(np.array([query_data[query_dict[_]] for _ in query_dict]).T, columns = query_dict.keys())
for _ in query_dict:
ref_df[_+'_matched'] = feature_table.iloc[idx[:,neighbor]][_].values
ref_df[_+'_offset'] = ref_df[_+'_matched'] - ref_df[_]
ref_df['query_idx'] = ref_df.index
ref_df['feature_idx'] = idx[:,neighbor]
for field in ['int_sum','int_apex','rt_start','rt_apex','rt_end','fwhm','mobility_lower','mobility_upper']:
if field in feature_table.keys():
ref_df[field] = feature_table.iloc[idx[:,neighbor]][field].values
rt_check = (ref_df['rt_start'] <= ref_df['rt']) & (ref_df['rt'] <= ref_df['rt_end'])
# check isolation window (win=3)
mass_check = np.abs(ref_df['mz_offset'].values) <= 3
_check = rt_check & mass_check
if use_mob:
mob_check = (ref_df['mobility_lower'] <= ref_df['mobility']) & (ref_df['mobility'] <= ref_df['mobility_upper'])
_check &= mob_check
ref_matched |= _check
ref_df['dist'] = dist[:,neighbor]
ref_df = ref_df[_check]
all_df.append(ref_df)
if search_unidentified:
if use_mob:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2'], query_data['mobility']]).T, columns=['rt', 'mass', 'mz', 'charge','mobility'])
else:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2']]).T, columns=['rt', 'mass', 'mz', 'charge'])
unmatched_ref = unmatched_ref[~ref_matched]
unmatched_ref['mass_matched'] = unmatched_ref['mass']
unmatched_ref['mass_offset'] = 0
unmatched_ref['rt_matched'] = unmatched_ref['rt']
unmatched_ref['rt_offset'] = 0
unmatched_ref['mz_matched'] = unmatched_ref['mz']
unmatched_ref['mz_offset'] = 0
unmatched_ref['charge_matched'] = unmatched_ref['charge']
unmatched_ref['query_idx'] = unmatched_ref.index
unmatched_ref['feature_idx'] = np.nan
if use_mob:
            unmatched_ref['mobility_matched'] = unmatched_ref['mobility']
            unmatched_ref['mobility_offset'] = np.nan
for field in ['int_sum','int_apex','rt_start','rt_apex','rt_end','fwhm']:
if field in feature_table.keys():
unmatched_ref[field] = np.nan
unmatched_ref['dist'] = np.nan
all_df.append(unmatched_ref)
features = pd.concat(all_df)
features = features.sort_values('mass_matched', ascending=True)
features = features.reset_index(drop=True)
return features
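#A small, self-contained sketch of the scaling idea used in map_ms2 above: dividing every coordinate by its
#allowed matching range lets a single nearest-neighbor query treat m/z and retention time on an equal footing.
#All numbers below are made up purely for illustration.
toy_features = np.array([[500.20, 10.0],  #m/z and rt_apex of three hypothetical MS1 features
                         [500.90, 10.1],
                         [800.40, 35.0]])
toy_queries = np.array([[500.21, 10.05],  #MS2 precursor m/z and rt
                        [800.38, 34.90]])
mz_range, rt_range = 1.0, 0.5             #same defaults as map_mz_range and map_rt_range above
scaled_features = toy_features / np.array([mz_range, rt_range])
scaled_queries = toy_queries / np.array([mz_range, rt_range])
toy_tree = KDTree(scaled_features, metric="minkowski")
toy_dist, toy_idx = toy_tree.query(scaled_queries, k=1)
print(toy_idx.ravel())  #each query is matched to its closest feature in the scaled space -> [0 2]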
#hide
from nbdev.showdoc import *
#hide
from nbdev.export import *
notebook2script()
###Output
Converted 00_settings.ipynb.
Converted 01_chem.ipynb.
Converted 02_io.ipynb.
Converted 03_fasta.ipynb.
Converted 04_feature_finding.ipynb.
Converted 05_search.ipynb.
Converted 06_score.ipynb.
Converted 07_recalibration.ipynb.
Converted 08_quantification.ipynb.
Converted 09_matching.ipynb.
Converted 10_constants.ipynb.
Converted 11_interface.ipynb.
Converted 12_performance.ipynb.
Converted 13_export.ipynb.
Converted additional_code.ipynb.
Converted contributing.ipynb.
Converted file_formats.ipynb.
Converted index.ipynb.
###Markdown
Feature Finding
> Functions related to feature finding

This part describes the implementation of the feature-finding algorithm. The core of the algorithm is described in the [MaxQuant-Paper](https://www.nature.com/articles/nbt.1511). The supplementary material explains the underlying methodology in great detail and is the foundation of the theoretical background that is described here. A refined version of the algorithm was presented with [Dinosaur](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4933939/), which was also used as a reference for the Python implementation.

For the algorithm, we need several modules:
1. Connecting Centroids to Hills
2. Refinement of Hills
3. Calculating Hill Statistics
4. Combining Hills to Isotope Patterns
5. Deconvolution of Isotope Patterns

Loading Data
From the `IO` library, we already have an `*.ms_data.hdf` container that contains centroided data. To use it in feature finding, we directly load the data.

Connecting Centroids to Hills
> Note: Feature finding relies heavily on the performance function decorator from the performance notebook: `@alphapept.performance.performance_function`. Part of this is that the functions will not have return values to be GPU compatible. Please check out this notebook for further information.

Connecting centroids
Feature finding starts with connecting centroids. For this we look at subsequent scans and compare peaks that are within a defined mass tolerance (`centroid_tol`). Imagine you have three scans with the following centroids:
* Scan 0: 10, 20, 30
* Scan 1: 10.2, 40.1
* Scan 2: 40, 50, 60

When comparing consecutive scans and defining the maximum delta mass to be 0.5, we find the following connections: (Scan No, Centroid No) -> (Scan No, Centroid No). As we cannot easily store tuples in the matrix, we convert the tuple containing the position of the connected centroid to an integer.
* (0,0) -> (1,0) -> (3): 10 & 10.2 -> delta = 0.2
* (1,1) -> (2,0) -> (6): 40.1 & 40 -> delta = 0.1

Finally, we store this in the `results` matrix:$\begin{bmatrix}3 & -1 & -1 \\ -1 & 6 & -1\\ -1 & -1 & -1 \end{bmatrix}$The corresponding `scores` matrix will look as follows:$\begin{bmatrix}0.2 & -1 & -1 \\ -1 & 0.1 & -1\\ -1 & -1 & -1 \end{bmatrix}$This allows us to not only easily store connections between centroids but also perform a quick lookup for the delta of an existing connection. Note that it also only stores the best connection for each centroid. To extract the connected centroids, we can use `np.where(results >= 0)`. This implementation allows getting millions of connections within seconds. As we also allow gaps, meaning that we can have connections between Scan 0 and Scan 2, we make the aforementioned matrix multidimensional, so that e.g. a first matrix stores the connections for no gap, and the second matrix the connections with a gap of 1.

The functionality for this step is implemented in `connect_centroids_unidirection` and the wrapper `find_centroid_connections`.
###Code
#export
import numpy as np
import alphapept.performance
#This function is tested by being called from find_centroid_connections
@alphapept.performance.performance_function
def connect_centroids_unidirection(x:np.ndarray, row_borders:np.ndarray, connections:np.ndarray, scores:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Connect centroids.
Args:
        x (np.ndarray): Index to datapoint. Note that this uses the performance_function, so one passes an ndarray.
row_borders (np.ndarray): Row borders of the centroids array.
connections (np.ndarray): Connections matrix to store the connections
scores (np.ndarray): Score matrix to store the connections
centroids (np.ndarray): 1D Array containing the masses of the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
for gap in range(max_gap + 1):
y = x + gap + 1
if y >= row_borders.shape[0]:
return
start_index_f = 0
if x > 0:
start_index_f = row_borders[x - 1]
centroids_1 = centroids[start_index_f: row_borders[x]]
start_index_b = row_borders[y - 1]
centroids_2 = centroids[start_index_b: row_borders[y]]
i = 0
j = 0
while (i < len(centroids_1)) & (j < len(centroids_2)):
mz1, mz2 = centroids_1[i], centroids_2[j]
diff = mz1 - mz2
mz_sum = mz1 + mz2
delta = 2 * 1e6 * abs(diff) / mz_sum
if delta < centroid_tol:
if scores[x, i, gap] > delta:
scores[x, i, gap] = delta
connections[x, i, gap] = (connections.shape[1] * y) + j
if diff > 0:
j += 1
else:
i += 1
def find_centroid_connections(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Wrapper function to call connect_centroids_unidirection
Args:
rowwise_peaks (np.ndarray): Length of centroids with respect to the row borders.
row_borders (np.ndarray): Row borders of the centroids array.
centroids (np.ndarray): Array containing the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
max_centroids = int(cupy.max(rowwise_peaks))
spectra_cnt = len(row_borders) - 1
connections = cupy.full((spectra_cnt, max_centroids, max_gap + 1), -1, dtype=np.int32)
score = cupy.full((spectra_cnt, max_centroids, max_gap + 1), np.inf)
connect_centroids_unidirection(range(len(row_borders)),
row_borders,
connections,
score,
centroids,
max_gap,
centroid_tol)
score = score[cupy.where(score < np.inf)]
score_median = cupy.median(score)
score_std = cupy.std(score)
del score, max_centroids, spectra_cnt
c_shape = connections.shape
from_r, from_c, from_g = cupy.where(connections >= 0)
to_r = connections[from_r, from_c, from_g] // c_shape[1]
to_c = connections[from_r, from_c, from_g] - to_r * c_shape[1]
del connections, from_g
return from_r, from_c, to_r, to_c, score_median, score_std
#hide
def test_find_centroid_connections():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 40.1, 40, 50, 60])
centroid_tol = 0.5*1e6
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
    assert np.allclose(from_r, np.array([0, 0, 1, 1])) #e.g. centroid (0,0) is connected to (1,0) -> 10 to 10.2
assert np.allclose(from_c, np.array([0, 2, 1, 2]))
assert np.allclose(to_r, np.array([1, 1, 2, 2]))
assert np.allclose(to_c, np.array([0, 1, 0, 0]))
test_find_centroid_connections()
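#A minimal, standalone sketch of the connection and score matrices from the toy example in the text above.
#It uses an absolute tolerance of 0.5 and only the no-gap case for clarity; the implementation above works in ppm
#and adds an extra dimension for gaps.
toy_scans = [np.array([10.0, 20.0, 30.0]), np.array([10.2, 40.1]), np.array([40.0, 50.0, 60.0])]
max_centroids = max(len(s) for s in toy_scans)
toy_results = np.full((len(toy_scans), max_centroids), -1, dtype=np.int64)
toy_scores = np.full((len(toy_scans), max_centroids), -1.0)
for scan in range(len(toy_scans) - 1):
    for i, mz1 in enumerate(toy_scans[scan]):
        for j, mz2 in enumerate(toy_scans[scan + 1]):
            delta = abs(mz1 - mz2)
            if delta <= 0.5 and (toy_scores[scan, i] < 0 or delta < toy_scores[scan, i]):
                toy_results[scan, i] = max_centroids * (scan + 1) + j  #encode (scan, centroid) as one integer
                toy_scores[scan, i] = delta
print(toy_results)
print(toy_scores)
print(np.where(toy_results >= 0))  #rows and columns of centroids that found a partner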
###Output
_____no_output_____
###Markdown
We wrap the centroid connections in the function `connect_centroids`. This function converts the connections into a usable array.
###Code
#export
#the performance functions are tested with the wrapper function connect_centroids
@alphapept.performance.performance_function
def convert_connections_to_array(x:np.ndarray, from_r:np.ndarray, from_c:np.ndarray, to_r:np.ndarray, to_c:np.ndarray, row_borders:np.ndarray, out_from_idx:np.ndarray, out_to_idx:np.ndarray):
"""Convert integer indices of a matrix to coordinates.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_r (np.ndarray): From array with row coordinates.
from_c (np.ndarray): From array with column coordinates.
to_r (np.ndarray): To array with row coordinates.
to_c (np.ndarray): To array with column coordinates.
row_borders (np.ndarray): Row borders (for indexing).
out_from_idx (np.ndarray): Reporting array: 1D index from.
out_to_idx (np.ndarray): Reporting array: 1D index to.
"""
row = from_r[x]
col = from_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_from_idx[x] = start_index_f + col
row = to_r[x]
col = to_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_to_idx[x] = start_index_f + col
@alphapept.performance.performance_function
def eliminate_overarching_vertex(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray):
"""Eliminate overacrhing vertex.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
"""
if x == 0:
return
if from_idx[x - 1] == from_idx[x]:
to_idx[x] = -1
def connect_centroids(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, float, float):
"""Function to connect centroids.
Args:
rowwise_peaks (np.ndarray): Indexes for centroids.
row_borders (np.ndarray): Row borders (for indexing).
centroids (np.ndarray): Centroid data.
max_gap: Maximum gap.
centroid_tol: Centroid tol for matching centroids.
Returns:
np.ndarray: From index.
np.ndarray: To index.
float: Median score.
float: Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks,
row_borders,
centroids,
max_gap,
centroid_tol)
from_idx = cupy.zeros(len(from_r), np.int32)
to_idx = cupy.zeros(len(from_r), np.int32)
convert_connections_to_array(range(len(from_r)),
from_r,
from_c,
to_r,
to_c,
row_borders,
from_idx,
to_idx)
eliminate_overarching_vertex(range(len(from_idx)), from_idx, to_idx)
relavent_idx = cupy.where(to_idx >= 0)
from_idx = cupy.take(from_idx, relavent_idx)[0]
to_idx = cupy.take(to_idx, relavent_idx)[0]
del from_r, from_c, to_r, to_c, relavent_idx
return from_idx, to_idx, score_median, score_std
#Sample snippet to show centroid connections
import matplotlib.pyplot as plt
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
scan_no = np.array([0, 0, 0, 1, 1, 2, 2, 2])
plt.figure(figsize=(5,5))
for i, _ in enumerate(row_borders):
ctrd = centroids[_-rowwise_peaks[i]:_]
plt.plot(ctrd, np.ones_like(ctrd)*i, 'o')
for i, _ in enumerate(from_idx):
from_ = _
to_ = to_idx[i]
plt.plot([centroids[from_], centroids[to_]], [scan_no[from_], scan_no[to_]], 'k:')
plt.ylabel('scan')
plt.xlabel('m/z')
plt.ylim(len(row_borders)+0.5, -1.5)
plt.title('Peak connections')
plt.show()
#hide
def test_connect_centroids():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_idx, np.array([0, 1, 2]))
assert np.allclose(to_idx, np.array([3, 4, 6]))
test_connect_centroids()
###Output
_____no_output_____
###Markdown
Extracting Hills
To extract hills, we extract the connected components from the connections.
###Code
#export
@alphapept.performance.performance_function
def path_finder(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, forward:np.ndarray, backward:np.ndarray):
"""Extracts path information and writes to path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): Array containing from indices.
to_idx (np.ndarray): Array containing to indices.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
"""
fr = from_idx[x]
to = to_idx[x]
forward[fr] = to
backward[to] = fr
@alphapept.performance.performance_function
def find_path_start(x:np.ndarray, forward:np.ndarray, backward:np.ndarray, path_starts:np.ndarray):
"""Function to find the start of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
path_starts (np.ndarray): Array to report path starts.
"""
if forward[x] > -1 and backward[x] == -1:
path_starts[x] = 0
@alphapept.performance.performance_function
def find_path_length(x:np.ndarray, path_starts:np.ndarray, forward:np.ndarray, path_cnt:np.ndarray):
"""Function to extract the length of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forward (np.ndarray): Array that stores forward information.
path_cnt (np.ndarray): Reporting array to count the paths.
"""
ctr = 1
idx = path_starts[x]
while forward[idx] > -1:
ctr += 1
idx = forward[idx]
path_cnt[x] = ctr
@alphapept.performance.performance_function
def fill_path_matrix(x:np.ndarray, path_start:np.ndarray, forwards:np.ndarray, out_hill_data:np.ndarray, out_hill_ptr:np.ndarray):
"""Function to fill the path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
        path_start (np.ndarray): Array that stores the starts of the paths.
forwards (np.ndarray): Forward array.
out_hill_data (np.ndarray): Array containing the indices to hills.
out_hill_ptr (np.ndarray): Array containing the bounds to out_hill_data.
"""
path_position = 0
idx = path_start[x]
while idx > -1:
out_hill_data[out_hill_ptr[x] + path_position] = idx
idx = forwards[idx]
path_position += 1
def get_hills(centroids:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, hill_length_min:int=3)-> (np.ndarray, np.ndarray, int):
"""Function to get hills from centroid connections.
Args:
centroids (np.ndarray): 1D Array containing the masses of the centroids.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
hill_length_min (int): Minimum hill length:
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
forward = cupy.full(centroids.shape[0], -1)
backward = cupy.full(centroids.shape[0], -1)
path_starts = cupy.full(centroids.shape[0], -1)
path_finder(range(len(from_idx)), from_idx, to_idx, forward, backward)
find_path_start(range(len(forward)), forward, backward, path_starts)
    # path_starts will now contain the first index of all connected centroids
path_starts = cupy.where(path_starts == 0)[0]
path_node_cnt = cupy.full(path_starts.shape[0], -1)
find_path_length(range(len(path_starts)), path_starts, forward, path_node_cnt)
relavant_path_node = cupy.where(path_node_cnt >= hill_length_min)[0]
path_starts = cupy.take(path_starts, relavant_path_node)
path_node_cnt = cupy.take(path_node_cnt, relavant_path_node)
del relavant_path_node
    # Generate the hill matrix index pointer data
hill_ptrs = cupy.empty((path_starts.shape[0] + 1), dtype=cupy.int32)
hill_ptrs[0] = 0
hill_ptrs[1:] = path_node_cnt.cumsum()
hill_data = cupy.empty((int(hill_ptrs[-1])), np.int32)
fill_path_matrix(range(len(path_starts)), path_starts, forward, hill_data, hill_ptrs)
del from_idx, to_idx, path_starts, forward, backward
return hill_ptrs, hill_data, path_node_cnt
def extract_hills(query_data:dict, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, int, float, float):
"""[summary]
Args:
query_data (dict): Data structure containing the query data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
score_median (float): Median score.
score_std (float): Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
indices = cupy.array(query_data['indices_ms1'])
mass_data = cupy.array(query_data['mass_list_ms1'])
rowwise_peaks = indices[1:] - indices[:-1]
row_borders = indices[1:]
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, mass_data, max_gap, centroid_tol)
hill_ptrs, hill_data, path_node_cnt = get_hills(mass_data, from_idx, to_idx)
del mass_data
del indices
if cupy.__name__ != 'numpy':
hill_ptrs = hill_ptrs.get()
hill_data = hill_data.get()
path_node_cnt = path_node_cnt.get()
score_median = score_median.get()
score_std = score_std.get()
return hill_ptrs, hill_data, path_node_cnt, score_median, score_std
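#Minimal toy example for get_hills (assuming the default CPU compilation mode): the connections 0 -> 1 -> 2
#chain three centroids into a single hill, which passes the default hill_length_min of 3.
toy_centroids = np.array([10.00, 10.01, 10.02, 20.00, 20.01])
toy_from_idx = np.array([0, 1])
toy_to_idx = np.array([1, 2])
toy_hill_ptrs, toy_hill_data, toy_path_node_cnt = get_hills(toy_centroids, toy_from_idx, toy_to_idx)
print(toy_hill_ptrs, toy_hill_data, toy_path_node_cnt)  #-> [0 3] [0 1 2] [3]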
###Output
_____no_output_____
###Markdown
Hill Splitting
When a hill has two or more maxima, we would like to split it at the minimum position. For this, we use a recursive approach. First, the minimum of a hill is detected. A hill is split at this minimum if the smaller of the surrounding maxima is at least the factor `hill_split_level` larger than the minimum. For each split, the process is repeated.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def fast_minima(y:np.ndarray)->np.ndarray:
"""Function to calculate the local minimas of an array.
Args:
y (np.ndarray): Input array.
Returns:
np.ndarray: Array containing minima positions.
"""
minima = np.zeros(len(y))
start = 0
end = len(y)
for i in range(start + 2, end - 2):
if ((y[i - 1] > y[i]) & (y[i + 1] > y[i])) \
or ((y[i - 1] > y[i]) & (y[i + 1] == y[i]) & (y[i + 2] > y[i])) \
or ((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] > y[i])) \
or (((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] == y[i]) & \
(y[i + 2] > y[i]))):
minima[i] = 1
minima = minima.nonzero()[0]
return minima
#hide
def test_fast_minima():
assert fast_minima(np.array([3,2,1,0,1,2,3])) == 3
assert fast_minima(np.array([4,3,2,1,0,1,2])) == 4
assert len(fast_minima(np.array([5,4,3,2,1,0,1]))) == 0
assert len(fast_minima(np.array([6,5,4,3,2,1,0]))) == 0
test_fast_minima()
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def split(k:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_data:np.ndarray, splits:np.ndarray, hill_split_level:float, window:int):
"""Function to split hills.
Args:
k (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_data (np.ndarray): Array containing the indices to hills.
splits (np.ndarray): Array containing splits.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
"""
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_trace = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.median(int_trace[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.mean(int_trace[min_index:max_index])
#minima = (np.diff(np.sign(np.diff(int_trace))) > 0).nonzero()[0] + 1 #This works also but is slower
minima = fast_minima(int_trace)
sorted_minima = np.argsort(int_trace[minima])
minima = minima[sorted_minima]
for min_ in minima:
minval = int_trace[min_]
left_max = max(int_trace[:min_])
right_max = max(int_trace[min_:])
min_max = min(left_max, right_max)
if (minval == 0) or ((min_max / minval) > hill_split_level):
splits[k] = start+min_
break # Split only once per iteration
def split_hills(hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, hill_split_level:float, window:int)->np.ndarray:
"""Wrapper function to split hills
Args:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
Returns:
np.ndarray: Array containing the bounds to the hill_data with splits.
"""
splits = np.zeros(len(int_data), dtype=np.int32)
to_check = np.arange(len(hill_ptrs)-1)
while len(to_check) > 0:
split(to_check, hill_ptrs, int_data, hill_data, splits, hill_split_level, window)
splitpoints = splits.nonzero()[0]
to_check = np.zeros(len(hill_ptrs))
to_check[splitpoints] = 1
to_check = np.insert(to_check, splitpoints+1, np.ones(len(splitpoints))).nonzero()[0] #array, index, what
hill_ptrs = np.insert(hill_ptrs, splitpoints+1, splits[splitpoints]) #array, index, what
splits = np.zeros(len(hill_ptrs), dtype=np.int32) #was cupy np.int32
return hill_ptrs
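#Small illustration of the split criterion described above: a trace with two apexes has a local minimum between
#them, and the hill is split there if min(left_max, right_max) / minimum exceeds the split level (1.3 used here
#as an example value).
toy_trace = np.array([1., 5., 20., 5., 2., 6., 25., 6., 1.])
toy_minima = fast_minima(toy_trace)
print(toy_minima)  #-> [4]
min_pos = toy_minima[0]
min_max = min(toy_trace[:min_pos].max(), toy_trace[min_pos:].max())
print(min_max / toy_trace[min_pos] > 1.3)  #True -> this hill would be split at position 4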
###Output
_____no_output_____
###Markdown
Filter Hills
To filter hills, we define a minimum length `hill_min_length`. All peaks below the threshold `hill_peak_min_length` are accepted as is. For longer hills, the intensity at the start and at the end is compared to the maximum intensity. If the ratio of the maximum raw intensity to the smoothed intensity at the beginning and at the end is larger than `hill_peak_factor`, the hill is accepted.
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def check_large_hills(idx:np.ndarray, large_peaks:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, to_remove:np.ndarray, large_peak:int = 40, hill_peak_factor:float = 2, window:int=1):
"""Function to check large hills and flag them for removal.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
large_peaks (np.ndarray): Array containing large peaks.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
to_remove (np.ndarray): Array with indexes to remove.
large_peak (int, optional): Length criterion when a peak is large. Defaults to 40.
hill_peak_factor (float, optional): Hill maximum criterion. Defaults to 2.
        window (int, optional): Smoothing window. Defaults to 1.
"""
k = large_peaks[idx]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_smooth_ = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.median(int_smooth_[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.mean(int_smooth_[min_index:max_index])
int_ = int_data[int_idx]
max_ = np.max(int_)
if (max_ / int_smooth_[0] > hill_peak_factor) & (max_ / int_smooth_[-1] > hill_peak_factor):
to_remove[idx] = 0
def filter_hills(hill_data:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_check_large:int =40, window:int = 1) -> (np.ndarray, np.ndarray):
"""Filters large hills.
Args:
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
        hill_check_large (int, optional): Length criterion when a hill is considered large. Defaults to 40.
window (int, optional): Smoothing window. Defaults to 1.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
"""
large_peaks = np.where(np.diff(hill_ptrs)>=hill_check_large)[0]
to_remove = np.ones(len(large_peaks), dtype=np.int32)
    check_large_hills(range(len(large_peaks)), large_peaks, hill_ptrs, hill_data, int_data, to_remove, 40, 2, window) #fill large_peak and hill_peak_factor positionally so that window reaches the window argument
idx_ = np.ones(len(hill_data), dtype = np.int32)
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
to_remove = to_remove.nonzero()[0]
for _ in to_remove:
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_
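#Pure-numpy sketch of the acceptance criterion in check_large_hills: a long hill is kept only if its raw maximum
#clearly exceeds the smoothed intensity at both ends (factor hill_peak_factor, 2 by default). The convolution is
#a simple stand-in for the median/mean smoothing used above.
toy_int = np.concatenate([np.linspace(1, 50, 25), np.linspace(50, 1, 25)])  #a well-shaped 50-point hill
smoothed = np.convolve(toy_int, np.ones(3) / 3, mode='same')
keep_hill = (toy_int.max() / smoothed[0] > 2) & (toy_int.max() / smoothed[-1] > 2)
print(keep_hill)  #True -> the hill passes the check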
###Output
_____no_output_____
###Markdown
Calculating Hill Statistics
Next, we calculate summary statistics for the connected centroids. We can obtain a high-precision mass estimate for each hill by taking the average of the masses and weighting this by their intensities:$$\overline{m} = \frac{\sum_{j=1}^nm_jI_j}{\sum_{j=1}^nI_j}$$Since the mass estimate $\overline{m}$ in the equation above is more complicated than just an average of the $m_j$, a standard deviation based estimate of the error would not be appropriate. Therefore, we calculate the error as a bootstrap estimate over $B=150$ bootstrap replications:$$\Delta \overline{m} = \sqrt{\frac{\sum_{b=1}^{B}(\overline{m}_b - \overline{m})^2}{B-1}}$$The calculation of hill statistics for a single hill is implemented in `hill_stats`. To calculate the hill stats for a list of hills, we can call the wrapper `get_hill_data`.
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def hill_stats(idx:np.ndarray, hill_range:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, mass_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, stats:np.ndarray, hill_nboot_max:int, hill_nboot:int):
"""Function to calculate hill stats.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_range (np.ndarray): Hill range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
mass_data (np.ndarray): Array containing mass data.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
"""
np.random.seed(42)
start = hill_ptrs[idx]
end = hill_ptrs[idx + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
mz_ = mass_data[idx_]
int_sum = np.sum(int_)
int_area = np.abs(np.trapz(rt_[rt_idx[idx_]], int_)) #Area
rt_min = rt_[rt_idx[idx_]].min()
rt_max = rt_[rt_idx[idx_]].max()
if len(idx_) > hill_nboot_max:
bootsize = hill_nboot_max
else:
bootsize = len(idx_)
averages = np.zeros(hill_nboot)
average = 0
for i in range(hill_nboot):
boot = np.random.choice(len(int_), bootsize, replace=True)
boot_mz = np.sum((mz_[boot] * int_[boot])) / np.sum(int_[boot])
averages[i] = boot_mz
average += boot_mz
average_mz = average/hill_nboot
delta = 0
for i in range(hill_nboot):
delta += (average_mz - averages[i]) ** 2 #maybe easier?
delta_m = np.sqrt(delta / (hill_nboot - 1))
stats[idx,0] = average_mz
stats[idx,1] = delta_m
stats[idx,2] = int_sum
stats[idx,3] = int_area
stats[idx,4] = rt_min
stats[idx,5] = rt_max
def remove_duplicates(stats:np.ndarray, hill_data:np.ndarray, hill_ptrs:np.ndarray)-> (np.ndarray, np.ndarray, np.ndarray):
"""Remove duplicate hills.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
np.ndarray: Filtered hill stats.
"""
dups = pd.DataFrame(stats).duplicated() #all duplicated hills
idx_ = np.ones(len(hill_data), dtype = np.int32) #keep all
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
for _ in np.arange(len(stats))[dups]: #duplicates will be assigned zeros
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_, stats[~dups]
def get_hill_data(query_data:dict, hill_ptrs:np.ndarray, hill_data:np.ndarray, hill_nboot_max:int = 300, hill_nboot:int = 150) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to get the hill data.
Args:
query_data (dict): Data structure containing the query data.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
Returns:
np.ndarray: Hill stats.
np.ndarray: Sortindex.
np.ndarray: Upper index.
np.ndarray: Scan index.
np.ndarray: Hill data.
np.ndarray: Hill points.
"""
indices_ = np.array(query_data['indices_ms1'])
rt_ = np.array(query_data['rt_list_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
scan_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
int_data = np.array(query_data['int_list_ms1'])
    stats = np.zeros((len(hill_ptrs)-1, 6)) #average_mz, delta_m, int_sum, int_area, rt_min, rt_max
hill_stats(range(len(hill_ptrs)-1), np.arange(len(hill_ptrs)-1), hill_ptrs, hill_data, int_data, mass_data, rt_, scan_idx, stats, hill_nboot_max, hill_nboot)
# sort the stats
sortindex = np.argsort(stats[:,4]) #Sorted by rt_min
stats = stats[sortindex,:]
idxs_upper = stats[:,4].searchsorted(stats[:,5], side="right")
sortindex_ = np.arange(len(sortindex))[sortindex]
return stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs
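#Numeric illustration of the two formulas above: the intensity-weighted mass estimate and its bootstrap error.
#The values are made up; centering on the mean of the bootstrap replicates mirrors what hill_stats does.
rng = np.random.RandomState(42)
toy_mz = np.array([500.000, 500.001, 499.999, 500.002, 500.000])
toy_int = np.array([1e4, 5e4, 8e4, 5e4, 1e4])
weighted_mean = np.sum(toy_mz * toy_int) / np.sum(toy_int)  #first formula
boot_means = np.zeros(150)
for b in range(150):
    boot = rng.choice(len(toy_int), len(toy_int), replace=True)
    boot_means[b] = np.sum(toy_mz[boot] * toy_int[boot]) / np.sum(toy_int[boot])
delta_m = np.sqrt(np.sum((boot_means - boot_means.mean()) ** 2) / (150 - 1))  #second formula
print(weighted_mean, delta_m)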
###Output
_____no_output_____
###Markdown
Combining Hills to Isotope Patterns
After obtaining summary statistics of hills, the next step is to check whether they belong together to form an isotope pattern. For this, we check whether it is possible that they are neighbors in an isotope pattern, e.g. one having a 12C atom that has been replaced by a 13C version. The detailed criterion for the check is implemented in `check_isotope_pattern` and is as follows:$$\left | \Delta m-\frac{\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m_{1}}^{2} +\Delta {m_{2}}^{2}}$$The left side contains $\Delta m$, being the delta of the precise mass estimates from the summary statistics, and $\Delta M = 1.00286864$, which is the mass difference between the 13C peak and the monoisotopic peak in an averagine molecule of 1500 Da mass, divided by the charge $z$. The right side contains $\Delta S = 0.0109135$, which is the maximum shift that a sulphur atom can cause ($\Delta S = 2m(^{13}C) - 2m(^{12}C) - m(^{34}S) + m(^{32}S)$) and $\Delta {m_{1}}$ and $\Delta {m_{2}}$, which are the bootstrapped mass standard deviations.
###Code
#export
from alphapept.constants import mass_dict
DELTA_M = mass_dict['delta_M']
DELTA_S = mass_dict['delta_S']
maximum_offset = DELTA_M + DELTA_S
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, iso_mass_range:int = 5)-> bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
iso_mass_range (int, optional): Mass range. Defaults to 5.
Returns:
bool: Flag to see if pattern belongs to the same pattern.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
delta_mass = np.abs(mass1 - mass2)
left_side = np.abs(delta_mass - DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
#hide
def test_check_isotope_pattern():
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == True
mass2, delta_mass2 = 102.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == False
test_check_isotope_pattern()
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
mass2, delta_mass2 = 102.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
###Output
True
False
###Markdown
Cosine Correlation of two hills
An additional criterion that is being checked is that the intensity profiles have sufficient overlap in retention time. This is validated by ensuring that two hills have a cosine correlation of at least 0.6:$$\frac{\sum_{s=s_{min}}^{s_{max}}I_sJ_s}{\sqrt{\sum_{s=s_{min}}^{s_{max}}I_s^{2} \sum_{s=s_{min}}^{s_{max}}J_s^{2}}} \geq 0.6$$The intensities of two hills are only compared if both have an intensity value in a particular scan. Otherwise, the intensity is set to zero. Additionally, an overlap of at least three elements is required.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def correlate(scans_:np.ndarray, scans_2:np.ndarray, int_:np.ndarray, int_2:np.ndarray)->float:
"""Correlate two scans.
Args:
scans_ (np.ndarray): Masses of the first scan.
scans_2 (np.ndarray): Masses of the second scan.
int_ (np.ndarray): Intensity of the first scan.
int_2 (np.ndarray): Intensity of the second scan.
Returns:
float: Correlation.
"""
min_one, max_one = scans_[0], scans_[-1]
min_two, max_two = scans_2[0], scans_2[-1]
if min_one + 3 > max_two: # at least an overlap of 3 elements
corr = 0
elif min_two + 3 > max_one:
corr = 0
else:
min_s = min(min_one, min_two)
max_s = max(max_one, max_two)
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[scans_ - min_s] = int_
int_two_scaled[scans_2 - min_s] = int_2
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
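#Tiny usage example for correlate: two hills that overlap in most scans correlate well above the 0.6 cutoff,
#while hills without sufficient overlap return 0 (synthetic values for illustration).
scans_a = np.arange(10, 20)
int_a = np.array([1., 2., 5., 9., 12., 10., 6., 3., 2., 1.])
scans_b = np.arange(12, 22)
int_b = np.array([1., 3., 6., 10., 11., 8., 5., 2., 1., 1.])
print(correlate(scans_a, scans_b, int_a, int_b))       #high overlap -> correlation close to 1
print(correlate(scans_a, scans_a + 40, int_a, int_a))  #no overlap -> 0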
###Output
_____no_output_____
###Markdown
Extracting pre-Isotope Patterns
Now having two criteria to check whether hills could, in principle, belong together, we define the functions `extract_edge` and `edge_correlation`, together with the wrapper `get_pre_isotope_patterns`, to extract the connected hills. To minimize the number of comparisons we need to perform, we only compare the hills that overlap in time (i.e., the start of one hill `rt_min` needs to be before the end of the other hill `rt_max`) and are less than the sum of $\Delta M$ and $\Delta S$ apart. To extract all hills that belong together, we again rely on the `NetworkX`-package to extract the connected components.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def extract_edge(stats:np.ndarray, idxs_upper:np.ndarray, runner:int, max_index:int, maximum_offset:float, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:int=5)->list:
"""Extract edges.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparing.
runner (int): Index.
max_index (int): Unused.
maximum_offset (float): Maximum offset when comparing edges.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
Returns:
list: List of edges.
"""
edges = []
mass1 = stats[runner, 0]
delta_mass1 = stats[runner, 1]
for j in range(runner+1, idxs_upper[runner]):
mass2 = stats[j, 0]
if np.abs(mass2 - mass1) <= maximum_offset:
delta_mass2 = stats[j, 1]
for charge in range(iso_charge_min, iso_charge_max + 1):
if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
edges.append((runner, j))
break
return edges
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def edge_correlation(idx:np.ndarray, to_keep:np.ndarray, sortindex_:np.ndarray, pre_edges:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float):
"""Correlates two edges and flag them it they should be kept.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
to_keep (np.ndarray): Array with indices which edges should be kept.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
pre_edges (np.ndarray): Array with pre edges.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
"""
edge = pre_edges[idx,:]
y = sortindex_[edge[0]]
start = hill_ptrs[y]
end = hill_ptrs[y + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
con = sortindex_[edge[1]]
start = hill_ptrs[con]
end = hill_ptrs[con + 1]
idx_2 = hill_data[start:end]
int_2 = int_data[idx_2]
scans_2 = scan_idx[idx_2]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
to_keep[idx] = 1
#export
import networkx as nx
def get_pre_isotope_patterns(stats:np.ndarray, idxs_upper:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, maximum_offset:float, iso_charge_min:int=1, iso_charge_max:int=6, iso_mass_range:float=5, cc_cutoff:float=0.6)->list:
"""Function to extract pre isotope patterns.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparison.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
maximum_offset (float): Maximum offset when matching.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
cc_cutoff (float, optional): Correlation cutoff. Defaults to 0.6.
Returns:
list: List of pre isotope patterns.
"""
pre_edges = []
# Step 1
for runner in range(len(stats)):
pre_edges.extend(extract_edge(stats, idxs_upper, runner, idxs_upper[runner], maximum_offset, iso_charge_min, iso_charge_max, iso_mass_range))
to_keep = np.zeros(len(pre_edges), dtype='int')
pre_edges = np.array(pre_edges)
edge_correlation(range(len(to_keep)), to_keep, sortindex_, pre_edges, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
edges = pre_edges[to_keep.nonzero()]
G2 = nx.Graph()
for i in range(len(edges)):
G2.add_edge(edges[i][0], edges[i][1])
pre_isotope_patterns = [
sorted(list(c))
for c in sorted(nx.connected_components(G2), key=len, reverse=True)
]
return pre_isotope_patterns
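#Toy illustration of the final step in get_pre_isotope_patterns: correlating edges between hills are grouped into
#pre-isotope patterns via connected components (the hill indices below are made up).
toy_edges = [(0, 1), (1, 2), (5, 6)]
G_toy = nx.Graph()
G_toy.add_edges_from(toy_edges)
print([sorted(c) for c in sorted(nx.connected_components(G_toy), key=len, reverse=True)])  #-> [[0, 1, 2], [5, 6]]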
###Output
_____no_output_____
###Markdown
Extracting Isotope Patterns
The extracted pre-isotope patterns may not be consistent because their pair-wise mass differences may not correspond to the same charge. To extract isotope patterns from pre-isotope patterns, we need to ensure that they are consistent for a single charge. To do this, we start with the 100 most intense peaks from a pre-isotope pattern to be used as a seed. For each seed and charge we then try to extract the longest consistent isotope pattern. To check whether a hill is consistent with the seed we employ a modified checking criterion (`check_isotope_pattern_directed`) to be as follows:$$\left | m-m_j-\frac{j\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m}^{2} +\Delta {m_{j}}^{2}}$$Here $m$ is the mass of a seed peak, and $m_{j}$ refers to a peak relative to the seed. $j$ refers to the peaks to the left or right (negative or positive index) within the pattern. $j$ needs to run over consecutive values so that gaps are not allowed. Besides this consistency check, two hills are also checked to have a cosine correlation of at least 0.6. Programmatically, this is implemented in `grow_trail` and `grow`. These functions grow the pattern stepwise, adding matching hills to the seed on the left and right side until no more hills can be added.
###Code
#export
from numba.typed import List
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern_directed(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, index:int, iso_mass_range:float)->bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
index (int): Index (unused).
iso_mass_range (float): Isotope mass ranges.
Returns:
bool: Flag if two isotope patterns belong together.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
left_side = np.abs(mass1 - mass2 - index * DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
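#Quick check of the directed criterion: a peak that sits exactly one 13C spacing above the seed passes for
#charge 2 with index -1, following the -direction * index convention used in grow below (toy values).
seed_mass = 1000.0
partner_mass = seed_mass + DELTA_M / 2
print(check_isotope_pattern_directed(seed_mass, partner_mass, 0.001, 0.001, 2, -1, 5.0))  #-> True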
@alphapept.performance.compile_function(compilation_mode="numba")
def grow(trail:List, seed:int, direction:int, relative_pos:int, index:int, stats:np.ndarray, pattern:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Grows isotope pattern based on a seed and direction.
Args:
trail (List): List of hills belonging to a pattern.
seed (int): Seed position.
direction (int): Direction in which to grow the trail
relative_pos (int): Relative position.
index (int): Index.
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: List of hills belonging to a pattern.
"""
x = pattern[seed] # This is the seed
mass1 = stats[x,0]
delta_mass1 = stats[x,1]
k = sortindex_[x]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
growing = True
while growing:
if direction == 1:
if seed + relative_pos == len(pattern):
growing = False
break
else:
if seed + relative_pos < 0:
growing = False
break
y = pattern[seed + relative_pos] # This is a reference peak
l = sortindex_[y]
mass2 = stats[y,0]
delta_mass2 = stats[y,1]
start = hill_ptrs[l]
end = hill_ptrs[l + 1]
idx_ = hill_data[start:end]
int_2 = int_data[idx_]
scans_2 = scan_idx[idx_]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
if check_isotope_pattern_directed(mass1, mass2, delta_mass1, delta_mass2, charge, -direction * index, iso_mass_range):
if direction == 1:
trail.append(y)
else:
trail.insert(0, y)
index += (
1
) # Greedy matching: Only one edge for a specific distance, will not affect the following matches
delta_mass = np.abs(mass1 - mass2)
if (delta_mass > (DELTA_M+DELTA_S) * index): # the pattern is sorted so there is a maximum to look back
break
relative_pos += direction
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def grow_trail(seed:int, pattern:np.ndarray, stats:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to grow an isotope pattern to the left and right side.
Args:
seed (int): Seed position.
pattern (np.ndarray): Isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Isotope pattern.
"""
x = pattern[seed]
trail = List()
trail.append(x)
trail = grow(trail, seed, -1, -1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trail = grow(trail, seed, 1, 1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def get_trails(seed:int, pattern:np.ndarray, stats:np.ndarray, charge_range:List, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to extract trails for a given charge range.
Args:
seed (int): Seed index.
pattern (np.ndarray): Pre isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge_range (List): Charge range.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Trail of consistent hills.
"""
trails = []
for charge in charge_range:
trail = grow_trail(seed, pattern, stats, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trails.append(trail)
return trails
#export
def plot_pattern(pattern:np.ndarray, sorted_hills:np.ndarray, centroids:np.ndarray, hill_data:np.ndarray):
"""Helper function to plot a pattern.
Args:
pattern (np.ndarray): Pre isotope pattern.
sorted_hills (np.ndarray): Hills, sorted.
centroids (np.ndarray): 1D Array containing the masses of the centroids.
hill_data (np.ndarray): Array containing the indices to hills.
"""
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10,10))
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
mzs = []
rts = []
ints = []
for entry in pattern:
hill = sorted_hills[entry]
hill_data = np.array([centroids[_[0]][_[1]] for _ in hill], dtype=centroid_dtype)
int_profile = hill_data["int"]
ax1.plot(hill_data["rt"], hill_data["int"])
ax2.scatter(hill_data["rt"], hill_data["mz"], s = hill_data["int"]/5e5 )
ax1.set_title('Pattern')
ax1.set_xlabel('RT (min)')
ax1.set_ylabel('Intensity')
ax2.set_xlabel('RT (min)')
ax2.set_ylabel('m/z')
plt.show()
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def get_minpos(y:np.ndarray, iso_split_level:float)->List:
"""Function to get a list of minima in a trace.
A minimum is returned if the ratio of lower of the surrounding maxima to the minimum is larger than the splitting factor.
Args:
y (np.ndarray): Input array.
iso_split_level (float): Isotope split level.
Returns:
List: List with min positions.
"""
minima = get_local_minima(y)
minima_list = List()
for minpos in minima:
minval = y[minpos]
left_max = (y[:minpos]).max()
right_max = (y[minpos:]).max()
minimum_max = min(left_max, right_max)
if minimum_max / minval >= iso_split_level:
minima_list.append(minpos)
return minima_list
@alphapept.performance.compile_function(compilation_mode="numba")
def get_local_minima(y:np.ndarray)->List:
"""Function to return all local minima of a array
Args:
y (np.ndarray): Input array.
Returns:
List: List with indices to minima.
"""
minima = List()
for i in range(1, len(y) - 1):
if is_local_minima(y, i):
minima.append(i)
return minima
@alphapept.performance.compile_function(compilation_mode="numba")
def is_local_minima(y:np.ndarray, i:int)->bool:
"""Check if position is a local minima.
Args:
y (np.ndarray): Input array.
i (int): Position to check.
Returns:
bool: Flag if position is minima or not.
"""
return (y[i - 1] > y[i]) & (y[i + 1] > y[i])
@alphapept.performance.compile_function(compilation_mode="numba")
def truncate(array:np.ndarray, intensity_profile:np.ndarray, seedpos:int, iso_split_level:float)->np.ndarray:
"""Function to truncate an intensity profile around its seedposition.
Args:
array (np.ndarray): Input array.
intensity_profile (np.ndarray): Intensities for the input array.
seedpos (int): Seedposition.
iso_split_level (float): Split level.
Returns:
np.ndarray: Truncated array.
"""
minima = int_list_to_array(get_minpos(intensity_profile, iso_split_level))
if len(minima) > 0:
left_minima = minima[minima < seedpos]
right_minima = minima[minima > seedpos]
# If the minimum is smaller than the seed
if len(left_minima) > 0:
minpos = left_minima[-1]
else:
minpos = 0
if len(right_minima) > 0:
maxpos = right_minima[0]
else:
maxpos = len(array)
array = array[minpos:maxpos+1]
return array
#hide
def test_get_minpos():
"""
Generate an intensity profile with local minima
Check that the minima are found
"""
intensity_profile = np.ones(20) * 10
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
minima = get_minpos(intensity_profile, 2)
minima_list = [_ for _ in minima]
assert minima_list == minima_ref
test_get_minpos()
###Output
_____no_output_____
###Markdown
Isolating Isotope Patterns
The extraction of the longest consistent isotope pattern is implemented in `isolate_isotope_pattern`. Here, three additional checks for an isotope pattern are implemented. The first one is `truncate`. Here, one checks whether the seed position has a minimum to its left or right side. If a minimum is found, the isotope pattern is cut off at this position. The second one is a mass filter. If the seed has a mass smaller than 1000, the intensity maximum is detected, and all smaller masses are discarded. This reflects the averagine distribution for small masses, where no minimum on the left side can be found. The third one is `check_averagine`, which relies on `pattern_to_mz` and `cosine_averagine`. It is used to ensure that the extracted isotope pattern has a cosine correlation with the averagine isotope pattern of the same mass of at least 0.6. After the longest consistent isotope pattern is found, the hills are removed from the pre-isotope pattern, and the process is repeated until no more isotope patterns can be extracted from the pre-isotope patterns.
###Code
#export
from alphapept.chem import mass_to_dist
from alphapept.constants import averagine_aa, isotopes, Isotope
from numba.typed import Dict
@alphapept.performance.compile_function(compilation_mode="numba")
def check_averagine(stats:np.ndarray, pattern:np.ndarray, charge:int, averagine_aa:Dict, isotopes:Dict)->float:
"""Function to compare a pattern to an averagine model.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
Returns:
float: Averagine correlation.
"""
masses, intensity = pattern_to_mz(stats, pattern, charge)
spec_one = np.floor(masses).astype(np.int64)
int_one = intensity
spec_two, int_two = mass_to_dist(np.min(masses), averagine_aa, isotopes) # maybe change to no rounded version
spec_two = np.floor(spec_two).astype(np.int64)
return cosine_averagine(int_one, int_two, spec_one, spec_two)
@alphapept.performance.compile_function(compilation_mode="numba")
def pattern_to_mz(stats:np.ndarray, pattern:np.ndarray, charge:int)-> (np.ndarray, np.ndarray):
"""Function to calculate masses and intensities from pattern for a given charge.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge of the pattern.
Returns:
np.ndarray: masses
np.ndarray: intensity
"""
mzs = np.zeros(len(pattern))
ints = np.zeros(len(pattern))
for i in range(len(pattern)):
entry = pattern[i]
mzs[i] = mz_to_mass(stats[entry,0], charge)
ints[i] = stats[entry,2]
sortindex = np.argsort(mzs)
masses = mzs[sortindex]
intensity = ints[sortindex]
return masses, intensity
@alphapept.performance.compile_function(compilation_mode="numba")
def cosine_averagine(int_one:np.ndarray, int_two:np.ndarray, spec_one:np.ndarray, spec_two:np.ndarray)-> float:
"""Calculate the cosine correlation of two hills.
Args:
int_one (np.ndarray): Intensity of the first hill.
int_two (np.ndarray): Intensity of the second hill.
spec_one (np.ndarray): Scan numbers of the first hill.
spec_two (np.ndarray): Scan numbers of the second hill.
Returns:
float: Cosine
"""
min_one, max_one = spec_one[0], spec_one[-1]
min_two, max_two = spec_two[0], spec_two[-1]
min_s = np.min(np.array([min_one, min_two]))
max_s = np.max(np.array([max_one, max_two]))
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[spec_one - min_s] = int_one
int_two_scaled[spec_two - min_s] = int_two
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
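# Illustrative sketch (toy values): the cosine score computed by cosine_averagine,
# reproduced with plain numpy on two already aligned intensity vectors.
_int_a = np.array([10., 8., 5., 2.])
_int_b = np.array([9., 9., 4., 1.])
_cos = np.sum(_int_a * _int_b) / np.sqrt(np.sum(_int_a ** 2) * np.sum(_int_b ** 2))
# check_averagine accepts a pattern as averagine-like if this score exceeds 0.6.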
@alphapept.performance.compile_function(compilation_mode="numba")
def int_list_to_array(numba_list:List)->np.ndarray:
"""Numba compatbilte function to convert a numba list with integers to a numpy array
Args:
numba_list (List): Input numba-typed List.
Returns:
np.ndarray: Output numpy array.
"""
array = np.zeros(len(numba_list), dtype=np.int64)
for i in range(len(array)):
array[i] = numba_list[i]
return array
M_PROTON = mass_dict['Proton']
@alphapept.performance.compile_function(compilation_mode="numba")
def mz_to_mass(mz:float, charge:int)->float:
"""Function to calculate the mass from a mz value.
Args:
mz (float): M/z
charge (int): Charge.
Raises:
NotImplementedError: When a negative charge is used.
Returns:
float: mass
"""
if charge < 0:
raise NotImplementedError("Negative Charges not implemented.")
mass = mz * charge - charge * M_PROTON
return mass
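# Illustrative check (toy values): a doubly protonated ion observed at m/z 500.5 has a
# neutral mass of charge * m/z minus the mass of the added protons.
_toy_mass = mz_to_mass(500.5, 2)  # = 2 * 500.5 - 2 * M_PROTON, roughly 999 Da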
#hide
if False:
def test_truncate():
"""
Generate an intensity profile with local minima
        Check whether the profile is correctly truncated with respect to the seed.
"""
array = np.arange(0, 20)
intensity_profile = np.ones(20) * 10
iso_split_level = 1.3
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
seedpos = 5
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([3, 4, 5, 6, 7]))
seedpos = 0
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([0, 1, 2, 3]))
seedpos = len(array)
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([17, 18, 19]))
test_truncate()
###Output
_____no_output_____
###Markdown
Isotope PatternsThe wrapper function `get_isotope_patterns` iterates over all pre_isotope_patterns.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def isolate_isotope_pattern(pre_pattern:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, iso_mass_range:float, charge_range:List, averagine_aa:Dict, isotopes:Dict, iso_n_seeds:int, cc_cutoff:float, iso_split_level:float)->(np.ndarray, int):
"""Isolate isotope patterns.
Args:
pre_pattern (np.ndarray): Pre isotope pattern.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
iso_mass_range (float): Mass range for checking isotope patterns.
charge_range (List): Charge range.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_n_seeds (int): Number of seeds.
cc_cutoff (float): Cutoff value for what is considered correlating.
iso_split_level (float): Split level when isotopes are split.
Returns:
np.ndarray: Array with the best pattern.
int: Charge of the best pattern.
"""
longest_trace = 0
champion_trace = None
champion_charge = 0
champion_intensity = 0
# Sort patterns by mass
    sortindex = np.argsort(stats[pre_pattern][:,0]) #mass
sorted_pattern = pre_pattern[sortindex]
    massindex = np.argsort(stats[sorted_pattern][:,2])[::-1][:iso_n_seeds] # indices of the iso_n_seeds most intense hills
    # Use the most intense hills of the pre_pattern as seeds
for seed in massindex: # Loop through all seeds
seed_global = sorted_pattern[seed]
trails = get_trails(seed, sorted_pattern, stats, charge_range, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
for index, trail in enumerate(trails):
            if len(trail) >= longest_trace: # Needs to be at least as long as the current champion
arr = int_list_to_array(trail)
intensity_profile = stats[arr][:,2]
seedpos = np.nonzero(arr==seed_global)[0][0]
# truncate around the seed...
arr = truncate(arr, intensity_profile, seedpos, iso_split_level)
intensity_profile = stats[arr][:,2]
# Remove lower masses:
# Take the index of the maximum and remove all masses on the left side
if charge_range[index] * stats[seed_global, 0] < 1000:
maxpos = np.argmax(intensity_profile)
arr = arr[maxpos:]
intensity_profile = stats[arr][:,2]
if (len(arr) > longest_trace) | ((len(arr) == longest_trace) & (intensity_profile.sum() > champion_intensity)):
# Averagine check
cc = check_averagine(stats, arr, charge_range[index], averagine_aa, isotopes)
if cc > 0.6:
# Update the champion
champion_trace = arr
champion_charge = charge_range[index]
longest_trace = len(arr)
champion_intensity = intensity_profile.sum()
return champion_trace, champion_charge
#hide
if False:
def test_get_isotope_patterns():
test_centroids = [
[
(300, 50, 1, 1),
(300.501, 40, 1, 1),
(301.003, 30, 1, 1),
(301.504, 20, 1, 1),
(302.006, 10, 1, 1),
],
[
(300, 50, 2, 2),
(300.501, 40, 2, 2),
(301.003, 30, 2, 2),
(301.504, 20, 2, 2),
(302.006, 10, 2, 2),
],
[
(300, 50, 3, 3),
(300.501, 40, 3, 3),
(301.003, 30, 3, 3),
(301.504, 20, 3, 3),
(302.006, 10, 3, 3),
],
[
(300, 50, 4, 4),
(300.501, 40, 4, 4),
(301.003, 30, 4, 4),
(301.504, 20, 4, 4),
(302.006, 10, 4, 4),
],
[
(300, 50, 5, 5),
(300.501, 40, 5, 5),
(301.003, 30, 5, 5),
(301.504, 20, 5, 5),
(302.006, 10, 5, 5),
],
[(400, 10, 6, 6), (401, 10, 6, 6), (402, 10, 6, 6)],
[(400, 10, 7, 7), (401, 10, 7, 7), (402, 10, 7, 7)],
[(400, 10, 8, 8), (401, 10, 8, 8), (402, 10, 8, 8)],
[(400, 10, 9, 9), (401, 10, 9, 9), (402, 10, 9, 9)],
]
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
test_centroids_tmp = [np.array(_, dtype=centroid_dtype) for _ in test_centroids]
test_centroids = List([_ for _ in test_centroids_tmp])
test_hills = get_hills(test_centroids)
sorted_hills, stats, data, hill_data, hill_ptrs = get_hill_data(test_hills, test_centroids)
pre_patterns = get_edges(stats, data)
isotope_patterns, isotope_charges = get_isotope_patterns(pre_patterns, stats, data, averagine_aa, isotopes)
assert np.all(isotope_patterns[0] == np.array([0, 1, 2, 3, 4]))
assert isotope_charges[0] == 2
assert np.all(isotope_patterns[1] == np.array([5,6,7]))
assert isotope_charges[1] == 1
test_get_isotope_patterns()
#export
from numba.typed import List
from typing import Callable, Union
def get_isotope_patterns(pre_isotope_patterns:list, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, averagine_aa:Dict, isotopes:Dict, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:float = 5, iso_n_seeds:int = 100, cc_cutoff:float=0.6, iso_split_level:float = 1.3, callback:Union[Callable, None]=None) -> (np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to iterate over pre_isotope_patterns.
Args:
pre_isotope_patterns (list): List of pre-isotope patterns.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
iso_n_seeds (int, optional): Number of isotope seeds. Defaults to 100.
        cc_cutoff (float, optional): Cutoff for what is considered correlating. Defaults to 0.6.
        iso_split_level (float, optional): Isotope split level. Defaults to 1.3.
callback (Union[Callable, None], optional): Callback function for progress. Defaults to None.
Returns:
list: List of isotope patterns.
np.ndarray: Iso idx.
np.ndarray: Array containing isotope charges.
"""
isotope_patterns = []
isotope_charges = []
charge_range = List()
for i in range(iso_charge_min, iso_charge_max + 1):
charge_range.append(i)
for idx, pre_pattern in enumerate(pre_isotope_patterns):
extract = True
while extract:
isotope_pattern, isotope_charge = isolate_isotope_pattern(np.array(pre_pattern), hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, iso_mass_range, charge_range, averagine_aa, isotopes, iso_n_seeds, cc_cutoff, iso_split_level)
if isotope_pattern is None:
length = 0
else:
length = len(isotope_pattern)
if length > 1:
isotope_charges.append(isotope_charge)
isotope_patterns.append(isotope_pattern)
pre_pattern = [_ for _ in pre_pattern if _ not in isotope_pattern]
if len(pre_pattern) <= 1:
extract = False
else:
extract = False
if callback:
callback((idx+1)/len(pre_isotope_patterns))
iso_patterns = np.zeros(sum([len(_) for _ in isotope_patterns]), dtype=np.int64)
iso_idx = np.zeros(len(isotope_patterns)+1, dtype='int')
start = 0
for idx, _ in enumerate(isotope_patterns):
iso_patterns[start:start+len(_)] = _
start += len(_)
iso_idx[idx+1] = start
return iso_patterns, iso_idx, np.array(isotope_charges)
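# Illustrative sketch (toy values, not real data): the flat iso_patterns / iso_idx
# output can be unpacked back into one hill-index array per isotope pattern.
_iso_patterns = np.array([0, 1, 2, 5, 6], dtype=np.int64)  # concatenated hill indices
_iso_idx = np.array([0, 3, 5])  # pattern i spans iso_patterns[iso_idx[i]:iso_idx[i+1]]
_per_pattern = [_iso_patterns[_iso_idx[i]:_iso_idx[i + 1]] for i in range(len(_iso_idx) - 1)]
# _per_pattern -> [array([0, 1, 2]), array([5, 6])]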
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def report_(idx:np.ndarray, isotope_charges:list, isotope_patterns:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, results:np.ndarray, lookup_idx:np.ndarray):
"""Function to extract summary statstics from a list of isotope patterns and charges.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
results (np.ndarray): Recordarray with isotope pattern summary statistics.
lookup_idx (np.ndarray): Lookup array for each centroid.
"""
pattern = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
isotope_data = stats[pattern]
mz = np.min(isotope_data[:, 0])
mz_std = np.mean(isotope_data[:, 1])
charge = isotope_charges[idx]
mass = mz_to_mass(mz, charge)
int_max_idx = np.argmax(isotope_data[:, 2])
mz_most_abundant = isotope_data[:, 0][int_max_idx]
int_max = isotope_data[:,2][int_max_idx]
rt_start = isotope_data[int_max_idx, 4] # This is the start of the most abundant trace
rt_end = isotope_data[int_max_idx, 5]
# better measurement of the peak with interpolation
rt_min_ = min(isotope_data[:, 4])
rt_max_ = max(isotope_data[:, 5])
rt_range = np.linspace(rt_min_, rt_max_, 100)
trace_sum = np.zeros_like(rt_range)
for i, k in enumerate(pattern):
x = sortindex_[k]
start = hill_ptrs[x]
end = hill_ptrs[x + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
rts = rt_[rt_idx[idx_]]
lookup_idx[idx_, 0] = idx
lookup_idx[idx_, 1] = i
interpolation = np.interp(rt_range, rts, int_)
#Filter
interpolation[:(rt_range < rts[0]).sum()] = 0
right_cut = (rt_range > rts[-1]).sum()
if right_cut > 0:
interpolation[-right_cut:]= 0
trace_sum += interpolation
rt_apex_idx = trace_sum.argmax()
rt_apex = rt_range[rt_apex_idx]
trace = trace_sum
half_max = trace.max()/2
if rt_apex_idx == 0:
left_apex = 0
else:
left_apex = np.abs(trace[:rt_apex_idx]-half_max).argmin()
right_apex = np.abs(trace[rt_apex_idx:]-half_max).argmin()+rt_apex_idx
int_apex = trace_sum[rt_apex_idx]
fwhm = rt_range[right_apex] - rt_range[left_apex]
n_isotopes = len(pattern)
    rt_cutoff = 0.95 # the peak boundaries are set where the trace drops to 5% of its maximum
if rt_apex_idx == 0:
rt_min_idx = 0
else:
rt_min_idx = np.abs(trace[:rt_apex_idx]-trace.max()*(1-rt_cutoff)).argmin()
rt_max_idx = np.abs(trace[rt_apex_idx:]-trace.max()*(1-rt_cutoff)).argmin()+rt_apex_idx
#plt.xlabel('rt')
#plt.ylabel('int')
#plt.show()
#plt.plot(rt_range, trace_sum)
#plt.plot([rt_range[left_apex], rt_range[right_apex]], [(trace[left_apex] + trace[right_apex])/2]*2, 'k:')
#plt.plot(rt_range[rt_apex_idx], trace[rt_apex_idx], 'k*')
#plt.plot(rt_range[rt_min_idx], trace[rt_min_idx], 'k*')
#plt.plot(rt_range[rt_max_idx], trace[rt_max_idx], 'k*')
#plt.show()
rt_start = rt_range[rt_min_idx]
rt_end = rt_range[rt_max_idx]
int_area = np.abs(np.trapz(trace_sum[rt_min_idx:rt_max_idx], rt_range[rt_min_idx:rt_max_idx]))
int_sum = trace_sum.sum()
results[idx,:] = np.array([mz, mz_std, mz_most_abundant, charge, rt_start, rt_apex, rt_end, fwhm, n_isotopes, mass, int_apex, int_area, int_sum])
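# Illustrative sketch (toy values) of the apex and FWHM logic used in report_,
# applied to a synthetic triangular elution trace.
_rt_range = np.linspace(0, 10, 101)
_trace = np.interp(_rt_range, [0, 5, 10], [0, 100, 0])  # toy peak with apex at rt = 5
_apex_idx = _trace.argmax()
_half_max = _trace.max() / 2
_left = np.abs(_trace[:_apex_idx] - _half_max).argmin()
_right = np.abs(_trace[_apex_idx:] - _half_max).argmin() + _apex_idx
_fwhm = _rt_range[_right] - _rt_range[_left]  # ~5 for this toy trace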
#export
import pandas as pd
def feature_finder_report(query_data:dict, isotope_patterns:list, isotope_charges:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray)->pd.DataFrame:
"""Creates a report dataframe with summary statistics of the found isotope patterns.
Args:
query_data (dict): Data structure containing the query data.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to the isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
Returns:
        pd.DataFrame: DataFrame with isotope pattern summary statistics.
        np.ndarray: Lookup array matching each centroid to an isotope pattern and hill.
"""
rt_ = np.array(query_data['rt_list_ms1'])
indices_ = np.array(query_data['indices_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
rt_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
    lookup_idx = np.zeros((len(mass_data), 2), dtype=np.int64) - 1
int_data = np.array(query_data['int_list_ms1'])
results = np.zeros((len(isotope_charges), 13))
report_(range(len(isotope_charges)), isotope_charges, isotope_patterns, iso_idx, stats, sortindex_, hill_ptrs, hill_data, int_data, rt_, rt_idx, results, lookup_idx)
df = pd.DataFrame(results, columns = ['mz','mz_std','mz_most_abundant','charge','rt_start','rt_apex','rt_end','fwhm','n_isotopes','mass','int_apex','int_area', 'int_sum'])
    df = df.sort_values(['rt_start','mz'])
return df, lookup_idx
###Output
_____no_output_____
###Markdown
Data OutputFor each feature that is found, we extract summary statistics and put them in tabular form to be used as a pandas dataframe. PlottingFor quality control reasons, we also employ a function to plot a feature in its local environment. External Feature FinderTo utilize the command-line Feature Finder from Bruker `4DFF-3.13` - `uff-cmdline2.exe`, we call it via a subprocess and wait until completion.
###Code
#export
import subprocess
import os
import platform
import logging
def extract_bruker(file:str, base_dir:str = "ext/bruker/FF", config:str = "proteomics_4d.config"):
"""Call Bruker Feautre Finder via subprocess.
Args:
file (str): Filename for feature finding.
        base_dir (str, optional): Base directory where the feature finder is stored. Defaults to "ext/bruker/FF".
config (str, optional): Config file for feature finder. Defaults to "proteomics_4d.config".
Raises:
NotImplementedError: Unsupported operating system.
FileNotFoundError: Feature finder not found.
FileNotFoundError: Config file not found.
FileNotFoundError: Feature file not found.
"""
feature_path = file + '/'+ os.path.split(file)[-1] + '.features'
base_dir = os.path.join(os.path.dirname(__file__), base_dir)
operating_system = platform.system()
if operating_system == 'Linux':
ff_dir = os.path.join(base_dir, 'linux64','uff-cmdline2')
logging.info('Using Linux FF')
elif operating_system == 'Windows':
ff_dir = os.path.join(base_dir, 'win64','uff-cmdline2.exe')
logging.info('Using Windows FF')
else:
raise NotImplementedError(f"System {operating_system} not supported.")
if os.path.exists(feature_path):
return feature_path
else:
if not os.path.isfile(ff_dir):
raise FileNotFoundError(f'Bruker feature finder cmd not found here {ff_dir}.')
config_path = base_dir + '/'+ config
if not os.path.isfile(config_path):
raise FileNotFoundError(f'Config file not found here {config_path}.')
if operating_system == 'Windows':
FF_parameters = [ff_dir,'--ff 4d',f'--readconfig "{config_path}"', f'--analysisDirectory "{file}"']
process = subprocess.Popen(' '.join(FF_parameters), stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, b''):
logtxt = line.decode('utf8')
logging.info(logtxt[48:].rstrip()) #Remove logging info from FF
elif operating_system == 'Linux':
FF_parameters = [
ff_dir,
'--ff',
'4d',
'--readconfig',
config_path,
'--analysisDirectory',
file
]
process = subprocess.run(FF_parameters, stdout=subprocess.PIPE)
if os.path.exists(feature_path):
return feature_path
else:
raise FileNotFoundError(f"Feature file {feature_path} does not exist.")
import sqlalchemy as db
from tqdm import tqdm
def convert_bruker(feature_path:str)->pd.DataFrame:
"""Reads feature table and converts to feature table to be used with AlphaPept.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
feature_table = pd.read_sql_table('LcTimsMsFeature', engine_featurefile)
from alphapept.constants import mass_dict
M_PROTON = mass_dict['Proton']
feature_table['Mass'] = feature_table['MZ'].values * feature_table['Charge'].values - feature_table['Charge'].values*M_PROTON
feature_table = feature_table.rename(columns={"MZ": "mz","Mass": "mass", "RT": "rt_apex", "RT_lower":"rt_start", "RT_upper":"rt_end", "Mobility": "mobility", "Mobility_lower": "mobility_lower", "Mobility_upper": "mobility_upper", "Charge":"charge","Intensity":'int_sum',"ClusterCount":'n_isotopes'})
feature_table['rt_apex'] = feature_table['rt_apex']/60
feature_table['rt_start'] = feature_table['rt_start']/60
feature_table['rt_end'] = feature_table['rt_end']/60
return feature_table
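# Hypothetical usage sketch (not executed here, since it requires the external Bruker
# feature finder binaries and a .d raw folder; the path below is a placeholder).
if False:
    feature_path = extract_bruker("/path/to/sample.d")
    feature_table = convert_bruker(feature_path)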
def map_bruker(feature_path:str, feature_table:pd.DataFrame, query_data:dict)->pd.DataFrame:
"""Map Ms1 to Ms2 via Table FeaturePrecursorMapping from Bruker FF.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
feature_table (pd.DataFrame): Pandas DataFrame containing the features.
query_data (dict): Data structure containing the query data.
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
mapping = pd.read_sql_table('FeaturePrecursorMapping', engine_featurefile)
mapping = mapping.set_index('PrecursorId')
feature_table= feature_table.set_index('Id')
query_prec_id = query_data['prec_id']
#Now look up the feature for each precursor
mass_matched = []
mz_matched = []
rt_matched = []
query_idx = []
f_idx = []
for idx, prec_id in tqdm(enumerate(query_prec_id)):
try:
f_id = mapping.loc[prec_id]['FeatureId']
all_matches = feature_table.loc[f_id]
if type(f_id) == np.int64:
match = all_matches
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
else:
for k in range(len(all_matches)):
match = all_matches.iloc[k]
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
except KeyError:
pass
features = pd.DataFrame(np.array([mass_matched, mz_matched, rt_matched, query_idx, f_idx]).T, columns = ['mass_matched', 'mz_matched', 'rt_matched', 'query_idx', 'feature_idx'])
features['query_idx'] = features['query_idx'].astype('int')
return features
###Output
_____no_output_____
###Markdown
Wrapper
###Code
#export
import numpy as np
import logging
import os
from alphapept.search import query_data_to_features
import alphapept.io
import functools
def find_features(to_process:tuple, callback:Union[Callable, None] = None, parallel:bool = False)-> Union[str, bool]:
"""Wrapper for feature finding.
Args:
        to_process (tuple): to_process tuple (file index, settings), to be used from a process pool.
callback (Union[Callable, None], optional): Optional callback function. Defaults to None.
parallel (bool, optional): Flag to use parallel processing. Currently unused. Defaults to False.
Raises:
NotImplementedError: Error if the file extension is not understood.
Returns:
        Union[str, bool]: Returns True if the function was successful, otherwise the exception as a string.
"""
try:
index, settings = to_process
file_name = settings['experiment']['file_paths'][index]
base, ext = os.path.splitext(file_name)
if ext.lower() == '.raw':
datatype='thermo'
elif ext.lower() == '.d':
datatype='bruker'
elif ext.lower() == '.mzml':
datatype='mzml'
else:
raise NotImplementedError('File extension {} not understood.'.format(ext))
out_file = f"{base}.ms_data.hdf"
skip = True
if os.path.isfile(out_file):
try:
alphapept.io.MS_Data_File(
out_file
).read(dataset_name="features")
logging.info(
'Found *.hdf with features for {}'.format(out_file)
)
except KeyError:
logging.info(
'No *.hdf file with features found for {}. Adding to feature finding list.'.format(out_file)
)
skip = False
if not skip:
ms_file = alphapept.io.MS_Data_File(out_file, is_read_only=False)
query_data = ms_file.read_DDA_query_data()
if not settings['workflow']["find_features"]:
features = query_data_to_features(query_data)
else:
if datatype in ['thermo','mzml']:
from alphapept.constants import averagine_aa, isotopes
f_settings = settings['features']
max_gap = f_settings['max_gap']
centroid_tol = f_settings['centroid_tol']
hill_split_level = f_settings['hill_split_level']
iso_split_level = f_settings['iso_split_level']
#Cleanup if
int_data = np.array(query_data['int_list_ms1'])
window = f_settings['hill_smoothing']
hill_check_large = f_settings['hill_check_large']
iso_charge_min = f_settings['iso_charge_min']
iso_charge_max = f_settings['iso_charge_max']
iso_n_seeds = f_settings['iso_n_seeds']
hill_nboot_max = f_settings['hill_nboot_max']
hill_nboot = f_settings['hill_nboot']
iso_mass_range = f_settings['iso_mass_range']
iso_corr_min = f_settings['iso_corr_min']
logging.info('Feature finding on {}'.format(file_name))
logging.info(f'Hill extraction with centroid_tol {centroid_tol} and max_gap {max_gap}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, centroid_tol)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
logging.info(f'Repeating hill extraction with centroid_tol {score_median+score_std*3:.2f}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, score_median+score_std*3)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
                    hill_ptrs = split_hills(hill_ptrs, hill_data, int_data, hill_split_level=hill_split_level, window = window) # hill length is in there already
logging.info(f'After split hill_ptrs {len(hill_ptrs):,}')
hill_data, hill_ptrs = filter_hills(hill_data, hill_ptrs, int_data, hill_check_large =hill_check_large, window=window)
logging.info(f'After filter hill_ptrs {len(hill_ptrs):,}')
stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs = get_hill_data(query_data, hill_ptrs, hill_data, hill_nboot_max = hill_nboot_max, hill_nboot = hill_nboot)
logging.info('Extracting hill stats complete')
pre_isotope_patterns = get_pre_isotope_patterns(stats, idxs_upper, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, maximum_offset, iso_charge_min=iso_charge_min, iso_charge_max=iso_charge_max, iso_mass_range=iso_mass_range, cc_cutoff=iso_corr_min)
logging.info('Found {:,} pre isotope patterns.'.format(len(pre_isotope_patterns)))
isotope_patterns, iso_idx, isotope_charges = get_isotope_patterns(pre_isotope_patterns, hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, averagine_aa, isotopes, iso_charge_min = iso_charge_min, iso_charge_max = iso_charge_max, iso_mass_range = iso_mass_range, iso_n_seeds = iso_n_seeds, cc_cutoff = iso_corr_min, iso_split_level=iso_split_level, callback=None)
logging.info('Extracted {:,} isotope patterns.'.format(len(isotope_charges)))
feature_table, lookup_idx = feature_finder_report(query_data, isotope_patterns, isotope_charges, iso_idx, stats, sortindex_, hill_ptrs, hill_data)
lookup_idx_df = pd.DataFrame(lookup_idx, columns = ['isotope_pattern', 'isotope_pattern_hill'])
ms_file.write(lookup_idx_df, dataset_name="feature_table_idx")
logging.info('Report complete.')
elif datatype == 'bruker':
logging.info('Feature finding on {}'.format(file_name))
feature_path = extract_bruker(file_name)
feature_table = convert_bruker(feature_path)
                logging.info('Bruker feature finder complete. Extracted {:,} features.'.format(len(feature_table)))
# Calculate additional params
feature_table['rt_length'] = feature_table['rt_end'] - feature_table['rt_start']
feature_table['rt_right'] = feature_table['rt_end'] - feature_table['rt_apex']
feature_table['rt_left'] = feature_table['rt_apex'] - feature_table['rt_start']
feature_table['rt_tail'] = feature_table['rt_right'] / feature_table['rt_left']
logging.info('Matching features to query data.')
if 'mono_mzs2' not in query_data.keys():
logging.info('No MS2-data to match.')
features = pd.DataFrame()
else:
features = map_ms2(feature_table, query_data, **settings['features'])
logging.info('Saving feature table.')
ms_file.write(feature_table, dataset_name="feature_table")
logging.info('Feature table saved to {}'.format(out_file))
logging.info('Saving features.')
ms_file.write(features, dataset_name="features")
logging.info(f'Feature finding of file {file_name} complete.')
return True
except Exception as e:
logging.error(f'Feature finding of file {file_name} failed. Exception {e}')
return f"{e}" #Can't return exception object, cast as string
###Output
_____no_output_____
###Markdown
MappingMapping MS1 to MS2
###Code
#export
from sklearn.neighbors import KDTree
import pandas as pd
import numpy as np
def replace_infs(array:np.ndarray)->np.ndarray:
"""Replace nans and infs with 0
Args:
array (np.ndarray): Input array.
Returns:
np.ndarray: Output array without nans and infs.
"""
array[array == -np.inf] = 0
array[array == np.inf] = 0
array[np.isnan(array)] = 0
return array
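# Illustrative example (toy values): infs and nans are replaced by 0 in place.
_arr = np.array([1.0, np.inf, -np.inf, np.nan, 2.0])
_cleaned = replace_infs(_arr)  # -> array([1., 0., 0., 0., 2.])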
def map_ms2(feature_table:pd.DataFrame, query_data:dict, map_mz_range:float = 1, map_rt_range:float = 0.5, map_mob_range:float = 0.3, map_n_neighbors:int=5, search_unidentified:bool = False, **kwargs)->pd.DataFrame:
"""Map MS1 features to MS2 based on rt and mz.
    If ion mobility (ccs) information is included, it is added to the mapping as well.
Args:
feature_table (pd.DataFrame): Pandas DataFrame with features.
query_data (dict): Data structure containing the query data.
map_mz_range (float, optional): Mapping range for mz (Da). Defaults to 1.
map_rt_range (float, optional): Mapping range for rt (min). Defaults to 0.5.
map_mob_range (float, optional): Mapping range for mobility (%). Defaults to 0.3.
map_n_neighbors (int, optional): Maximum number of neighbors to be extracted. Defaults to 5.
search_unidentified (bool, optional): Flag to perform search on features that have no isotope pattern. Defaults to False.
Returns:
pd.DataFrame: Table with features.
"""
feature_table['rt'] = feature_table['rt_apex']
range_dict = {}
range_dict['mz'] = ('mono_mzs2', map_mz_range)
range_dict['rt'] = ('rt_list_ms2', map_rt_range)
range_dict['mobility'] = ('mobility', map_mob_range)
query_dict = {}
query_dict['rt'] = 'rt_list_ms2'
query_dict['mass'] = 'prec_mass_list2'
query_dict['mz'] = 'mono_mzs2'
query_dict['charge'] = 'charge2'
query_dict['mobility'] = 'mobility'
if 'mobility' not in feature_table.columns:
del range_dict['mobility']
del query_dict['mobility']
use_mob = False
else:
use_mob = True
tree_points = feature_table[list(range_dict.keys())].values
for i, key in enumerate(range_dict):
tree_points[:,i] = tree_points[:,i]/range_dict[key][1]
matching_tree = KDTree(tree_points, metric="minkowski")
ref_points = np.array([query_data[range_dict[_][0]] / range_dict[_][1] for _ in range_dict]).T
ref_points = replace_infs(ref_points)
dist, idx = matching_tree.query(ref_points, k=map_n_neighbors)
ref_matched = np.zeros(ref_points.shape[0], dtype=np.bool_)
all_df = []
for neighbor in range(map_n_neighbors):
ref_df = pd.DataFrame(np.array([query_data[query_dict[_]] for _ in query_dict]).T, columns = query_dict.keys())
for _ in query_dict:
ref_df[_+'_matched'] = feature_table.iloc[idx[:,neighbor]][_].values
ref_df[_+'_offset'] = ref_df[_+'_matched'] - ref_df[_]
ref_df['query_idx'] = ref_df.index
ref_df['feature_idx'] = idx[:,neighbor]
for field in ['int_sum','int_apex','rt_start','rt_apex','rt_end','fwhm','mobility_lower','mobility_upper']:
if field in feature_table.keys():
ref_df[field] = feature_table.iloc[idx[:,neighbor]][field].values
rt_check = (ref_df['rt_start'] <= ref_df['rt']) & (ref_df['rt'] <= ref_df['rt_end'])
# check isolation window (win=3)
mass_check = np.abs(ref_df['mz_offset'].values) <= 3
_check = rt_check & mass_check
if use_mob:
mob_check = (ref_df['mobility_lower'] <= ref_df['mobility']) & (ref_df['mobility'] <= ref_df['mobility_upper'])
_check &= mob_check
ref_matched |= _check
ref_df['dist'] = dist[:,neighbor]
ref_df = ref_df[_check]
all_df.append(ref_df)
if search_unidentified:
if use_mob:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2'], query_data['mobility']]).T, columns=['rt', 'mass', 'mz', 'charge','mobility'])
else:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2']]).T, columns=['rt', 'mass', 'mz', 'charge'])
unmatched_ref = unmatched_ref[~ref_matched]
unmatched_ref['mass_matched'] = unmatched_ref['mass']
unmatched_ref['mass_offset'] = 0
unmatched_ref['rt_matched'] = unmatched_ref['rt']
unmatched_ref['rt_offset'] = 0
unmatched_ref['mz_matched'] = unmatched_ref['mz']
unmatched_ref['mz_offset'] = 0
unmatched_ref['charge_matched'] = unmatched_ref['charge']
unmatched_ref['query_idx'] = unmatched_ref.index
unmatched_ref['feature_idx'] = np.nan
if use_mob:
            unmatched_ref['mobility_matched'] = unmatched_ref['mobility']
            unmatched_ref['mobility_offset'] = np.nan
for field in ['int_sum','int_apex','rt_start','rt_apex','rt_end','fwhm']:
if field in feature_table.keys():
unmatched_ref[field] = np.nan
unmatched_ref['dist'] = np.nan
all_df.append(unmatched_ref)
features = pd.concat(all_df)
features = features.sort_values('mass_matched', ascending=True)
features = features.reset_index(drop=True)
return features
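# Illustrative sketch (toy values) of the scaled nearest-neighbor matching idea used
# in map_ms2: each dimension is divided by its allowed mapping range, so that a
# Minkowski distance of about 1 corresponds to the edge of the matching window.
_features = np.array([[500.0, 10.0], [600.0, 12.0]])  # columns: mz, rt
_ranges = np.array([1.0, 0.5])  # map_mz_range, map_rt_range
_tree = KDTree(_features / _ranges, metric="minkowski")
_query = np.array([[500.2, 10.1]]) / _ranges
_dist, _idx = _tree.query(_query, k=1)  # nearest feature index -> 0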
#hide
from nbdev.showdoc import *
#hide
from nbdev.export import *
notebook2script()
###Output
Converted 00_settings.ipynb.
Converted 01_chem.ipynb.
Converted 02_io.ipynb.
Converted 03_fasta.ipynb.
Converted 04_feature_finding.ipynb.
Converted 05_search.ipynb.
Converted 06_score.ipynb.
Converted 07_recalibration.ipynb.
Converted 08_quantification.ipynb.
Converted 09_matching.ipynb.
Converted 10_constants.ipynb.
Converted 11_interface.ipynb.
Converted 12_performance.ipynb.
Converted 13_export.ipynb.
Converted 14_display.ipynb.
Converted 15_label.ipynb.
Converted additional_code.ipynb.
Converted contributing.ipynb.
Converted file_formats.ipynb.
Converted index.ipynb.
###Markdown
Feature Finding> Functions related to feature finding This part describes the implementation of the feature-finding algorithm. The core of the algorithm is described in the [MaxQuant-Paper](https://www.nature.com/articles/nbt.1511).The supplementary material explains the underlying methodology in great detail and is the foundation of the theoretical background that is described here.A refined version of the algorithm was presented with [Dinosaur](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4933939/), which was also used as a reference for the Python implementation.For the algorithm, we need several modules:1. Connecting Centroids to Hills2. Refinement of Hills3. Calculating Hill Statistics4. Combining Hills to Isotope Patterns5. Deconvolution of Isotope Patterns Loading DataFrom the `IO` library, we already have an `*.ms_data.hdf` container that contains centroided data. To use it in feature finding, we directly load the data. Connecting Centroids to Hills> Note: Feature finding relies heavily on the performance function decorator from the performance notebook: `@alphapept.performance.performance_function`. Part of this is that the functions will not have return values in order to be GPU compatible. Please check out this notebook for further information. Connecting centroidsFeature finding starts with connecting centroids. For this, we look at subsequent scans and compare peaks that are within a defined mass tolerance (`centroid_tol`). Imagine you have three scans with the following centroids:* Scan 0: 10, 20, 30* Scan 1: 10.2, 40.1* Scan 2: 40, 50, 60When comparing consecutive scans and defining the maximum delta mass to be 0.5, we find the following connections: (Scan No, Centroid No) -> (Scan No, Centroid No). As we cannot easily store tuples in the matrix, we convert the tuple containing the position of the connected centroid to an integer.* (0,0) -> (1,0) -> (3): 10 & 10.2 -> delta = 0.2* (1,1) -> (2,0) -> (6): 40.1 & 40 -> delta = 0.1Finally, we store this in the `results` matrix:$\begin{bmatrix}3 & -1 & -1 \\ -1 & 6 & -1\\ -1 & -1 & -1 \end{bmatrix}$The corresponding `scores` matrix will look as follows:$\begin{bmatrix}0.2 & -1 & -1 \\ -1 & 0.1 & -1\\ -1 & -1 & -1 \end{bmatrix}$This allows us to not only easily store connections between centroids but also to perform a quick lookup for the delta of an existing connection. Note that it also only stores the best connection for each centroid. To extract the connected centroids, we can use `np.where(results >= 0)`. This implementation allows getting millions of connections within seconds. As we also allow gaps, meaning that we can have connections between Scan 0 and Scan 2, we make the aforementioned matrix multidimensional, so that e.g. a first matrix stores the connections for no gap, and the second matrix the connections with a gap of 1.The functionality for this step is implemented in `connect_centroids_unidirection` and the wrapper `find_centroid_connections`.
###Code
#export
import numpy as np
import alphapept.performance
#This function is tested by being called from find_centroid_connections
@alphapept.performance.performance_function
def connect_centroids_unidirection(x:np.ndarray, row_borders:np.ndarray, connections:np.ndarray, scores:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Connect centroids.
Args:
x (np.ndarray): Index to datapoint. Note that this using the performance_function, so one passes an ndarray.
row_borders (np.ndarray): Row borders of the centroids array.
connections (np.ndarray): Connections matrix to store the connections
scores (np.ndarray): Score matrix to store the connections
centroids (np.ndarray): 1D Array containing the masses of the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
for gap in range(max_gap + 1):
y = x + gap + 1
if y >= row_borders.shape[0]:
return
start_index_f = 0
if x > 0:
start_index_f = row_borders[x - 1]
centroids_1 = centroids[start_index_f: row_borders[x]]
start_index_b = row_borders[y - 1]
centroids_2 = centroids[start_index_b: row_borders[y]]
i = 0
j = 0
while (i < len(centroids_1)) & (j < len(centroids_2)):
mz1, mz2 = centroids_1[i], centroids_2[j]
diff = mz1 - mz2
mz_sum = mz1 + mz2
delta = 2 * 1e6 * abs(diff) / mz_sum
if delta < centroid_tol:
if scores[x, i, gap] > delta:
scores[x, i, gap] = delta
connections[x, i, gap] = (connections.shape[1] * y) + j
if diff > 0:
j += 1
else:
i += 1
def find_centroid_connections(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Wrapper function to call connect_centroids_unidirection
Args:
rowwise_peaks (np.ndarray): Length of centroids with respect to the row borders.
row_borders (np.ndarray): Row borders of the centroids array.
centroids (np.ndarray): Array containing the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
max_centroids = int(cupy.max(rowwise_peaks))
spectra_cnt = len(row_borders) - 1
connections = cupy.full((spectra_cnt, max_centroids, max_gap + 1), -1, dtype=np.int32)
score = cupy.full((spectra_cnt, max_centroids, max_gap + 1), np.inf)
connect_centroids_unidirection(range(len(row_borders)),
row_borders,
connections,
score,
centroids,
max_gap,
centroid_tol)
score = score[cupy.where(score < np.inf)]
score_median = cupy.median(score)
score_std = cupy.std(score)
del score, max_centroids, spectra_cnt
c_shape = connections.shape
from_r, from_c, from_g = cupy.where(connections >= 0)
to_r = connections[from_r, from_c, from_g] // c_shape[1]
to_c = connections[from_r, from_c, from_g] - to_r * c_shape[1]
del connections, from_g
return from_r, from_c, to_r, to_c, score_median, score_std
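# Illustrative sketch of the integer encoding used above: a connection to
# (scan y, centroid j) is stored as one integer, max_centroids * y + j, and decoded
# again with integer division and remainder. With 3 centroids per scan, as in the
# example in the text, scan 1 / centroid 0 encodes to 3.
_max_centroids = 3
_y, _j = 1, 0
_encoded = _max_centroids * _y + _j  # -> 3
_to_r = _encoded // _max_centroids  # -> 1 (scan)
_to_c = _encoded - _to_r * _max_centroids  # -> 0 (centroid)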
#hide
def test_find_centroid_connections():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 40.1, 40, 50, 60])
centroid_tol = 0.5*1e6
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_r, np.array([0, 0, 1, 1])) #e.g. 0,0 is connected to 0,1 -> 10 to 10.2
assert np.allclose(from_c, np.array([0, 2, 1, 2]))
assert np.allclose(to_r, np.array([1, 1, 2, 2]))
assert np.allclose(to_c, np.array([0, 1, 0, 0]))
test_find_centroid_connections()
###Output
_____no_output_____
###Markdown
We wrap the centroid connections in the function `connect_centroids`. This function converts the connections into a usable array.
###Code
#export
#the performance functions are tested with the wrapper function connect_centroids
@alphapept.performance.performance_function
def convert_connections_to_array(x:np.ndarray, from_r:np.ndarray, from_c:np.ndarray, to_r:np.ndarray, to_c:np.ndarray, row_borders:np.ndarray, out_from_idx:np.ndarray, out_to_idx:np.ndarray):
"""Convert integer indices of a matrix to coordinates.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_r (np.ndarray): From array with row coordinates.
from_c (np.ndarray): From array with column coordinates.
to_r (np.ndarray): To array with row coordinates.
to_c (np.ndarray): To array with column coordinates.
row_borders (np.ndarray): Row borders (for indexing).
out_from_idx (np.ndarray): Reporting array: 1D index from.
out_to_idx (np.ndarray): Reporting array: 1D index to.
"""
row = from_r[x]
col = from_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_from_idx[x] = start_index_f + col
row = to_r[x]
col = to_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_to_idx[x] = start_index_f + col
@alphapept.performance.performance_function
def eliminate_overarching_vertex(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray):
"""Eliminate overacrhing vertex.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
"""
if x == 0:
return
if from_idx[x - 1] == from_idx[x]:
to_idx[x] = -1
def connect_centroids(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, float, float):
"""Function to connect centroids.
Args:
rowwise_peaks (np.ndarray): Indexes for centroids.
row_borders (np.ndarray): Row borders (for indexing).
centroids (np.ndarray): Centroid data.
        max_gap (int): Maximum gap when connecting centroids.
        centroid_tol (float): Centroid tolerance for matching centroids.
Returns:
np.ndarray: From index.
np.ndarray: To index.
float: Median score.
float: Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks,
row_borders,
centroids,
max_gap,
centroid_tol)
from_idx = cupy.zeros(len(from_r), np.int32)
to_idx = cupy.zeros(len(from_r), np.int32)
convert_connections_to_array(range(len(from_r)),
from_r,
from_c,
to_r,
to_c,
row_borders,
from_idx,
to_idx)
eliminate_overarching_vertex(range(len(from_idx)), from_idx, to_idx)
relavent_idx = cupy.where(to_idx >= 0)
from_idx = cupy.take(from_idx, relavent_idx)[0]
to_idx = cupy.take(to_idx, relavent_idx)[0]
del from_r, from_c, to_r, to_c, relavent_idx
return from_idx, to_idx, score_median, score_std
#Sample snippet to show centroid connections
import matplotlib.pyplot as plt
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
scan_no = np.array([0, 0, 0, 1, 1, 2, 2, 2])
plt.figure(figsize=(5,5))
for i, _ in enumerate(row_borders):
ctrd = centroids[_-rowwise_peaks[i]:_]
plt.plot(ctrd, np.ones_like(ctrd)*i, 'o')
for i, _ in enumerate(from_idx):
from_ = _
to_ = to_idx[i]
plt.plot([centroids[from_], centroids[to_]], [scan_no[from_], scan_no[to_]], 'k:')
plt.ylabel('scan')
plt.xlabel('m/z')
plt.ylim(len(row_borders)+0.5, -1.5)
plt.title('Peak connections')
plt.show()
#hide
def test_connect_centroids():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_idx, np.array([0, 1, 2]))
assert np.allclose(to_idx, np.array([3, 4, 6]))
test_connect_centroids()
###Output
_____no_output_____
###Markdown
Extracting HillsTo extract hills, we extract the connected components from the centroid connections.
###Code
#export
@alphapept.performance.performance_function
def path_finder(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, forward:np.ndarray, backward:np.ndarray):
"""Extracts path information and writes to path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): Array containing from indices.
to_idx (np.ndarray): Array containing to indices.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
"""
fr = from_idx[x]
to = to_idx[x]
forward[fr] = to
backward[to] = fr
@alphapept.performance.performance_function
def find_path_start(x:np.ndarray, forward:np.ndarray, backward:np.ndarray, path_starts:np.ndarray):
"""Function to find the start of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
path_starts (np.ndarray): Array to report path starts.
"""
if forward[x] > -1 and backward[x] == -1:
path_starts[x] = 0
@alphapept.performance.performance_function
def find_path_length(x:np.ndarray, path_starts:np.ndarray, forward:np.ndarray, path_cnt:np.ndarray):
"""Function to extract the length of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forward (np.ndarray): Array that stores forward information.
path_cnt (np.ndarray): Reporting array to count the paths.
"""
ctr = 1
idx = path_starts[x]
while forward[idx] > -1:
ctr += 1
idx = forward[idx]
path_cnt[x] = ctr
@alphapept.performance.performance_function
def fill_path_matrix(x:np.ndarray, path_start:np.ndarray, forwards:np.ndarray, out_hill_data:np.ndarray, out_hill_ptr:np.ndarray):
"""Function to fill the path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forwards (np.ndarray): Forward array.
out_hill_data (np.ndarray): Array containing the indices to hills.
out_hill_ptr (np.ndarray): Array containing the bounds to out_hill_data.
"""
path_position = 0
idx = path_start[x]
while idx > -1:
out_hill_data[out_hill_ptr[x] + path_position] = idx
idx = forwards[idx]
path_position += 1
def get_hills(centroids:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, hill_length_min:int=3)-> (np.ndarray, np.ndarray, int):
"""Function to get hills from centroid connections.
Args:
centroids (np.ndarray): 1D Array containing the masses of the centroids.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
        hill_length_min (int): Minimum hill length.
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
forward = cupy.full(centroids.shape[0], -1)
backward = cupy.full(centroids.shape[0], -1)
path_starts = cupy.full(centroids.shape[0], -1)
path_finder(range(len(from_idx)), from_idx, to_idx, forward, backward)
find_path_start(range(len(forward)), forward, backward, path_starts)
    # path_starts will now contain the first index of all connected centroids
path_starts = cupy.where(path_starts == 0)[0]
path_node_cnt = cupy.full(path_starts.shape[0], -1)
find_path_length(range(len(path_starts)), path_starts, forward, path_node_cnt)
relavant_path_node = cupy.where(path_node_cnt >= hill_length_min)[0]
path_starts = cupy.take(path_starts, relavant_path_node)
path_node_cnt = cupy.take(path_node_cnt, relavant_path_node)
del relavant_path_node
    # Generate the hill matrix index pointer data
hill_ptrs = cupy.empty((path_starts.shape[0] + 1), dtype=cupy.int32)
hill_ptrs[0] = 0
hill_ptrs[1:] = path_node_cnt.cumsum()
hill_data = cupy.empty((int(hill_ptrs[-1])), np.int32)
fill_path_matrix(range(len(path_starts)), path_starts, forward, hill_data, hill_ptrs)
del from_idx, to_idx, path_starts, forward, backward
return hill_ptrs, hill_data, path_node_cnt
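# Illustrative sketch (plain Python, toy values) of the path extraction idea: forward
# pointers are followed from every start node (a node with an outgoing but no incoming
# connection) to enumerate one hill.
_forward = {0: 3, 3: 6}  # 0 -> 3 -> 6 forms one hill
_has_incoming = set(_forward.values())
for _start in [k for k in _forward if k not in _has_incoming]:
    _hill, _node = [_start], _start
    while _node in _forward:
        _node = _forward[_node]
        _hill.append(_node)
# _hill -> [0, 3, 6]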
def extract_hills(query_data:dict, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, int, float, float):
"""[summary]
Args:
query_data (dict): Data structure containing the query data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
score_median (float): Median score.
score_std (float): Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
indices = cupy.array(query_data['indices_ms1'])
mass_data = cupy.array(query_data['mass_list_ms1'])
rowwise_peaks = indices[1:] - indices[:-1]
row_borders = indices[1:]
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, mass_data, max_gap, centroid_tol)
hill_ptrs, hill_data, path_node_cnt = get_hills(mass_data, from_idx, to_idx)
del mass_data
del indices
if cupy.__name__ != 'numpy':
hill_ptrs = hill_ptrs.get()
hill_data = hill_data.get()
path_node_cnt = path_node_cnt.get()
score_median = score_median.get()
score_std = score_std.get()
return hill_ptrs, hill_data, path_node_cnt, score_median, score_std
from numba import njit
@njit
def remove_duplicate_hills(hill_ptrs, hill_data, path_node_cnt):
"""
Removes hills that share datapoints. Starts from the largest hills.
"""
taken_points = np.zeros(hill_data.max()+1)
c = 0
current_idx = 0
hill_ptrs_new = np.zeros_like(hill_ptrs)
hill_data_new = np.zeros_like(hill_data)
for i, _ in enumerate(np.argsort(path_node_cnt)[::-1]):
s, e = hill_ptrs[_], hill_ptrs[_+1]
point_idx = hill_data[s:e]
hill_pts = taken_points[point_idx]
if hill_pts.sum() == 0:
hill_data_new[current_idx:current_idx+len(hill_pts)] = point_idx
current_idx += len(hill_pts)
hill_ptrs_new[c+1] = current_idx
c +=1
taken_points[point_idx] +=1
hill_data_new = hill_data_new[:current_idx]
hill_ptrs_new = hill_ptrs_new[:c]
return hill_ptrs_new, hill_data_new
###Output
_____no_output_____
###Markdown
Hill SplittingWhen having a hill with two or more maxima, we would like to split it at the minimum position. For this, we use a recursive approach. First, the minimum of a hill is detected. A hill is split at this minimum if the smaller of the surrounding maxima is at least the factor `hill_split_level` larger than the minimum. For each split, the process is repeated.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def fast_minima(y:np.ndarray)->np.ndarray:
"""Function to calculate the local minimas of an array.
Args:
y (np.ndarray): Input array.
Returns:
np.ndarray: Array containing minima positions.
"""
minima = np.zeros(len(y))
start = 0
end = len(y)
for i in range(start + 2, end - 2):
if ((y[i - 1] > y[i]) & (y[i + 1] > y[i])) \
or ((y[i - 1] > y[i]) & (y[i + 1] == y[i]) & (y[i + 2] > y[i])) \
or ((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] > y[i])) \
or (((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] == y[i]) & \
(y[i + 2] > y[i]))):
minima[i] = 1
minima = minima.nonzero()[0]
return minima
#hide
def test_fast_minima():
assert fast_minima(np.array([3,2,1,0,1,2,3])) == 3
assert fast_minima(np.array([4,3,2,1,0,1,2])) == 4
assert len(fast_minima(np.array([5,4,3,2,1,0,1]))) == 0
assert len(fast_minima(np.array([6,5,4,3,2,1,0]))) == 0
test_fast_minima()
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def split(k:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_data:np.ndarray, splits:np.ndarray, hill_split_level:float, window:int):
"""Function to split hills.
Args:
k (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_data (np.ndarray): Array containing the indices to hills.
splits (np.ndarray): Array containing splits.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
"""
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_trace = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.median(int_trace[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.mean(int_trace[min_index:max_index])
#minima = (np.diff(np.sign(np.diff(int_trace))) > 0).nonzero()[0] + 1 #This works also but is slower
minima = fast_minima(int_trace)
sorted_minima = np.argsort(int_trace[minima])
minima = minima[sorted_minima]
for min_ in minima:
minval = int_trace[min_]
left_max = max(int_trace[:min_])
right_max = max(int_trace[min_:])
min_max = min(left_max, right_max)
if (minval == 0) or ((min_max / minval) > hill_split_level):
splits[k] = start+min_
break # Split only once per iteration
def split_hills(hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, hill_split_level:float, window:int)->np.ndarray:
"""Wrapper function to split hills
Args:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
Returns:
np.ndarray: Array containing the bounds to the hill_data with splits.
"""
splits = np.zeros(len(int_data), dtype=np.int32)
to_check = np.arange(len(hill_ptrs)-1)
while len(to_check) > 0:
split(to_check, hill_ptrs, int_data, hill_data, splits, hill_split_level, window)
splitpoints = splits.nonzero()[0]
to_check = np.zeros(len(hill_ptrs))
to_check[splitpoints] = 1
        to_check = np.insert(to_check, splitpoints+1, np.ones(len(splitpoints))).nonzero()[0] # np.insert(array, index, values)
        hill_ptrs = np.insert(hill_ptrs, splitpoints+1, splits[splitpoints]) # np.insert(array, index, values)
splits = np.zeros(len(hill_ptrs), dtype=np.int32) #was cupy np.int32
return hill_ptrs
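# Illustrative sketch (toy, already smoothed trace) of the split criterion: a hill is
# split at a local minimum if the smaller of the two surrounding maxima is more than
# hill_split_level times larger than the minimum.
_trace = np.array([1., 5., 9., 4., 8., 6., 2.])
_min_pos = 3
_min_max = min(_trace[:_min_pos].max(), _trace[_min_pos:].max())  # min(9, 8) = 8
_do_split = (_min_max / _trace[_min_pos]) > 1.3  # 8 / 4 = 2 > 1.3 -> split here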
###Output
_____no_output_____
###Markdown
Filter HillsTo filter hills, we define a minimum length `hill_min_length`. All hills below the length threshold `hill_peak_min_length` are accepted as is. For longer hills, the intensity at the start and at the end is compared to the maximum intensity. If the ratio of the maximum raw intensity to the smoothed intensity at the beginning and at the end is larger than `hill_peak_factor`, the hill is accepted.
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def check_large_hills(idx:np.ndarray, large_peaks:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, to_remove:np.ndarray, large_peak:int = 40, hill_peak_factor:float = 2, window:int=1):
"""Function to check large hills and flag them for removal.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
large_peaks (np.ndarray): Array containing large peaks.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
to_remove (np.ndarray): Array with indexes to remove.
large_peak (int, optional): Length criterion when a peak is large. Defaults to 40.
hill_peak_factor (float, optional): Hill maximum criterion. Defaults to 2.
        window (int, optional): Smoothing window. Defaults to 1.
"""
k = large_peaks[idx]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_smooth_ = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.median(int_smooth_[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.mean(int_smooth_[min_index:max_index])
int_ = int_data[int_idx]
max_ = np.max(int_)
if (max_ / int_smooth_[0] > hill_peak_factor) & (max_ / int_smooth_[-1] > hill_peak_factor):
to_remove[idx] = 0
def filter_hills(hill_data:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_check_large:int =40, window:int = 1) -> (np.ndarray, np.ndarray):
"""Filters large hills.
Args:
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
        hill_check_large (int, optional): Length criterion when a hill is considered large. Defaults to 40.
window (int, optional): Smoothing window. Defaults to 1.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
"""
large_peaks = np.where(np.diff(hill_ptrs)>=hill_check_large)[0]
to_remove = np.ones(len(large_peaks), dtype=np.int32)
check_large_hills(range(len(large_peaks)), large_peaks, hill_ptrs, hill_data, int_data, to_remove, window)
idx_ = np.ones(len(hill_data), dtype = np.int32)
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
to_remove = to_remove.nonzero()[0]
for _ in to_remove:
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_
###Output
_____no_output_____
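###Markdown
As an illustration of this criterion, the following cell sketches the check with plain NumPy on a synthetic intensity trace; the trace, window and factor values are made up for illustration and are not part of the pipeline.
###Code
#hide
# Minimal sketch of the hill acceptance criterion on a synthetic trace (all values made up).
import numpy as np

hill_peak_factor = 2
window = 1

# Synthetic hill: rises to a clear apex and falls off again.
raw = np.array([1., 2., 5., 20., 50., 80., 60., 30., 10., 3., 2.])

# Median- and then mean-smooth the trace, mirroring the two passes in check_large_hills.
smooth = raw.copy()
for i in range(len(smooth)):
    lo, hi = max(0, i - window), min(len(smooth), i + window + 1)
    smooth[i] = np.median(smooth[lo:hi])
for i in range(len(smooth)):
    lo, hi = max(0, i - window), min(len(smooth), i + window + 1)
    smooth[i] = np.mean(smooth[lo:hi])

max_ = raw.max()
# The hill is kept if the apex clearly rises above the smoothed intensities at both ends.
assert (max_ / smooth[0] > hill_peak_factor) & (max_ / smooth[-1] > hill_peak_factor)
###Output
_____no_output_____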
###Markdown
Calculating Hill Statistics
Next, we calculate summary statistics for the connected centroids. We can obtain a high-precision mass estimate for each hill by taking the average of the masses and weighting it by their intensities:$$\overline{m} = \frac{\sum_{j=1}^nm_jI_j}{\sum_{j=1}^nI_j}$$Since the mass estimate $\overline{m}$ in the equation above is more complicated than a plain average of the $m_j$, a standard-deviation-based estimate of the error would not be appropriate. Therefore, we calculate the error as a bootstrap estimate over $B=150$ bootstrap replications:$$\Delta \overline{m} = \sqrt{\frac{\sum_{b=1}^{B}(\overline{m}_b - \overline{m})^2}{B-1}}$$The calculation of hill statistics for a single hill is implemented in `hill_stats`. To calculate the hill stats for a list of hills, we can call the wrapper `get_hill_data`.
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def hill_stats(idx:np.ndarray, hill_range:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, mass_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, stats:np.ndarray, hill_nboot_max:int, hill_nboot:int):
"""Function to calculate hill stats.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_range (np.ndarray): Hill range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
mass_data (np.ndarray): Array containing mass data.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
"""
np.random.seed(42)
start = hill_ptrs[idx]
end = hill_ptrs[idx + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
mz_ = mass_data[idx_]
ms1_int_sum = np.sum(int_)
ms1_int_area = np.abs(np.trapz(int_, rt_[rt_idx[idx_]])) # area: np.trapz(y, x) integrates the intensity over retention time
rt_min = rt_[rt_idx[idx_]].min()
rt_max = rt_[rt_idx[idx_]].max()
if len(idx_) > hill_nboot_max:
bootsize = hill_nboot_max
else:
bootsize = len(idx_)
averages = np.zeros(hill_nboot)
average = 0
for i in range(hill_nboot):
boot = np.random.choice(len(int_), bootsize, replace=True)
boot_mz = np.sum((mz_[boot] * int_[boot])) / np.sum(int_[boot])
averages[i] = boot_mz
average += boot_mz
average_mz = average/hill_nboot
delta = 0
for i in range(hill_nboot):
delta += (average_mz - averages[i]) ** 2 # squared deviation of each bootstrap replicate from the bootstrap mean
delta_m = np.sqrt(delta / (hill_nboot - 1))
stats[idx,0] = average_mz
stats[idx,1] = delta_m
stats[idx,2] = ms1_int_sum
stats[idx,3] = ms1_int_area
stats[idx,4] = rt_min
stats[idx,5] = rt_max
def remove_duplicates(stats:np.ndarray, hill_data:np.ndarray, hill_ptrs:np.ndarray)-> (np.ndarray, np.ndarray, np.ndarray):
"""Remove duplicate hills.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
np.ndarray: Filtered hill stats.
"""
dups = pd.DataFrame(stats).duplicated() #all duplicated hills
idx_ = np.ones(len(hill_data), dtype = np.int32) #keep all
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
for _ in np.arange(len(stats))[dups]: #duplicates will be assigned zeros
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_, stats[~dups]
def get_hill_data(query_data:dict, hill_ptrs:np.ndarray, hill_data:np.ndarray, hill_nboot_max:int = 300, hill_nboot:int = 150) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to get the hill data.
Args:
query_data (dict): Data structure containing the query data.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
Returns:
np.ndarray: Hill stats.
np.ndarray: Sortindex.
np.ndarray: Upper index.
np.ndarray: Scan index.
np.ndarray: Hill data.
np.ndarray: Hill points.
"""
indices_ = np.array(query_data['indices_ms1'])
rt_ = np.array(query_data['rt_list_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
scan_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
int_data = np.array(query_data['int_list_ms1'])
stats = np.zeros((len(hill_ptrs)-1, 6)) # columns: average_mz, delta_m, int_sum, int_area, rt_min, rt_max
hill_stats(range(len(hill_ptrs)-1), np.arange(len(hill_ptrs)-1), hill_ptrs, hill_data, int_data, mass_data, rt_, scan_idx, stats, hill_nboot_max, hill_nboot)
# sort the stats
sortindex = np.argsort(stats[:,4]) #Sorted by rt_min
stats = stats[sortindex,:]
idxs_upper = stats[:,4].searchsorted(stats[:,5], side="right")
sortindex_ = np.arange(len(sortindex))[sortindex]
return stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs
###Output
_____no_output_____
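###Markdown
As a sanity check of the statistics above, the following cell recomputes the intensity-weighted mass and its bootstrap error with plain NumPy for a small synthetic hill; all masses and intensities are made up for illustration.
###Code
#hide
# Minimal sketch of the intensity-weighted mass estimate and its bootstrap error (synthetic data).
import numpy as np

np.random.seed(42)

mz_ = np.array([500.001, 500.000, 499.999, 500.002, 500.000])  # synthetic centroid masses
int_ = np.array([1e4, 5e4, 8e4, 4e4, 2e4])                     # synthetic centroid intensities

# Intensity-weighted mass estimate (the formula above).
m_bar = np.sum(mz_ * int_) / np.sum(int_)

# Bootstrap estimate of the mass error over B replications.
B = 150
boot_means = np.zeros(B)
for b in range(B):
    boot = np.random.choice(len(int_), len(int_), replace=True)
    boot_means[b] = np.sum(mz_[boot] * int_[boot]) / np.sum(int_[boot])

delta_m = np.sqrt(np.sum((boot_means - boot_means.mean()) ** 2) / (B - 1))

assert np.abs(m_bar - 500.0) < 0.01
assert delta_m < 0.01
###Output
_____no_output_____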
###Markdown
Combining Hills to Isotope Patterns
After obtaining the summary statistics of the hills, the next step is to check whether they belong together to form an isotope pattern. For this, we check whether it is possible that they are neighbors in an isotope pattern, e.g. one having a 12C atom that has been replaced by a 13C version. The detailed criterion for the check is implemented in `check_isotope_pattern` and is as follows:$$\left | \Delta m-\frac{\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m_{1}}^{2} +\Delta {m_{2}}^{2}}$$The left side contains $\Delta m$, the difference of the precise mass estimates from the summary statistics, and $\Delta M = 1.00286864$, which is the mass difference between the 13C peak and the monoisotopic peak in an averagine molecule of 1500 Da mass, divided by the charge $z$.The right side contains $\Delta S = 0.0109135$, which is the maximum shift that a sulphur atom can cause ($\Delta S = 2m(^{13}C) - 2m(^{12}C) - m(^{34}S) + m(^{32}S)$), and $\Delta {m_{1}}$ and $\Delta {m_{2}}$, which are the bootstrapped mass standard deviations.
###Code
#export
from alphapept.constants import mass_dict
DELTA_M = mass_dict['delta_M']
DELTA_S = mass_dict['delta_S']
maximum_offset = DELTA_M + DELTA_S
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, iso_mass_range:int = 5)-> bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
iso_mass_range (int, optional): Mass range. Defaults to 5.
Returns:
bool: Flag indicating whether the two masses are compatible with the same isotope pattern.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
delta_mass = np.abs(mass1 - mass2)
left_side = np.abs(delta_mass - DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
#hide
def test_check_isotope_pattern():
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == True
mass2, delta_mass2 = 102.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == False
test_check_isotope_pattern()
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
mass2, delta_mass2 = 102.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
###Output
True
False
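###Markdown
The two test cases above can be reproduced by evaluating both sides of the criterion explicitly. The cell below does this with plain NumPy, using the $\Delta M$ and $\Delta S$ values quoted in the text (the trailing underscore in the names only avoids shadowing the module constants defined above).
###Code
#hide
# Worked example of the isotope-pattern criterion, using the Delta M / Delta S values quoted above.
import numpy as np

DELTA_M_ = 1.00286864   # mass difference between the 13C and the monoisotopic peak (averagine, 1500 Da)
DELTA_S_ = 0.0109135    # maximum sulphur-induced shift
iso_mass_range = 5
charge = 1

mass1, delta_mass1 = 100, 0.1 * iso_mass_range
mass2, delta_mass2 = 101.1, 0.05 * iso_mass_range

right_side = np.sqrt((DELTA_S_ / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)

left_side = np.abs(np.abs(mass1 - mass2) - DELTA_M_ / charge)
assert left_side <= right_side     # 101.1 is within one isotope spacing of 100 -> True above

mass2 = 102.1
left_side = np.abs(np.abs(mass1 - mass2) - DELTA_M_ / charge)
assert left_side > right_side      # 102.1 is roughly two spacings away -> False above
###Output
_____no_output_____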
###Markdown
Cosine Correlation of Two Hills
An additional criterion is that the intensity profiles have sufficient overlap in retention time. This is validated by ensuring that two hills have a cosine correlation of at least 0.6:$$\frac{\sum_{s=s_{min}}^{s_{max}}I_sJ_s}{\sqrt{\sum_{s=s_{min}}^{s_{max}}I_s^{2} \sum_{s=s_{min}}^{s_{max}}J_s^{2}}} \geq 0.6$$The intensities of two hills are only compared if both have an intensity value in a particular scan. Otherwise, the intensity is set to zero. Additionally, an overlap of at least three elements is required.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def correlate(scans_:np.ndarray, scans_2:np.ndarray, int_:np.ndarray, int_2:np.ndarray)->float:
"""Correlate two scans.
Args:
scans_ (np.ndarray): Masses of the first scan.
scans_2 (np.ndarray): Masses of the second scan.
int_ (np.ndarray): Intensity of the first scan.
int_2 (np.ndarray): Intensity of the second scan.
Returns:
float: Correlation.
"""
min_one, max_one = scans_[0], scans_[-1]
min_two, max_two = scans_2[0], scans_2[-1]
if min_one + 3 > max_two: # at least an overlap of 3 elements
corr = 0
elif min_two + 3 > max_one:
corr = 0
else:
min_s = min(min_one, min_two)
max_s = max(max_one, max_two)
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[scans_ - min_s] = int_
int_two_scaled[scans_2 - min_s] = int_2
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
###Output
_____no_output_____
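###Markdown
The cell below sketches the same scan-aligned cosine calculation with plain NumPy for two short synthetic hills; scan numbers and intensities are made up for illustration.
###Code
#hide
# Minimal sketch of the scan-aligned cosine correlation of two hills (synthetic traces).
import numpy as np

scans_1, int_1 = np.array([10, 11, 12, 13, 14]), np.array([1., 5., 10., 5., 1.])
scans_2, int_2 = np.array([12, 13, 14, 15, 16]), np.array([2., 8., 12., 6., 2.])

min_s = min(scans_1[0], scans_2[0])
max_s = max(scans_1[-1], scans_2[-1])

# Put both hills on a common scan axis; scans without a value stay zero.
a = np.zeros(max_s - min_s + 1)
b = np.zeros(max_s - min_s + 1)
a[scans_1 - min_s] = int_1
b[scans_2 - min_s] = int_2

corr = np.sum(a * b) / np.sqrt(np.sum(a ** 2) * np.sum(b ** 2))
assert 0 < corr < 1
###Output
_____no_output_____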
###Markdown
Extracting pre-Isotope Patterns
Having two criteria to check whether hills could, in principle, belong together, we define the wrapper functions `extract_edge` and `get_pre_isotope_patterns` to extract the connected hills. To minimize the number of comparisons, we only compare hills that overlap in time (i.e., the start of one hill `rt_min` needs to be before the end of the other hill `rt_max`) and whose masses are less than the sum of $\Delta M$ and $\Delta S$ apart. To extract all hills that belong together, we again rely on the `NetworkX` package to extract the connected components.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def extract_edge(stats:np.ndarray, idxs_upper:np.ndarray, runner:int, max_index:int, maximum_offset:float, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:int=5)->list:
"""Extract edges.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparing.
runner (int): Index.
max_index (int): Unused.
maximum_offset (float): Maximum offset when comparing edges.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
Returns:
list: List of edges.
"""
edges = []
mass1 = stats[runner, 0]
delta_mass1 = stats[runner, 1]
for j in range(runner+1, idxs_upper[runner]):
mass2 = stats[j, 0]
if np.abs(mass2 - mass1) <= maximum_offset:
delta_mass2 = stats[j, 1]
for charge in range(iso_charge_min, iso_charge_max + 1):
if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
edges.append((runner, j))
break
return edges
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def edge_correlation(idx:np.ndarray, to_keep:np.ndarray, sortindex_:np.ndarray, pre_edges:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float):
"""Correlates two edges and flag them it they should be kept.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
to_keep (np.ndarray): Array with indices which edges should be kept.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
pre_edges (np.ndarray): Array with pre edges.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
"""
edge = pre_edges[idx,:]
y = sortindex_[edge[0]]
start = hill_ptrs[y]
end = hill_ptrs[y + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
con = sortindex_[edge[1]]
start = hill_ptrs[con]
end = hill_ptrs[con + 1]
idx_2 = hill_data[start:end]
int_2 = int_data[idx_2]
scans_2 = scan_idx[idx_2]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
to_keep[idx] = 1
#export
import networkx as nx
def get_pre_isotope_patterns(stats:np.ndarray, idxs_upper:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, maximum_offset:float, iso_charge_min:int=1, iso_charge_max:int=6, iso_mass_range:float=5, cc_cutoff:float=0.6)->list:
"""Function to extract pre isotope patterns.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparison.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
maximum_offset (float): Maximum offset when matching.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
cc_cutoff (float, optional): Correlation cutoff. Defaults to 0.6.
Returns:
list: List of pre isotope patterns.
"""
pre_edges = []
# Step 1: collect all candidate edges that pass the mass criterion
for runner in range(len(stats)):
pre_edges.extend(extract_edge(stats, idxs_upper, runner, idxs_upper[runner], maximum_offset, iso_charge_min, iso_charge_max, iso_mass_range))
to_keep = np.zeros(len(pre_edges), dtype='int')
pre_edges = np.array(pre_edges)
edge_correlation(range(len(to_keep)), to_keep, sortindex_, pre_edges, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
edges = pre_edges[to_keep.nonzero()]
G2 = nx.Graph()
for i in range(len(edges)):
G2.add_edge(edges[i][0], edges[i][1])
pre_isotope_patterns = [
sorted(list(c))
for c in sorted(nx.connected_components(G2), key=len, reverse=True)
]
return pre_isotope_patterns
###Output
_____no_output_____
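###Markdown
The connected-component step itself is generic. The cell below shows it in isolation on a hand-made edge list, mirroring how `get_pre_isotope_patterns` turns the kept edges into pre-isotope patterns.
###Code
#hide
# Minimal sketch of grouping kept edges into pre-isotope patterns via connected components.
import networkx as nx

edges = [(0, 1), (1, 2), (5, 6)]   # made-up hill indices that passed both checks

G = nx.Graph()
G.add_edges_from(edges)

patterns = [sorted(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)]
assert patterns == [[0, 1, 2], [5, 6]]
###Output
_____no_output_____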
###Markdown
Extracting Isotope Patterns
The extracted pre-isotope patterns may not be consistent because their pair-wise mass differences may not correspond to the same charge. To extract isotope patterns from pre-isotope patterns, we need to ensure that they are consistent for a single charge. To do this, we start with the 100 most intense peaks from a pre-isotope pattern as seeds. For each seed and charge we then try to extract the longest consistent isotope pattern. To check whether a hill is consistent with the seed we employ a modified checking criterion (`check_isotope_pattern_directed`):$$\left | m-m_j-\frac{j\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m}^{2} +\Delta {m_{j}}^{2}}$$Here $m$ is the mass of the seed peak, and $m_{j}$ refers to a peak relative to the seed. $j$ refers to the position of that peak to the left or right of the seed (negative or positive index) within the pattern. $j$ needs to run over consecutive values so that gaps are not allowed. Besides this consistency check, two hills also need a cosine correlation of at least 0.6.Programmatically, this is implemented in `grow_trail` and `grow`. These functions use a greedy approach that adds matching hills to the left and right of the seed until no more hills can be added.
###Code
#export
from numba.typed import List
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern_directed(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, index:int, iso_mass_range:float)->bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
index (int): Expected number of isotope spacings between the two masses (the sign encodes the direction).
iso_mass_range (float): Isotope mass ranges.
Returns:
bool: Flag if two isotope patterns belong together.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
left_side = np.abs(mass1 - mass2 - index * DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
@alphapept.performance.compile_function(compilation_mode="numba")
def grow(trail:List, seed:int, direction:int, relative_pos:int, index:int, stats:np.ndarray, pattern:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Grows isotope pattern based on a seed and direction.
Args:
trail (List): List of hills belonging to a pattern.
seed (int): Seed position.
direction (int): Direction in which to grow the trail
relative_pos (int): Relative position.
index (int): Index.
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: List of hills belonging to a pattern.
"""
x = pattern[seed] # This is the seed
mass1 = stats[x,0]
delta_mass1 = stats[x,1]
k = sortindex_[x]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
growing = True
while growing:
if direction == 1:
if seed + relative_pos == len(pattern):
growing = False
break
else:
if seed + relative_pos < 0:
growing = False
break
y = pattern[seed + relative_pos] # This is a reference peak
l = sortindex_[y]
mass2 = stats[y,0]
delta_mass2 = stats[y,1]
start = hill_ptrs[l]
end = hill_ptrs[l + 1]
idx_ = hill_data[start:end]
int_2 = int_data[idx_]
scans_2 = scan_idx[idx_]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
if check_isotope_pattern_directed(mass1, mass2, delta_mass1, delta_mass2, charge, -direction * index, iso_mass_range):
if direction == 1:
trail.append(y)
else:
trail.insert(0, y)
index += 1 # Greedy matching: only one edge for a specific distance, will not affect the following matches
delta_mass = np.abs(mass1 - mass2)
if (delta_mass > (DELTA_M+DELTA_S) * index): # the pattern is sorted so there is a maximum to look back
break
relative_pos += direction
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def grow_trail(seed:int, pattern:np.ndarray, stats:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to grow an isotope pattern to the left and right side.
Args:
seed (int): Seed position.
pattern (np.ndarray): Isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Isotope pattern.
"""
x = pattern[seed]
trail = List()
trail.append(x)
trail = grow(trail, seed, -1, -1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trail = grow(trail, seed, 1, 1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def get_trails(seed:int, pattern:np.ndarray, stats:np.ndarray, charge_range:List, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to extract trails for a given charge range.
Args:
seed (int): Seed index.
pattern (np.ndarray): Pre isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge_range (List): Charge range.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Trail of consistent hills.
"""
trails = []
for charge in charge_range:
trail = grow_trail(seed, pattern, stats, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trails.append(trail)
return trails
#export
import matplotlib.pyplot as plt
def plot_pattern(pattern:np.ndarray, sorted_hills:np.ndarray, centroids:np.ndarray, hill_data:np.ndarray):
"""Helper function to plot a pattern.
Args:
pattern (np.ndarray): Pre isotope pattern.
sorted_hills (np.ndarray): Hills, sorted.
centroids (np.ndarray): 1D Array containing the masses of the centroids.
hill_data (np.ndarray): Array containing the indices to hills.
"""
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10,10))
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
mzs = []
rts = []
ints = []
for entry in pattern:
hill = sorted_hills[entry]
hill_data = np.array([centroids[_[0]][_[1]] for _ in hill], dtype=centroid_dtype)
int_profile = hill_data["int"]
ax1.plot(hill_data["rt"], hill_data["int"])
ax2.scatter(hill_data["rt"], hill_data["mz"], s = hill_data["int"]/5e5 )
ax1.set_title('Pattern')
ax1.set_xlabel('RT (min)')
ax1.set_ylabel('Intensity')
ax2.set_xlabel('RT (min)')
ax2.set_ylabel('m/z')
plt.show()
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def get_minpos(y:np.ndarray, iso_split_level:float)->List:
"""Function to get a list of minima in a trace.
A minimum is returned if the ratio of the lower of the two surrounding maxima to the minimum is at least the splitting factor.
Args:
y (np.ndarray): Input array.
iso_split_level (float): Isotope split level.
Returns:
List: List with min positions.
"""
minima = get_local_minima(y)
minima_list = List()
for minpos in minima:
minval = y[minpos]
left_max = (y[:minpos]).max()
right_max = (y[minpos:]).max()
minimum_max = min(left_max, right_max)
if minimum_max / minval >= iso_split_level:
minima_list.append(minpos)
return minima_list
@alphapept.performance.compile_function(compilation_mode="numba")
def get_local_minima(y:np.ndarray)->List:
"""Function to return all local minima of a array
Args:
y (np.ndarray): Input array.
Returns:
List: List with indices to minima.
"""
minima = List()
for i in range(1, len(y) - 1):
if is_local_minima(y, i):
minima.append(i)
return minima
@alphapept.performance.compile_function(compilation_mode="numba")
def is_local_minima(y:np.ndarray, i:int)->bool:
"""Check if position is a local minima.
Args:
y (np.ndarray): Input array.
i (int): Position to check.
Returns:
bool: Flag if position is minima or not.
"""
return (y[i - 1] > y[i]) & (y[i + 1] > y[i])
@alphapept.performance.compile_function(compilation_mode="numba")
def truncate(array:np.ndarray, intensity_profile:np.ndarray, seedpos:int, iso_split_level:float)->np.ndarray:
"""Function to truncate an intensity profile around its seedposition.
Args:
array (np.ndarray): Input array.
intensity_profile (np.ndarray): Intensities for the input array.
seedpos (int): Seedposition.
iso_split_level (float): Split level.
Returns:
np.ndarray: Truncated array.
"""
minima = int_list_to_array(get_minpos(intensity_profile, iso_split_level))
if len(minima) > 0:
left_minima = minima[minima < seedpos]
right_minima = minima[minima > seedpos]
# If the minimum is smaller than the seed
if len(left_minima) > 0:
minpos = left_minima[-1]
else:
minpos = 0
if len(right_minima) > 0:
maxpos = right_minima[0]
else:
maxpos = len(array)
array = array[minpos:maxpos+1]
return array
#hide
def test_get_minpos():
"""
Generate an intensity profile with local minima
Check that the minima are found
"""
intensity_profile = np.ones(20) * 10
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
minima = get_minpos(intensity_profile, 2)
minima_list = [_ for _ in minima]
assert minima_list == minima_ref
test_get_minpos()
###Output
_____no_output_____
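###Markdown
To make the directed criterion concrete, the following cell evaluates it for a seed at 500 Da and two hypothetical neighbors one and two isotope positions above it (charge 1); all masses and uncertainties are made up for illustration.
###Code
#hide
# Worked example of the directed isotope check for j = 1 and j = 2 steps above the seed,
# using the Delta M / Delta S constants quoted above; masses and uncertainties are made up.
import numpy as np

DELTA_M_ = 1.00286864
DELTA_S_ = 0.0109135
charge = 1
iso_mass_range = 5

m_seed, dm_seed = 500.000, 0.002 * iso_mass_range
neighbors = [(501.003, 0.002 * iso_mass_range, 1),   # one isotope step above the seed
             (502.006, 0.002 * iso_mass_range, 2)]   # two isotope steps above the seed

for m_j, dm_j, j in neighbors:
    # |(m_j - m) - j * DELTA_M / z|, i.e. the documented criterion with j counting steps above the seed
    left_side = np.abs((m_j - m_seed) - j * DELTA_M_ / charge)
    right_side = np.sqrt((DELTA_S_ / charge) ** 2 + dm_seed ** 2 + dm_j ** 2)
    assert left_side <= right_side
###Output
_____no_output_____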
###Markdown
Isolating Isotope Patterns
The extraction of the longest consistent isotope pattern is implemented in `isolate_isotope_pattern`. Here, three additional checks for an isotope pattern are implemented. The first one is `truncate`: the intensity profile around the seed position is checked for a minimum to its left or right side, and if a minimum is found, the isotope pattern is cut off at this position. The second one is a mass filter: if the seed has a mass smaller than 1000 Da, the intensity maximum is detected and all smaller masses are discarded. This reflects the averagine distribution for small masses, where no minimum on the left side can be found. The third one is `check_averagine`, which relies on `pattern_to_mz` and `cosine_averagine`. It ensures that the extracted isotope pattern has a cosine correlation with the averagine isotope pattern of the same mass of at least 0.6.After the longest consistent isotope pattern is found, its hills are removed from the pre-isotope pattern, and the process is repeated until no more isotope patterns can be extracted.
###Code
#export
from alphapept.chem import mass_to_dist
from alphapept.constants import averagine_aa, isotopes, Isotope
from numba.typed import Dict
@alphapept.performance.compile_function(compilation_mode="numba")
def check_averagine(stats:np.ndarray, pattern:np.ndarray, charge:int, averagine_aa:Dict, isotopes:Dict)->float:
"""Function to compare a pattern to an averagine model.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
Returns:
float: Averagine correlation.
"""
masses, intensity = pattern_to_mz(stats, pattern, charge)
spec_one = np.floor(masses).astype(np.int64)
int_one = intensity
spec_two, int_two = mass_to_dist(np.min(masses), averagine_aa, isotopes) # maybe change to no rounded version
spec_two = np.floor(spec_two).astype(np.int64)
return cosine_averagine(int_one, int_two, spec_one, spec_two)
@alphapept.performance.compile_function(compilation_mode="numba")
def pattern_to_mz(stats:np.ndarray, pattern:np.ndarray, charge:int)-> (np.ndarray, np.ndarray):
"""Function to calculate masses and intensities from pattern for a given charge.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge of the pattern.
Returns:
np.ndarray: masses
np.ndarray: intensity
"""
mzs = np.zeros(len(pattern))
ints = np.zeros(len(pattern))
for i in range(len(pattern)):
entry = pattern[i]
mzs[i] = mz_to_mass(stats[entry,0], charge)
ints[i] = stats[entry,2]
sortindex = np.argsort(mzs)
masses = mzs[sortindex]
intensity = ints[sortindex]
return masses, intensity
@alphapept.performance.compile_function(compilation_mode="numba")
def cosine_averagine(int_one:np.ndarray, int_two:np.ndarray, spec_one:np.ndarray, spec_two:np.ndarray)-> float:
"""Calculate the cosine correlation of two hills.
Args:
int_one (np.ndarray): Intensity of the first hill.
int_two (np.ndarray): Intensity of the second hill.
spec_one (np.ndarray): Scan numbers of the first hill.
spec_two (np.ndarray): Scan numbers of the second hill.
Returns:
float: Cosine
"""
min_one, max_one = spec_one[0], spec_one[-1]
min_two, max_two = spec_two[0], spec_two[-1]
min_s = np.min(np.array([min_one, min_two]))
max_s = np.max(np.array([max_one, max_two]))
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[spec_one - min_s] = int_one
int_two_scaled[spec_two - min_s] = int_two
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
@alphapept.performance.compile_function(compilation_mode="numba")
def int_list_to_array(numba_list:List)->np.ndarray:
"""Numba compatbilte function to convert a numba list with integers to a numpy array
Args:
numba_list (List): Input numba-typed List.
Returns:
np.ndarray: Output numpy array.
"""
array = np.zeros(len(numba_list), dtype=np.int64)
for i in range(len(array)):
array[i] = numba_list[i]
return array
M_PROTON = mass_dict['Proton']
@alphapept.performance.compile_function(compilation_mode="numba")
def mz_to_mass(mz:float, charge:int)->float:
"""Function to calculate the mass from a mz value.
Args:
mz (float): M/z
charge (int): Charge.
Raises:
NotImplementedError: When a negative charge is used.
Returns:
float: mass
"""
if charge < 0:
raise NotImplementedError("Negative Charges not implemented.")
mass = mz * charge - charge * M_PROTON
return mass
#hide
if False:
def test_truncate():
"""
Generate an intensity profile with local minima
Check whether the profile is correctly truncated with respect to the seed
"""
array = np.arange(0, 20)
intensity_profile = np.ones(20) * 10
iso_split_level = 1.3
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
seedpos = 5
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([3, 4, 5, 6, 7]))
seedpos = 0
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([0, 1, 2, 3]))
seedpos = len(array)
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([17, 18, 19]))
test_truncate()
###Output
_____no_output_____
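###Markdown
The averagine comparison boils down to a cosine between two intensity vectors placed on a common integer-mass axis. The cell below sketches that step with plain NumPy on two made-up distributions; the real check obtains the reference distribution from `mass_to_dist`.
###Code
#hide
# Minimal sketch of the integer-binned cosine used for the averagine check (made-up distributions).
import numpy as np

# Observed pattern and reference pattern as integer masses with intensities.
spec_one = np.array([1500, 1501, 1502, 1503])
int_one = np.array([0.9, 1.0, 0.6, 0.25])
spec_two = np.array([1500, 1501, 1502, 1503, 1504])
int_two = np.array([0.85, 1.0, 0.65, 0.3, 0.1])

min_s, max_s = min(spec_one[0], spec_two[0]), max(spec_one[-1], spec_two[-1])
a = np.zeros(max_s - min_s + 1)
b = np.zeros(max_s - min_s + 1)
a[spec_one - min_s] = int_one
b[spec_two - min_s] = int_two

cc = np.sum(a * b) / np.sqrt(np.sum(a ** 2) * np.sum(b ** 2))
assert cc > 0.6   # similar shapes pass the averagine cutoff
###Output
_____no_output_____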
###Markdown
Isotope Patterns
The wrapper function `get_isotope_patterns` iterates over all pre-isotope patterns.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def isolate_isotope_pattern(pre_pattern:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, iso_mass_range:float, charge_range:List, averagine_aa:Dict, isotopes:Dict, iso_n_seeds:int, cc_cutoff:float, iso_split_level:float)->(np.ndarray, int):
"""Isolate isotope patterns.
Args:
pre_pattern (np.ndarray): Pre isotope pattern.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
iso_mass_range (float): Mass range for checking isotope patterns.
charge_range (List): Charge range.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_n_seeds (int): Number of seeds.
cc_cutoff (float): Cutoff value for what is considered correlating.
iso_split_level (float): Split level when isotopes are split.
Returns:
np.ndarray: Array with the best pattern.
int: Charge of the best pattern.
"""
longest_trace = 0
champion_trace = None
champion_charge = 0
champion_intensity = 0
# Sort the pattern by average m/z (column 0 of stats)
sortindex = np.argsort(stats[pre_pattern][:,0])
sorted_pattern = pre_pattern[sortindex]
massindex = np.argsort(stats[sorted_pattern][:,2])[::-1][:iso_n_seeds] # most intense hills first
# Use the most intense elements of the pre_pattern as seeds
for seed in massindex: # Loop through all seeds
seed_global = sorted_pattern[seed]
trails = get_trails(seed, sorted_pattern, stats, charge_range, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
for index, trail in enumerate(trails):
if len(trail) >= longest_trace: # Needs to be at least as long as the current champion
arr = int_list_to_array(trail)
intensity_profile = stats[arr][:,2]
seedpos = np.nonzero(arr==seed_global)[0][0]
# truncate around the seed...
arr = truncate(arr, intensity_profile, seedpos, iso_split_level)
intensity_profile = stats[arr][:,2]
# Remove lower masses:
# Take the index of the maximum and remove all masses on the left side
if charge_range[index] * stats[seed_global, 0] < 1000:
maxpos = np.argmax(intensity_profile)
arr = arr[maxpos:]
intensity_profile = stats[arr][:,2]
if (len(arr) > longest_trace) | ((len(arr) == longest_trace) & (intensity_profile.sum() > champion_intensity)):
# Averagine check
cc = check_averagine(stats, arr, charge_range[index], averagine_aa, isotopes)
if cc > 0.6:
# Update the champion
champion_trace = arr
champion_charge = charge_range[index]
longest_trace = len(arr)
champion_intensity = intensity_profile.sum()
return champion_trace, champion_charge
#hide
if False:
def test_get_isotope_patterns():
test_centroids = [
[
(300, 50, 1, 1),
(300.501, 40, 1, 1),
(301.003, 30, 1, 1),
(301.504, 20, 1, 1),
(302.006, 10, 1, 1),
],
[
(300, 50, 2, 2),
(300.501, 40, 2, 2),
(301.003, 30, 2, 2),
(301.504, 20, 2, 2),
(302.006, 10, 2, 2),
],
[
(300, 50, 3, 3),
(300.501, 40, 3, 3),
(301.003, 30, 3, 3),
(301.504, 20, 3, 3),
(302.006, 10, 3, 3),
],
[
(300, 50, 4, 4),
(300.501, 40, 4, 4),
(301.003, 30, 4, 4),
(301.504, 20, 4, 4),
(302.006, 10, 4, 4),
],
[
(300, 50, 5, 5),
(300.501, 40, 5, 5),
(301.003, 30, 5, 5),
(301.504, 20, 5, 5),
(302.006, 10, 5, 5),
],
[(400, 10, 6, 6), (401, 10, 6, 6), (402, 10, 6, 6)],
[(400, 10, 7, 7), (401, 10, 7, 7), (402, 10, 7, 7)],
[(400, 10, 8, 8), (401, 10, 8, 8), (402, 10, 8, 8)],
[(400, 10, 9, 9), (401, 10, 9, 9), (402, 10, 9, 9)],
]
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
test_centroids_tmp = [np.array(_, dtype=centroid_dtype) for _ in test_centroids]
test_centroids = List([_ for _ in test_centroids_tmp])
test_hills = get_hills(test_centroids)
sorted_hills, stats, data, hill_data, hill_ptrs = get_hill_data(test_hills, test_centroids)
pre_patterns = get_edges(stats, data)
isotope_patterns, isotope_charges = get_isotope_patterns(pre_patterns, stats, data, averagine_aa, isotopes)
assert np.all(isotope_patterns[0] == np.array([0, 1, 2, 3, 4]))
assert isotope_charges[0] == 2
assert np.all(isotope_patterns[1] == np.array([5,6,7]))
assert isotope_charges[1] == 1
test_get_isotope_patterns()
#export
from numba.typed import List
from typing import Callable, Union
def get_isotope_patterns(pre_isotope_patterns:list, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, averagine_aa:Dict, isotopes:Dict, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:float = 5, iso_n_seeds:int = 100, cc_cutoff:float=0.6, iso_split_level:float = 1.3, callback:Union[Callable, None]=None) -> (np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to iterate over pre_isotope_patterns.
Args:
pre_isotope_patterns (list): List of pre-isotope patterns.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
iso_n_seeds (int, optional): Number of isotope seeds. Defaults to 100.
cc_cutoff (float, optional): Cutoff for correlation. Defaults to 0.6.
iso_split_level (float, optional): Isotope split level. Defaults to 1.3.
callback (Union[Callable, None], optional): Callback function for progress. Defaults to None.
Returns:
np.ndarray: Flattened array of hill indices for all isotope patterns.
np.ndarray: Index array (pointers) into the flattened pattern array.
np.ndarray: Array containing isotope charges.
"""
isotope_patterns = []
isotope_charges = []
charge_range = List()
for i in range(iso_charge_min, iso_charge_max + 1):
charge_range.append(i)
for idx, pre_pattern in enumerate(pre_isotope_patterns):
extract = True
while extract:
isotope_pattern, isotope_charge = isolate_isotope_pattern(np.array(pre_pattern), hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, iso_mass_range, charge_range, averagine_aa, isotopes, iso_n_seeds, cc_cutoff, iso_split_level)
if isotope_pattern is None:
length = 0
else:
length = len(isotope_pattern)
if length > 1:
isotope_charges.append(isotope_charge)
isotope_patterns.append(isotope_pattern)
pre_pattern = [_ for _ in pre_pattern if _ not in isotope_pattern]
if len(pre_pattern) <= 1:
extract = False
else:
extract = False
if callback:
callback((idx+1)/len(pre_isotope_patterns))
iso_patterns = np.zeros(sum([len(_) for _ in isotope_patterns]), dtype=np.int64)
iso_idx = np.zeros(len(isotope_patterns)+1, dtype='int')
start = 0
for idx, _ in enumerate(isotope_patterns):
iso_patterns[start:start+len(_)] = _
start += len(_)
iso_idx[idx+1] = start
return iso_patterns, iso_idx, np.array(isotope_charges)
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def report_(idx:np.ndarray, isotope_charges:list, isotope_patterns:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, results:np.ndarray, lookup_idx:np.ndarray):
"""Function to extract summary statstics from a list of isotope patterns and charges.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
results (np.ndarray): Recordarray with isotope pattern summary statistics.
lookup_idx (np.ndarray): Lookup array for each centroid.
"""
pattern = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
isotope_data = stats[pattern]
mz = np.min(isotope_data[:, 0])
mz_std = np.mean(isotope_data[:, 1])
charge = isotope_charges[idx]
mass = mz_to_mass(mz, charge)
int_max_idx = np.argmax(isotope_data[:, 2])
mz_most_abundant = isotope_data[:, 0][int_max_idx]
int_max = isotope_data[:,2][int_max_idx]
rt_start = isotope_data[int_max_idx, 4] # This is the start of the most abundant trace
rt_end = isotope_data[int_max_idx, 5]
# better measurement of the peak with interpolation
rt_min_ = min(isotope_data[:, 4])
rt_max_ = max(isotope_data[:, 5])
rt_range = np.linspace(rt_min_, rt_max_, 100)
trace_sum = np.zeros_like(rt_range)
for i, k in enumerate(pattern):
x = sortindex_[k]
start = hill_ptrs[x]
end = hill_ptrs[x + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
rts = rt_[rt_idx[idx_]]
lookup_idx[idx_, 0] = idx
lookup_idx[idx_, 1] = i
interpolation = np.interp(rt_range, rts, int_)
#Filter
interpolation[:(rt_range < rts[0]).sum()] = 0
right_cut = (rt_range > rts[-1]).sum()
if right_cut > 0:
interpolation[-right_cut:]= 0
trace_sum += interpolation
rt_apex_idx = trace_sum.argmax()
rt_apex = rt_range[rt_apex_idx]
trace = trace_sum
half_max = trace.max()/2
if rt_apex_idx == 0:
left_apex = 0
else:
left_apex = np.abs(trace[:rt_apex_idx]-half_max).argmin()
right_apex = np.abs(trace[rt_apex_idx:]-half_max).argmin()+rt_apex_idx
ms1_int_apex = trace_sum[rt_apex_idx]
fwhm = rt_range[right_apex] - rt_range[left_apex]
n_isotopes = len(pattern)
rt_cutoff = 0.95 # the peak is cut where the trace falls to 5% of its maximum
if rt_apex_idx == 0:
rt_min_idx = 0
else:
rt_min_idx = np.abs(trace[:rt_apex_idx]-trace.max()*(1-rt_cutoff)).argmin()
rt_max_idx = np.abs(trace[rt_apex_idx:]-trace.max()*(1-rt_cutoff)).argmin()+rt_apex_idx
#plt.xlabel('rt')
#plt.ylabel('int')
#plt.show()
#plt.plot(rt_range, trace_sum)
#plt.plot([rt_range[left_apex], rt_range[right_apex]], [(trace[left_apex] + trace[right_apex])/2]*2, 'k:')
#plt.plot(rt_range[rt_apex_idx], trace[rt_apex_idx], 'k*')
#plt.plot(rt_range[rt_min_idx], trace[rt_min_idx], 'k*')
#plt.plot(rt_range[rt_max_idx], trace[rt_max_idx], 'k*')
#plt.show()
rt_start = rt_range[rt_min_idx]
rt_end = rt_range[rt_max_idx]
ms1_int_area = np.abs(np.trapz(trace_sum[rt_min_idx:rt_max_idx], rt_range[rt_min_idx:rt_max_idx]))
ms1_int_sum = trace_sum.sum()
results[idx,:] = np.array([mz, mz_std, mz_most_abundant, charge, rt_start, rt_apex, rt_end, fwhm, n_isotopes, mass, ms1_int_apex, ms1_int_area, ms1_int_sum])
#export
import pandas as pd
def feature_finder_report(query_data:dict, isotope_patterns:list, isotope_charges:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray)->pd.DataFrame:
"""Creates a report dataframe with summary statistics of the found isotope patterns.
Args:
query_data (dict): Data structure containing the query data.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to the isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
Returns:
pd.DataFrame: DataFrame with isotope pattern summary statistics.
np.ndarray: Lookup array matching each centroid to its isotope pattern and hill.
"""
rt_ = np.array(query_data['rt_list_ms1'])
indices_ = np.array(query_data['indices_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
rt_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
lookup_idx = np.zeros((len(mass_data), 2), dtype=np.int64) - 1
int_data = np.array(query_data['int_list_ms1'])
results = np.zeros((len(isotope_charges), 13))
report_(range(len(isotope_charges)), isotope_charges, isotope_patterns, iso_idx, stats, sortindex_, hill_ptrs, hill_data, int_data, rt_, rt_idx, results, lookup_idx)
df = pd.DataFrame(results, columns = ['mz','mz_std','mz_most_abundant','charge','rt_start','rt_apex','rt_end','fwhm','n_isotopes','mass','ms1_int_apex','ms1_int_area', 'ms1_int_sum'])
df = df.sort_values(['rt_start','mz'])
return df, lookup_idx
###Output
_____no_output_____
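###Markdown
`get_isotope_patterns` returns the patterns as a flat array plus a pointer array, the same layout used for `hill_data`/`hill_ptrs`. The cell below sketches how to recover the i-th pattern from that layout, using made-up values.
###Code
#hide
# Minimal sketch of the flat pattern / pointer layout returned by get_isotope_patterns (made-up values).
import numpy as np

iso_patterns = np.array([7, 8, 9, 3, 4], dtype=np.int64)  # hill indices of all patterns, concatenated
iso_idx = np.array([0, 3, 5])                             # pattern i spans iso_patterns[iso_idx[i]:iso_idx[i+1]]

patterns = [iso_patterns[iso_idx[i]:iso_idx[i + 1]] for i in range(len(iso_idx) - 1)]
assert [list(p) for p in patterns] == [[7, 8, 9], [3, 4]]
###Output
_____no_output_____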
###Markdown
Data Output
For each feature that is found, we extract summary statistics and put them in tabular form to be used as a pandas DataFrame.
Plotting
For quality control reasons, we also employ a function to plot a feature in its local environment.
External Feature Finder
To utilize the command-line feature finder from Bruker, `4DFF-3.13` - `uff-cmdline2.exe`, we call it via a subprocess and wait until completion.
###Code
#export
import subprocess
import os
import platform
import logging
def extract_bruker(file:str, base_dir:str = "ext/bruker/FF", config:str = "proteomics_4d.config"):
"""Call Bruker Feautre Finder via subprocess.
Args:
file (str): Filename for feature finding.
base_dir (str, optional): Base dir where the feature finder is stored.. Defaults to "ext/bruker/FF".
config (str, optional): Config file for feature finder. Defaults to "proteomics_4d.config".
Raises:
NotImplementedError: Unsupported operating system.
FileNotFoundError: Feature finder not found.
FileNotFoundError: Config file not found.
FileNotFoundError: Feature file not found.
"""
feature_path = file + '/'+ os.path.split(file)[-1] + '.features'
base_dir = os.path.join(os.path.dirname(__file__), base_dir)
operating_system = platform.system()
if operating_system == 'Linux':
ff_dir = os.path.join(base_dir, 'linux64','uff-cmdline2')
logging.info('Using Linux FF')
elif operating_system == 'Windows':
ff_dir = os.path.join(base_dir, 'win64','uff-cmdline2.exe')
logging.info('Using Windows FF')
else:
raise NotImplementedError(f"System {operating_system} not supported.")
if os.path.exists(feature_path):
return feature_path
else:
if not os.path.isfile(ff_dir):
raise FileNotFoundError(f'Bruker feature finder cmd not found here {ff_dir}.')
config_path = base_dir + '/'+ config
if not os.path.isfile(config_path):
raise FileNotFoundError(f'Config file not found here {config_path}.')
if operating_system == 'Windows':
FF_parameters = [ff_dir,'--ff 4d',f'--readconfig "{config_path}"', f'--analysisDirectory "{file}"']
process = subprocess.Popen(' '.join(FF_parameters), stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, b''):
logtxt = line.decode('utf8')
logging.info(logtxt[48:].rstrip()) #Remove logging info from FF
elif operating_system == 'Linux':
FF_parameters = [
ff_dir,
'--ff',
'4d',
'--readconfig',
config_path,
'--analysisDirectory',
file
]
process = subprocess.run(FF_parameters, stdout=subprocess.PIPE)
if os.path.exists(feature_path):
return feature_path
else:
raise FileNotFoundError(f"Feature file {feature_path} does not exist.")
import sqlalchemy as db
def convert_bruker(feature_path:str)->pd.DataFrame:
"""Reads feature table and converts to feature table to be used with AlphaPept.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
Returns:
pd.DataFrame: DataFrame containing feature information.
pd.DataFrame: DataFrame containing the feature-to-cluster mapping.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
feature_table = pd.read_sql_table('LcTimsMsFeature', engine_featurefile)
feature_cluster_mapping = pd.read_sql_table('FeatureClusterMapping', engine_featurefile)
from alphapept.constants import mass_dict
M_PROTON = mass_dict['Proton']
feature_table['Mass'] = feature_table['MZ'].values * feature_table['Charge'].values - feature_table['Charge'].values*M_PROTON
feature_table = feature_table.rename(columns={"MZ": "mz","Mass": "mass", "RT": "rt_apex", "RT_lower":"rt_start", "RT_upper":"rt_end", "Mobility": "mobility", "Mobility_lower": "mobility_lower", "Mobility_upper": "mobility_upper", "Charge":"charge","Intensity":'ms1_int_sum',"ClusterCount":'n_isotopes'})
feature_table['rt_apex'] = feature_table['rt_apex']/60
feature_table['rt_start'] = feature_table['rt_start']/60
feature_table['rt_end'] = feature_table['rt_end']/60
feature_cluster_mapping = feature_cluster_mapping.rename(columns={"FeatureId": "feature_id", "ClusterId": "cluster_id", "Monoisotopic": "monoisotopic", "Intensity": "ms1_int_sum"})
return feature_table, feature_cluster_mapping
def map_bruker(feature_path:str, feature_table:pd.DataFrame, query_data:dict)->pd.DataFrame:
"""Map Ms1 to Ms2 via Table FeaturePrecursorMapping from Bruker FF.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
feature_table (pd.DataFrame): Pandas DataFrame containing the features.
query_data (dict): Data structure containing the query data.
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
mapping = pd.read_sql_table('FeaturePrecursorMapping', engine_featurefile)
mapping = mapping.set_index('PrecursorId')
feature_table= feature_table.set_index('Id')
query_prec_id = query_data['prec_id']
#Now look up the feature for each precursor
mass_matched = []
mz_matched = []
rt_matched = []
query_idx = []
f_idx = []
for idx, prec_id in tqdm(enumerate(query_prec_id)):
try:
f_id = mapping.loc[prec_id]['FeatureId']
all_matches = feature_table.loc[f_id]
if type(f_id) == np.int64:
match = all_matches
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
else:
for k in range(len(all_matches)):
match = all_matches.iloc[k]
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
except KeyError:
pass
features = pd.DataFrame(np.array([mass_matched, mz_matched, rt_matched, query_idx, f_idx]).T, columns = ['mass_matched', 'mz_matched', 'rt_matched', 'query_idx', 'feature_idx'])
features['query_idx'] = features['query_idx'].astype('int')
return features
###Output
_____no_output_____
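###Markdown
`convert_bruker` is essentially a SQLAlchemy/pandas round trip over the `.features` SQLite file. The cell below sketches the same pattern against an in-memory SQLite database with a made-up toy table, so no Bruker file is required; the proton mass is written out explicitly instead of taking it from `mass_dict`.
###Code
#hide
# Minimal sketch of the SQLAlchemy/pandas round trip used by convert_bruker, on an in-memory toy table.
import pandas as pd
import sqlalchemy as db

engine = db.create_engine('sqlite://')  # in-memory database instead of a .features file
toy = pd.DataFrame({'MZ': [500.25, 750.50], 'Charge': [2, 3], 'RT': [120.0, 300.0]})
toy.to_sql('LcTimsMsFeature', engine, index=False)

feature_table = pd.read_sql_table('LcTimsMsFeature', engine)

M_PROTON = 1.00727646687  # proton mass in Da; the pipeline takes this from mass_dict
feature_table['Mass'] = feature_table['MZ'] * feature_table['Charge'] - feature_table['Charge'] * M_PROTON
feature_table['RT'] = feature_table['RT'] / 60  # seconds to minutes

assert len(feature_table) == 2
assert abs(feature_table['Mass'].iloc[0] - 998.485) < 0.01
###Output
_____no_output_____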
###Markdown
Isotope Export
###Code
#export
def get_stats(isotope_patterns, iso_idx, stats):
columns = ['mz_average','delta_m','int_sum','int_area','rt_min','rt_max']
stats_idx = np.zeros(iso_idx[-1], dtype=np.int64)
stats_map = np.zeros(iso_idx[-1], dtype=np.int64)
start_ = 0
end_ = 0
for idx in range(len(iso_idx)-1):
k = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
end_ += len(k)
stats_idx[start_:end_] = k
stats_map[start_:end_] = idx
start_ = end_
k = pd.DataFrame(stats[stats_idx], columns=columns)
k['feature_id'] = stats_map
return k
###Output
_____no_output_____
###Markdown
Wrapper
###Code
#export
import numpy as np
import logging
import os
from alphapept.search import query_data_to_features
import alphapept.io
import functools
def find_features(to_process:tuple, callback:Union[Callable, None] = None, parallel:bool = False)-> Union[str, bool]:
"""Wrapper for feature finding.
Args:
to_process (tuple): to_process tuple, to be used from a process pool.
callback (Union[Callable, None], optional): Optional callback function. Defaults to None.
parallel (bool, optional): Flag to use parallel processing. Currently unused. Defaults to False.
Raises:
NotImplementedError: Error if the file extension is not understood.
Returns:
Union[str, bool]: Returns True if the function was successful, otherwise the exception as a string.
"""
try:
index, settings = to_process
file_name = settings['experiment']['file_paths'][index]
base, ext = os.path.splitext(file_name)
if ext.lower() == '.raw':
datatype='thermo'
elif ext.lower() == '.d':
datatype='bruker'
elif ext.lower() == '.mzml':
datatype='mzml'
else:
raise NotImplementedError('File extension {} not understood.'.format(ext))
out_file = f"{base}.ms_data.hdf"
skip = True
if os.path.isfile(out_file):
try:
alphapept.io.MS_Data_File(
out_file
).read(dataset_name="features")
logging.info(
'Found *.hdf with features for {}'.format(out_file)
)
except KeyError:
logging.info(
'No *.hdf file with features found for {}. Adding to feature finding list.'.format(out_file)
)
skip = False
if not skip:
ms_file = alphapept.io.MS_Data_File(out_file, is_read_only=False)
query_data = ms_file.read_DDA_query_data()
feature_cluster_mapping = pd.DataFrame()
if not settings['workflow']["find_features"]:
features = query_data_to_features(query_data)
else:
if datatype in ['thermo','mzml']:
from alphapept.constants import averagine_aa, isotopes
f_settings = settings['features']
max_gap = f_settings['max_gap']
centroid_tol = f_settings['centroid_tol']
hill_split_level = f_settings['hill_split_level']
iso_split_level = f_settings['iso_split_level']
#Cleanup if
int_data = np.array(query_data['int_list_ms1'])
window = f_settings['hill_smoothing']
hill_check_large = f_settings['hill_check_large']
iso_charge_min = f_settings['iso_charge_min']
iso_charge_max = f_settings['iso_charge_max']
iso_n_seeds = f_settings['iso_n_seeds']
hill_nboot_max = f_settings['hill_nboot_max']
hill_nboot = f_settings['hill_nboot']
iso_mass_range = f_settings['iso_mass_range']
iso_corr_min = f_settings['iso_corr_min']
logging.info('Feature finding on {}'.format(file_name))
logging.info(f'Hill extraction with centroid_tol {centroid_tol} and max_gap {max_gap}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, centroid_tol)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
logging.info(f'Repeating hill extraction with centroid_tol {score_median+score_std*3:.2f}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, score_median+score_std*3)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
hill_ptrs, hill_data = remove_duplicate_hills(hill_ptrs, hill_data, path_node_cnt)
logging.info(f'After duplicate removal of hills {len(hill_ptrs):,}')
hill_ptrs = split_hills(hill_ptrs, hill_data, int_data, hill_split_level=hill_split_level, window = window) # hill length is in there already
logging.info(f'After split hill_ptrs {len(hill_ptrs):,}')
hill_data, hill_ptrs = filter_hills(hill_data, hill_ptrs, int_data, hill_check_large = hill_check_large, window=window)
logging.info(f'After filter hill_ptrs {len(hill_ptrs):,}')
stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs = get_hill_data(query_data, hill_ptrs, hill_data, hill_nboot_max = hill_nboot_max, hill_nboot = hill_nboot)
logging.info('Extracting hill stats complete')
pre_isotope_patterns = get_pre_isotope_patterns(stats, idxs_upper, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, maximum_offset, iso_charge_min=iso_charge_min, iso_charge_max=iso_charge_max, iso_mass_range=iso_mass_range, cc_cutoff=iso_corr_min)
logging.info('Found {:,} pre isotope patterns.'.format(len(pre_isotope_patterns)))
isotope_patterns, iso_idx, isotope_charges = get_isotope_patterns(pre_isotope_patterns, hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, averagine_aa, isotopes, iso_charge_min = iso_charge_min, iso_charge_max = iso_charge_max, iso_mass_range = iso_mass_range, iso_n_seeds = iso_n_seeds, cc_cutoff = iso_corr_min, iso_split_level=iso_split_level, callback=None)
logging.info('Extracted {:,} isotope patterns.'.format(len(isotope_charges)))
feature_table, lookup_idx = feature_finder_report(query_data, isotope_patterns, isotope_charges, iso_idx, stats, sortindex_, hill_ptrs, hill_data)
lookup_idx_df = pd.DataFrame(lookup_idx, columns = ['isotope_pattern', 'isotope_pattern_hill'])
ms_file.write(lookup_idx_df, dataset_name="feature_table_idx")
feature_cluster_mapping = get_stats(isotope_patterns, iso_idx, stats)
logging.info('Report complete.')
elif datatype == 'bruker':
logging.info('Feature finding on {}'.format(file_name))
feature_path = extract_bruker(file_name)
feature_table, feature_cluster_mapping = convert_bruker(feature_path)
logging.info('Bruker feature finder complete. Extracted {:,} features.'.format(len(feature_table)))
# Calculate additional params
feature_table['rt_length'] = feature_table['rt_end'] - feature_table['rt_start']
feature_table['rt_right'] = feature_table['rt_end'] - feature_table['rt_apex']
feature_table['rt_left'] = feature_table['rt_apex'] - feature_table['rt_start']
feature_table['rt_tail'] = feature_table['rt_right'] / feature_table['rt_left']
logging.info('Matching features to query data.')
if 'mono_mzs2' not in query_data.keys():
logging.info('No MS2-data to match.')
features = pd.DataFrame()
else:
features = map_ms2(feature_table, query_data, **settings['features'])
ms_file.write(feature_cluster_mapping, dataset_name="feature_cluster_mapping")
logging.info('Saving feature table.')
ms_file.write(feature_table, dataset_name="feature_table")
logging.info('Feature table saved to {}'.format(out_file))
logging.info('Saving features.')
ms_file.write(features, dataset_name="features")
logging.info(f'Feature finding of file {file_name} complete.')
return True
except Exception as e:
logging.error(f'Feature finding of file {file_name} failed. Exception {e}')
return f"{e}" #Can't return exception object, cast as string
###Output
_____no_output_____
###Markdown
Mapping> Mapping MS1 to MS2
###Code
#export
from sklearn.neighbors import KDTree
import pandas as pd
import numpy as np
def replace_infs(array:np.ndarray)->np.ndarray:
"""Replace nans and infs with 0
Args:
array (np.ndarray): Input array.
Returns:
np.ndarray: Output array without nans and infs.
"""
array[array == -np.inf] = 0
array[array == np.inf] = 0
array[np.isnan(array)] = 0
return array
def map_ms2(feature_table:pd.DataFrame, query_data:dict, map_mz_range:float = 1, map_rt_range:float = 0.5, map_mob_range:float = 0.3, map_n_neighbors:int=5, search_unidentified:bool = False, **kwargs)->pd.DataFrame:
"""Map MS1 features to MS2 based on rt and mz.
    If mobility (ccs) information is included, it is also mapped.
Args:
feature_table (pd.DataFrame): Pandas DataFrame with features.
query_data (dict): Data structure containing the query data.
map_mz_range (float, optional): Mapping range for mz (Da). Defaults to 1.
map_rt_range (float, optional): Mapping range for rt (min). Defaults to 0.5.
map_mob_range (float, optional): Mapping range for mobility (%). Defaults to 0.3.
map_n_neighbors (int, optional): Maximum number of neighbors to be extracted. Defaults to 5.
search_unidentified (bool, optional): Flag to perform search on features that have no isotope pattern. Defaults to False.
Returns:
pd.DataFrame: Table with features.
"""
feature_table['rt'] = feature_table['rt_apex']
range_dict = {}
range_dict['mz'] = ('mono_mzs2', map_mz_range)
range_dict['rt'] = ('rt_list_ms2', map_rt_range)
range_dict['mobility'] = ('mobility', map_mob_range)
query_dict = {}
query_dict['rt'] = 'rt_list_ms2'
query_dict['mass'] = 'prec_mass_list2'
query_dict['mz'] = 'mono_mzs2'
query_dict['charge'] = 'charge2'
query_dict['mobility'] = 'mobility'
if 'mobility' not in feature_table.columns:
del range_dict['mobility']
del query_dict['mobility']
use_mob = False
else:
use_mob = True
tree_points = feature_table[list(range_dict.keys())].values
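    # Scale each coordinate by its mapping range so that the Euclidean metric of the KDTree weights m/z, rt (and mobility) comparably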
for i, key in enumerate(range_dict):
tree_points[:,i] = tree_points[:,i]/range_dict[key][1]
matching_tree = KDTree(tree_points, metric="euclidean")
ref_points = np.array([query_data[range_dict[_][0]] / range_dict[_][1] for _ in range_dict]).T
ref_points = replace_infs(ref_points)
dist, idx = matching_tree.query(ref_points, k=map_n_neighbors)
ref_matched = np.zeros(ref_points.shape[0], dtype=np.bool_)
all_df = []
for neighbor in range(map_n_neighbors):
ref_df = pd.DataFrame(np.array([query_data[query_dict[_]] for _ in query_dict]).T, columns = query_dict.keys())
for _ in query_dict:
ref_df[_+'_matched'] = feature_table.iloc[idx[:,neighbor]][_].values
ref_df[_+'_offset'] = ref_df[_+'_matched'] - ref_df[_]
ref_df['query_idx'] = ref_df.index
ref_df['feature_idx'] = idx[:,neighbor]
for field in ['ms1_int_sum','ms1_int_apex','rt_start','rt_apex','rt_end','fwhm','mobility_lower','mobility_upper']:
if field in feature_table.keys():
ref_df[field] = feature_table.iloc[idx[:,neighbor]][field].values
rt_check = (ref_df['rt_start'] <= ref_df['rt']) & (ref_df['rt'] <= ref_df['rt_end'])
# check isolation window (win=3)
mass_check = np.abs(ref_df['mz_offset'].values) <= 3
_check = rt_check & mass_check
if use_mob:
mob_check = (ref_df['mobility_lower'] <= ref_df['mobility']) & (ref_df['mobility'] <= ref_df['mobility_upper'])
_check &= mob_check
ref_matched |= _check
ref_df['feature_dist'] = dist[:,neighbor]
ref_df = ref_df[_check]
all_df.append(ref_df)
if search_unidentified:
if use_mob:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2'], query_data['mobility']]).T, columns=['rt', 'mass', 'mz', 'charge','mobility'])
else:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2']]).T, columns=['rt', 'mass', 'mz', 'charge'])
unmatched_ref = unmatched_ref[~ref_matched]
unmatched_ref['mass_matched'] = unmatched_ref['mass']
unmatched_ref['mass_offset'] = 0
unmatched_ref['rt_matched'] = unmatched_ref['rt']
unmatched_ref['rt_offset'] = 0
unmatched_ref['mz_matched'] = unmatched_ref['mz']
unmatched_ref['mz_offset'] = 0
unmatched_ref['charge_matched'] = unmatched_ref['charge']
unmatched_ref['query_idx'] = unmatched_ref.index
unmatched_ref['feature_idx'] = np.nan
if use_mob:
            unmatched_ref['mobility_matched'] = unmatched_ref['mobility']
            unmatched_ref['mobility_offset'] = np.nan
for field in ['ms1_int_sum','ms1_int_apex','rt_start','rt_apex','rt_end','fwhm']:
if field in feature_table.keys():
unmatched_ref[field] = np.nan
unmatched_ref['feature_dist'] = np.nan
all_df.append(unmatched_ref)
features = pd.concat(all_df)
features = features.sort_values('mass_matched', ascending=True)
features = features.reset_index(drop=True)
return features
#hide
from nbdev.showdoc import *
#hide
from nbdev.export import *
notebook2script()
###Output
Converted 00_settings.ipynb.
Converted 01_chem.ipynb.
Converted 02_io.ipynb.
Converted 03_fasta.ipynb.
Converted 04_feature_finding.ipynb.
Converted 05_search.ipynb.
Converted 06_score.ipynb.
Converted 07_recalibration.ipynb.
Converted 08_quantification.ipynb.
Converted 09_matching.ipynb.
Converted 10_constants.ipynb.
Converted 11_interface.ipynb.
Converted 12_performance.ipynb.
Converted 13_export.ipynb.
Converted 14_display.ipynb.
Converted 15_label.ipynb.
Converted additional_code.ipynb.
Converted contributing.ipynb.
Converted file_formats.ipynb.
Converted index.ipynb.
###Markdown
Feature Finding> Functions related to feature finding This part describes the implementation of the feature-finding algorithm. The core of the algorithm is described in the [MaxQuant-Paper](https://www.nature.com/articles/nbt.1511). The supplementary material explains the underlying methodology in great detail and is the foundation of the theoretical background that is described here. A refined version of the algorithm was presented with [Dinosaur](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4933939/), which was also used as a reference for the Python implementation. For the algorithm, we need several modules:
1. Connecting Centroids to Hills
2. Refinement of Hills
3. Calculating Hill Statistics
4. Combining Hills to Isotope Patterns
5. Deconvolution of Isotope Patterns
Loading Data From the `IO` library, we already have an `*.ms_data.hdf` container that contains centroided data. To use it in feature finding, we directly load the data. Connecting Centroids to Hills> Note: Feature finding relies heavily on the performance function decorator from the performance notebook: `@alphapept.performance.performance_function`. Part of this is that the functions will not have return values to be GPU compatible. Please check out this notebook for further information. Connecting centroids Feature finding starts with connecting centroids. For this, we look at subsequent scans and compare peaks that are within a defined mass tolerance (`centroid_tol`). Imagine you have three scans with the following centroids:
* Scan 0: 10, 20, 30
* Scan 1: 10.2, 40.1
* Scan 2: 40, 50, 60
When comparing consecutive scans and defining the maximum delta mass to be 0.5, we find the following connections: (Scan No, Centroid No) -> (Scan No, Centroid No). As we cannot easily store tuples in the matrix, we convert the tuple containing the position of the connected centroid to an integer.
* (0,0) -> (1,0) -> (3): 10 & 10.2 -> delta = 0.2
* (1,1) -> (2,0) -> (6): 40.1 & 40 -> delta = 0.1
Finally, we store this in the `results` matrix:$\begin{bmatrix}3 & -1 & -1 \\ -1 & 6 & -1\\ -1 & -1 & -1 \end{bmatrix}$The corresponding `scores` matrix will look as follows:$\begin{bmatrix}0.2 & -1 & -1 \\ -1 & 0.1 & -1\\ -1 & -1 & -1 \end{bmatrix}$This allows us to not only easily store connections between centroids but also perform a quick lookup for the delta of an existing connection. Note that it also only stores the best connection for each centroid. To extract the connected centroids, we can use `np.where(results >= 0)`. This implementation allows getting millions of connections within seconds. As we also allow gaps, meaning that we can have connections between Scan 0 and Scan 2, we make the aforementioned matrix multidimensional, so that e.g. the first matrix stores the connections for no gap and the second matrix the connections with a gap of 1. The functionality for this step is implemented in `connect_centroids_unidirection` and the wrapper `find_centroid_connections`.
###Code
#export
import numpy as np
import alphapept.performance
#This function is tested by being called from find_centroid_connections
@alphapept.performance.performance_function
def connect_centroids_unidirection(x:np.ndarray, row_borders:np.ndarray, connections:np.ndarray, scores:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Connect centroids.
Args:
x (np.ndarray): Index to datapoint. Note that this using the performance_function, so one passes an ndarray.
row_borders (np.ndarray): Row borders of the centroids array.
connections (np.ndarray): Connections matrix to store the connections
scores (np.ndarray): Score matrix to store the connections
centroids (np.ndarray): 1D Array containing the masses of the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
for gap in range(max_gap + 1):
y = x + gap + 1
if y >= row_borders.shape[0]:
return
start_index_f = 0
if x > 0:
start_index_f = row_borders[x - 1]
centroids_1 = centroids[start_index_f: row_borders[x]]
start_index_b = row_borders[y - 1]
centroids_2 = centroids[start_index_b: row_borders[y]]
i = 0
j = 0
while (i < len(centroids_1)) & (j < len(centroids_2)):
mz1, mz2 = centroids_1[i], centroids_2[j]
diff = mz1 - mz2
mz_sum = mz1 + mz2
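            # Mass difference in parts per million, relative to the mean of the two masses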
delta = 2 * 1e6 * abs(diff) / mz_sum
if delta < centroid_tol:
if scores[x, i, gap] > delta:
scores[x, i, gap] = delta
connections[x, i, gap] = (connections.shape[1] * y) + j
if diff > 0:
j += 1
else:
i += 1
def find_centroid_connections(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float):
"""Wrapper function to call connect_centroids_unidirection
Args:
rowwise_peaks (np.ndarray): Length of centroids with respect to the row borders.
row_borders (np.ndarray): Row borders of the centroids array.
centroids (np.ndarray): Array containing the centroids data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
max_centroids = int(cupy.max(rowwise_peaks))
spectra_cnt = len(row_borders) - 1
connections = cupy.full((spectra_cnt, max_centroids, max_gap + 1), -1, dtype=np.int32)
score = cupy.full((spectra_cnt, max_centroids, max_gap + 1), np.inf)
connect_centroids_unidirection(range(len(row_borders)),
row_borders,
connections,
score,
centroids,
max_gap,
centroid_tol)
score = score[cupy.where(score < np.inf)]
score_median = cupy.median(score)
score_std = cupy.std(score)
del score, max_centroids, spectra_cnt
c_shape = connections.shape
from_r, from_c, from_g = cupy.where(connections >= 0)
to_r = connections[from_r, from_c, from_g] // c_shape[1]
to_c = connections[from_r, from_c, from_g] - to_r * c_shape[1]
del connections, from_g
return from_r, from_c, to_r, to_c, score_median, score_std
#hide
def test_find_centroid_connections():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 40.1, 40, 50, 60])
centroid_tol = 0.5*1e6
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_r, np.array([0, 0, 1, 1])) #e.g. 0,0 is connected to 0,1 -> 10 to 10.2
assert np.allclose(from_c, np.array([0, 2, 1, 2]))
assert np.allclose(to_r, np.array([1, 1, 2, 2]))
assert np.allclose(to_c, np.array([0, 1, 0, 0]))
test_find_centroid_connections()
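# Minimal sketch (not part of the original code): how a connected centroid (scan y, centroid j)
# is encoded as a single integer in the connections matrix and decoded again.
# n_cols is assumed to be the number of columns of the connections matrix (max. centroids per scan).
n_cols = 3
scan_y, centroid_j = 1, 0                   # centroid (1, 0), i.e. the 10.2 peak from the example above
encoded = n_cols * scan_y + centroid_j      # -> 3, the value stored in the results matrix
decoded_scan, decoded_centroid = encoded // n_cols, encoded % n_cols
assert (encoded, decoded_scan, decoded_centroid) == (3, 1, 0)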
###Output
_____no_output_____
###Markdown
We wrap the centroid connections in the function `connect_centroids`. This function converts the connections into a usable array.
###Code
#export
#the performance functions are tested with the wrapper function connect_centroids
@alphapept.performance.performance_function
def convert_connections_to_array(x:np.ndarray, from_r:np.ndarray, from_c:np.ndarray, to_r:np.ndarray, to_c:np.ndarray, row_borders:np.ndarray, out_from_idx:np.ndarray, out_to_idx:np.ndarray):
"""Convert integer indices of a matrix to coordinates.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_r (np.ndarray): From array with row coordinates.
from_c (np.ndarray): From array with column coordinates.
to_r (np.ndarray): To array with row coordinates.
to_c (np.ndarray): To array with column coordinates.
row_borders (np.ndarray): Row borders (for indexing).
out_from_idx (np.ndarray): Reporting array: 1D index from.
out_to_idx (np.ndarray): Reporting array: 1D index to.
"""
row = from_r[x]
col = from_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_from_idx[x] = start_index_f + col
row = to_r[x]
col = to_c[x]
start_index_f = 0
if row > 0:
start_index_f = row_borders[row - 1]
out_to_idx[x] = start_index_f + col
@alphapept.performance.performance_function
def eliminate_overarching_vertex(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray):
"""Eliminate overacrhing vertex.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
"""
if x == 0:
return
if from_idx[x - 1] == from_idx[x]:
to_idx[x] = -1
def connect_centroids(rowwise_peaks:np.ndarray, row_borders:np.ndarray, centroids:np.ndarray, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, float, float):
"""Function to connect centroids.
Args:
rowwise_peaks (np.ndarray): Indexes for centroids.
row_borders (np.ndarray): Row borders (for indexing).
centroids (np.ndarray): Centroid data.
max_gap: Maximum gap.
centroid_tol: Centroid tol for matching centroids.
Returns:
np.ndarray: From index.
np.ndarray: To index.
float: Median score.
float: Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
from_r, from_c, to_r, to_c, score_median, score_std = find_centroid_connections(rowwise_peaks,
row_borders,
centroids,
max_gap,
centroid_tol)
from_idx = cupy.zeros(len(from_r), np.int32)
to_idx = cupy.zeros(len(from_r), np.int32)
convert_connections_to_array(range(len(from_r)),
from_r,
from_c,
to_r,
to_c,
row_borders,
from_idx,
to_idx)
eliminate_overarching_vertex(range(len(from_idx)), from_idx, to_idx)
relavent_idx = cupy.where(to_idx >= 0)
from_idx = cupy.take(from_idx, relavent_idx)[0]
to_idx = cupy.take(to_idx, relavent_idx)[0]
del from_r, from_c, to_r, to_c, relavent_idx
return from_idx, to_idx, score_median, score_std
#Sample snippet to show centroid connections
import matplotlib.pyplot as plt
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
scan_no = np.array([0, 0, 0, 1, 1, 2, 2, 2])
plt.figure(figsize=(5,5))
for i, _ in enumerate(row_borders):
ctrd = centroids[_-rowwise_peaks[i]:_]
plt.plot(ctrd, np.ones_like(ctrd)*i, 'o')
for i, _ in enumerate(from_idx):
from_ = _
to_ = to_idx[i]
plt.plot([centroids[from_], centroids[to_]], [scan_no[from_], scan_no[to_]], 'k:')
plt.ylabel('scan')
plt.xlabel('m/z')
plt.ylim(len(row_borders)+0.5, -1.5)
plt.title('Peak connections')
plt.show()
#hide
def test_connect_centroids():
row_borders = np.array([3, 6, 9])
rowwise_peaks = np.array([3, 3, 3])
max_gap = 2
score = np.full((3,3, max_gap), np.inf)
connections = np.full((3,3, max_gap), -1)
centroids = np.array([10, 20, 30, 10.2, 20, 10, 30, 40])
centroid_tol = 0.5*1e5
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, centroids, max_gap, centroid_tol)
assert np.allclose(from_idx, np.array([0, 1, 2]))
assert np.allclose(to_idx, np.array([3, 4, 6]))
test_connect_centroids()
###Output
_____no_output_____
###Markdown
Extracting hills. To extract hills, we extract connected components from the connections.
###Code
#export
@alphapept.performance.performance_function
def path_finder(x:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, forward:np.ndarray, backward:np.ndarray):
"""Extracts path information and writes to path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
from_idx (np.ndarray): Array containing from indices.
to_idx (np.ndarray): Array containing to indices.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
"""
fr = from_idx[x]
to = to_idx[x]
forward[fr] = to
backward[to] = fr
@alphapept.performance.performance_function
def find_path_start(x:np.ndarray, forward:np.ndarray, backward:np.ndarray, path_starts:np.ndarray):
"""Function to find the start of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
forward (np.ndarray): Array to report forward connection.
backward (np.ndarray): Array to report backward connection.
path_starts (np.ndarray): Array to report path starts.
"""
if forward[x] > -1 and backward[x] == -1:
path_starts[x] = 0
@alphapept.performance.performance_function
def find_path_length(x:np.ndarray, path_starts:np.ndarray, forward:np.ndarray, path_cnt:np.ndarray):
"""Function to extract the length of a path.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forward (np.ndarray): Array that stores forward information.
path_cnt (np.ndarray): Reporting array to count the paths.
"""
ctr = 1
idx = path_starts[x]
while forward[idx] > -1:
ctr += 1
idx = forward[idx]
path_cnt[x] = ctr
@alphapept.performance.performance_function
def fill_path_matrix(x:np.ndarray, path_start:np.ndarray, forwards:np.ndarray, out_hill_data:np.ndarray, out_hill_ptr:np.ndarray):
"""Function to fill the path matrix.
Args:
x (np.ndarray): Input index. Note that we are using the performance function so this is a range.
path_starts (np.ndarray): Array that stores the starts of the paths.
forwards (np.ndarray): Forward array.
out_hill_data (np.ndarray): Array containing the indices to hills.
out_hill_ptr (np.ndarray): Array containing the bounds to out_hill_data.
"""
path_position = 0
idx = path_start[x]
while idx > -1:
out_hill_data[out_hill_ptr[x] + path_position] = idx
idx = forwards[idx]
path_position += 1
def get_hills(centroids:np.ndarray, from_idx:np.ndarray, to_idx:np.ndarray, hill_length_min:int=3)-> (np.ndarray, np.ndarray, int):
"""Function to get hills from centroid connections.
Args:
centroids (np.ndarray): 1D Array containing the masses of the centroids.
from_idx (np.ndarray): From index.
to_idx (np.ndarray): To index.
hill_length_min (int): Minimum hill length:
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
forward = cupy.full(centroids.shape[0], -1)
backward = cupy.full(centroids.shape[0], -1)
path_starts = cupy.full(centroids.shape[0], -1)
path_finder(range(len(from_idx)), from_idx, to_idx, forward, backward)
find_path_start(range(len(forward)), forward, backward, path_starts)
    # path_starts will now contain the first index of all connected centroids
path_starts = cupy.where(path_starts == 0)[0]
path_node_cnt = cupy.full(path_starts.shape[0], -1)
find_path_length(range(len(path_starts)), path_starts, forward, path_node_cnt)
relavant_path_node = cupy.where(path_node_cnt >= hill_length_min)[0]
path_starts = cupy.take(path_starts, relavant_path_node)
path_node_cnt = cupy.take(path_node_cnt, relavant_path_node)
del relavant_path_node
    # Generate the hill matrix index ptr data
hill_ptrs = cupy.empty((path_starts.shape[0] + 1), dtype=cupy.int32)
hill_ptrs[0] = 0
hill_ptrs[1:] = path_node_cnt.cumsum()
hill_data = cupy.empty((int(hill_ptrs[-1])), np.int32)
fill_path_matrix(range(len(path_starts)), path_starts, forward, hill_data, hill_ptrs)
del from_idx, to_idx, path_starts, forward, backward
return hill_ptrs, hill_data, path_node_cnt
def extract_hills(query_data:dict, max_gap:int, centroid_tol:float)-> (np.ndarray, np.ndarray, int, float, float):
"""[summary]
Args:
query_data (dict): Data structure containing the query data.
max_gap (int): Maximum gap when connecting centroids.
centroid_tol (float): Centroid tolerance.
Returns:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
path_node_cnt (int): Number of elements in this path.
score_median (float): Median score.
score_std (float): Std deviation of the score.
"""
if alphapept.performance.COMPILATION_MODE == "cuda":
import cupy
cupy = cupy
else:
import numpy
cupy = numpy
indices = cupy.array(query_data['indices_ms1'])
mass_data = cupy.array(query_data['mass_list_ms1'])
rowwise_peaks = indices[1:] - indices[:-1]
row_borders = indices[1:]
from_idx, to_idx, score_median, score_std = connect_centroids(rowwise_peaks, row_borders, mass_data, max_gap, centroid_tol)
hill_ptrs, hill_data, path_node_cnt = get_hills(mass_data, from_idx, to_idx)
del mass_data
del indices
if cupy.__name__ != 'numpy':
hill_ptrs = hill_ptrs.get()
hill_data = hill_data.get()
path_node_cnt = path_node_cnt.get()
score_median = score_median.get()
score_std = score_std.get()
return hill_ptrs, hill_data, path_node_cnt, score_median, score_std
###Output
_____no_output_____
###Markdown
Hill Splitting When a hill has two or more maxima, we would like to split it at the minimum position. For this, we use a recursive approach. First, the minimum of a hill is detected. A hill is split at this minimum if the smaller of the surrounding maxima is at least a factor of `hill_split_level` larger than the minimum. For each split, the process is repeated.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def fast_minima(y:np.ndarray)->np.ndarray:
"""Function to calculate the local minimas of an array.
Args:
y (np.ndarray): Input array.
Returns:
np.ndarray: Array containing minima positions.
"""
minima = np.zeros(len(y))
start = 0
end = len(y)
for i in range(start + 2, end - 2):
if ((y[i - 1] > y[i]) & (y[i + 1] > y[i])) \
or ((y[i - 1] > y[i]) & (y[i + 1] == y[i]) & (y[i + 2] > y[i])) \
or ((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] > y[i])) \
or (((y[i - 2] > y[i]) & (y[i - 1] == y[i]) & (y[i + 1] == y[i]) & \
(y[i + 2] > y[i]))):
minima[i] = 1
minima = minima.nonzero()[0]
return minima
#hide
def test_fast_minima():
assert fast_minima(np.array([3,2,1,0,1,2,3])) == 3
assert fast_minima(np.array([4,3,2,1,0,1,2])) == 4
assert len(fast_minima(np.array([5,4,3,2,1,0,1]))) == 0
assert len(fast_minima(np.array([6,5,4,3,2,1,0]))) == 0
test_fast_minima()
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def split(k:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_data:np.ndarray, splits:np.ndarray, hill_split_level:float, window:int):
"""Function to split hills.
Args:
k (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_data (np.ndarray): Array containing the indices to hills.
splits (np.ndarray): Array containing splits.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
"""
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_trace = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.median(int_trace[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_trace[i] = np.mean(int_trace[min_index:max_index])
#minima = (np.diff(np.sign(np.diff(int_trace))) > 0).nonzero()[0] + 1 #This works also but is slower
minima = fast_minima(int_trace)
sorted_minima = np.argsort(int_trace[minima])
minima = minima[sorted_minima]
for min_ in minima:
minval = int_trace[min_]
left_max = max(int_trace[:min_])
right_max = max(int_trace[min_:])
min_max = min(left_max, right_max)
if (minval == 0) or ((min_max / minval) > hill_split_level):
splits[k] = start+min_
break # Split only once per iteration
def split_hills(hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, hill_split_level:float, window:int)->np.ndarray:
"""Wrapper function to split hills
Args:
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
hill_split_level (float): Split level for hills.
window (int): Smoothing window.
Returns:
np.ndarray: Array containing the bounds to the hill_data with splits.
"""
splits = np.zeros(len(int_data), dtype=np.int32)
to_check = np.arange(len(hill_ptrs)-1)
while len(to_check) > 0:
split(to_check, hill_ptrs, int_data, hill_data, splits, hill_split_level, window)
splitpoints = splits.nonzero()[0]
to_check = np.zeros(len(hill_ptrs))
to_check[splitpoints] = 1
to_check = np.insert(to_check, splitpoints+1, np.ones(len(splitpoints))).nonzero()[0] #array, index, what
hill_ptrs = np.insert(hill_ptrs, splitpoints+1, splits[splitpoints]) #array, index, what
splits = np.zeros(len(hill_ptrs), dtype=np.int32) #was cupy np.int32
return hill_ptrs
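# Minimal sketch (not part of the original code): the split criterion on a toy intensity trace.
# A hill with two apexes (10 and 8) and a valley (2) is split because the smaller of the two
# surrounding maxima divided by the minimum exceeds an assumed hill_split_level of 1.15.
toy_trace = np.array([1., 5., 10., 4., 2., 3., 8., 6., 1.])
toy_split_level = 1.15
valley = fast_minima(toy_trace)[0]
left_max, right_max = toy_trace[:valley].max(), toy_trace[valley:].max()
assert min(left_max, right_max) / toy_trace[valley] > toy_split_level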
###Output
_____no_output_____
###Markdown
Filter Hills To filter hills, we check their length against the threshold `hill_check_large`. All hills shorter than this threshold are accepted as is. For longer hills, the smoothed intensity at the start and the end is compared to the maximum raw intensity. If the ratio of the maximum raw intensity to the smoothed intensity at both the beginning and the end is larger than `hill_peak_factor`, the hill is accepted.
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def check_large_hills(idx:np.ndarray, large_peaks:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, to_remove:np.ndarray, large_peak:int = 40, hill_peak_factor:float = 2, window:int=1):
"""Function to check large hills and flag them for removal.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
large_peaks (np.ndarray): Array containing large peaks.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
to_remove (np.ndarray): Array with indexes to remove.
large_peak (int, optional): Length criterion when a peak is large. Defaults to 40.
hill_peak_factor (float, optional): Hill maximum criterion. Defaults to 2.
        window (int, optional): Smoothing window. Defaults to 1.
"""
k = large_peaks[idx]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
int_idx = hill_data[start:end] #index to hill data
int_smooth_ = int_data[int_idx]
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.median(int_smooth_[min_index:max_index])
for i in range(len(int_idx)):
min_index = max(0, i - window)
max_index = min(len(int_idx), i + window + 1)
int_smooth_[i] = np.mean(int_smooth_[min_index:max_index])
int_ = int_data[int_idx]
max_ = np.max(int_)
if (max_ / int_smooth_[0] > hill_peak_factor) & (max_ / int_smooth_[-1] > hill_peak_factor):
to_remove[idx] = 0
def filter_hills(hill_data:np.ndarray, hill_ptrs:np.ndarray, int_data:np.ndarray, hill_check_large:int =40, window:int = 1) -> (np.ndarray, np.ndarray):
"""Filters large hills.
Args:
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
int_data (np.ndarray): Array containing the intensity to each centroid.
        hill_check_large (int, optional): Length criterion when a hill is considered large. Defaults to 40.
window (int, optional): Smoothing window. Defaults to 1.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
"""
large_peaks = np.where(np.diff(hill_ptrs)>=hill_check_large)[0]
to_remove = np.ones(len(large_peaks), dtype=np.int32)
    check_large_hills(range(len(large_peaks)), large_peaks, hill_ptrs, hill_data, int_data, to_remove, hill_check_large, 2, window) # pass hill_check_large, the default hill_peak_factor and window in their positional slots
idx_ = np.ones(len(hill_data), dtype = np.int32)
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
to_remove = to_remove.nonzero()[0]
for _ in to_remove:
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_
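# Minimal sketch (not part of the original code): the acceptance criterion applied to long hills,
# with the smoothing step omitted for brevity. A hill whose apex exceeds both of its ends by more
# than an assumed hill_peak_factor of 2 is kept; otherwise it would be flagged for removal.
toy_int = np.array([1., 3., 8., 20., 9., 4., 2.])
toy_peak_factor = 2
assert (toy_int.max() / toy_int[0] > toy_peak_factor) & (toy_int.max() / toy_int[-1] > toy_peak_factor)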
###Output
_____no_output_____
###Markdown
Calculating Hill Statistics Next, we calculate summary statistics for the connected centroids. We can obtain a high-precision mass estimate for each hill by taking the average of the masses and weighting this by their intensities:$$\overline{m} = \frac{\sum_{j=1}^{n}m_jI_j}{\sum_{j=1}^{n}I_j}$$Since the mass estimate $\overline{m}$ in the equation above is more complicated than just an average of the $m_j$, a standard deviation based estimate of the error would not be appropriate. Therefore, we calculate the error as a bootstrap estimate over $B = 150$ bootstrap replications:$$\Delta \overline{m} = \sqrt{\frac{\sum_{b=1}^{B}(\overline{m}_b - \overline{m})^2}{B-1}}$$The calculation of hill statistics for a single hill is implemented in `hill_stats`. To calculate the hill stats for a list of hills, we can call the wrapper `get_hill_data`.
###Code
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def hill_stats(idx:np.ndarray, hill_range:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, mass_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, stats:np.ndarray, hill_nboot_max:int, hill_nboot:int):
"""Function to calculate hill stats.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
hill_range (np.ndarray): Hill range.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
mass_data (np.ndarray): Array containing mass data.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
"""
np.random.seed(42)
start = hill_ptrs[idx]
end = hill_ptrs[idx + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
mz_ = mass_data[idx_]
int_sum = np.sum(int_)
    int_area = np.abs(np.trapz(int_, rt_[rt_idx[idx_]])) #Area: intensity integrated over retention time
rt_min = rt_[rt_idx[idx_]].min()
rt_max = rt_[rt_idx[idx_]].max()
if len(idx_) > hill_nboot_max:
bootsize = hill_nboot_max
else:
bootsize = len(idx_)
averages = np.zeros(hill_nboot)
average = 0
for i in range(hill_nboot):
boot = np.random.choice(len(int_), bootsize, replace=True)
boot_mz = np.sum((mz_[boot] * int_[boot])) / np.sum(int_[boot])
averages[i] = boot_mz
average += boot_mz
average_mz = average/hill_nboot
delta = 0
for i in range(hill_nboot):
delta += (average_mz - averages[i]) ** 2 #maybe easier?
delta_m = np.sqrt(delta / (hill_nboot - 1))
stats[idx,0] = average_mz
stats[idx,1] = delta_m
stats[idx,2] = int_sum
stats[idx,3] = int_area
stats[idx,4] = rt_min
stats[idx,5] = rt_max
def remove_duplicates(stats:np.ndarray, hill_data:np.ndarray, hill_ptrs:np.ndarray)-> (np.ndarray, np.ndarray, np.ndarray):
"""Remove duplicate hills.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
hill_data (np.ndarray): Array containing the indices to hills.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
Returns:
np.ndarray: Filtered hill data.
np.ndarray: Filtered hill points.
np.ndarray: Filtered hill stats.
"""
dups = pd.DataFrame(stats).duplicated() #all duplicated hills
idx_ = np.ones(len(hill_data), dtype = np.int32) #keep all
keep = np.ones(len(hill_ptrs)-1, dtype = np.int32)
for _ in np.arange(len(stats))[dups]: #duplicates will be assigned zeros
idx_[hill_ptrs[_]:hill_ptrs[_+1]] = 0
keep[_] = 0
hill_lens = np.diff(hill_ptrs)
keep_ = hill_lens[keep.nonzero()[0]]
hill_data_ = hill_data[idx_.nonzero()[0]]
hill_ptrs_ = np.empty((len(keep_) + 1), dtype=np.int32)
hill_ptrs_[0] = 0
hill_ptrs_[1:] = keep_.cumsum()
return hill_data_, hill_ptrs_, stats[~dups]
def get_hill_data(query_data:dict, hill_ptrs:np.ndarray, hill_data:np.ndarray, hill_nboot_max:int = 300, hill_nboot:int = 150) -> (np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to get the hill data.
Args:
query_data (dict): Data structure containing the query data.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
hill_nboot_max (int): Maximum number of bootstrap comparisons.
hill_nboot (int): Number of bootstrap comparisons
Returns:
np.ndarray: Hill stats.
np.ndarray: Sortindex.
np.ndarray: Upper index.
np.ndarray: Scan index.
np.ndarray: Hill data.
np.ndarray: Hill points.
"""
indices_ = np.array(query_data['indices_ms1'])
rt_ = np.array(query_data['rt_list_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
scan_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
int_data = np.array(query_data['int_list_ms1'])
    stats = np.zeros((len(hill_ptrs)-1, 6)) #columns: average_mz, delta_m, int_sum, int_area, rt_min, rt_max
hill_stats(range(len(hill_ptrs)-1), np.arange(len(hill_ptrs)-1), hill_ptrs, hill_data, int_data, mass_data, rt_, scan_idx, stats, hill_nboot_max, hill_nboot)
# sort the stats
sortindex = np.argsort(stats[:,4]) #Sorted by rt_min
stats = stats[sortindex,:]
idxs_upper = stats[:,4].searchsorted(stats[:,5], side="right")
sortindex_ = np.arange(len(sortindex))[sortindex]
return stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs
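# Minimal sketch (not part of the original code): the intensity-weighted mass estimate and its
# bootstrap error for a toy hill, mirroring the formulas above with B bootstrap replications.
toy_rng = np.random.default_rng(0)
toy_mz = np.array([500.0010, 500.0000, 499.9995, 500.0005])
toy_int = np.array([1e4, 5e4, 3e4, 2e4])
m_bar = np.sum(toy_mz * toy_int) / np.sum(toy_int)                # intensity-weighted mean m/z
B = 150
boot_means = np.empty(B)
for b in range(B):
    sel = toy_rng.integers(0, len(toy_mz), len(toy_mz))           # resample centroids with replacement
    boot_means[b] = np.sum(toy_mz[sel] * toy_int[sel]) / np.sum(toy_int[sel])
delta_m = np.sqrt(np.sum((boot_means - m_bar) ** 2) / (B - 1))    # bootstrap mass error
assert 499.9995 < m_bar < 500.0010 and delta_m < 0.01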
###Output
_____no_output_____
###Markdown
Combining Hills to Isotope Patterns After obtaining summary statistics of hills, the next step is to check whether they belong together to form an isotope pattern. For this, we check whether it is possible that they are neighbors in an isotope pattern, e.g. one having a 12C atom that has been replaced by a 13C version. The detailed criterion for the check is implemented in `check_isotope_pattern` and is as follows:$$\left | \Delta m-\frac{\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m_{1}}^{2} +\Delta {m_{2}}^{2}}$$The left side contains $\Delta m$, being the delta of the precise mass estimates from the summary statistics, and $\Delta M = 1.00286864$, which is the mass difference between the 13C peak and the monoisotopic peak in an averagine molecule of 1500 Da mass, divided by the charge $z$. The right side contains $\Delta S = 0.0109135$, which is the maximum shift that a sulphur atom can cause ($\Delta S = 2m(^{13}C) - 2m(^{12}C) - m(^{34}S) + m(^{32}S)$) and $\Delta {m_{1}}$ and $\Delta {m_{2}}$, which are the bootstrapped mass standard deviations.
###Code
#export
from alphapept.constants import mass_dict
DELTA_M = mass_dict['delta_M']
DELTA_S = mass_dict['delta_S']
maximum_offset = DELTA_M + DELTA_S
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, iso_mass_range:int = 5)-> bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
iso_mass_range (int, optional): Mass range. Defaults to 5.
Returns:
bool: Flag to see if pattern belongs to the same pattern.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
delta_mass = np.abs(mass1 - mass2)
left_side = np.abs(delta_mass - DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
#hide
def test_check_isotope_pattern():
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == True
mass2, delta_mass2 = 102.1, 0.05
assert check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge) == False
test_check_isotope_pattern()
charge = 1
mass1, delta_mass1 = 100, 0.1
mass2, delta_mass2 = 101.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
mass2, delta_mass2 = 102.1, 0.05
print(check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge))
###Output
True
False
###Markdown
Cosine Correlation of two hills An additional criterion that is being checked is that the intensity profiles have sufficient overlap in retention time. This is validated by ensuring that two hills have a cosine correlation of at least 0.6.$$\frac{\sum_{s=s_{min}}^{s_{max}}I_sJ_s}{\sqrt{\sum_{s=s_{min}}^{s_{max}}I_s^{2} \sum_{s=s_{min}}^{s_{max}}J_s^{2}}} \geq 0.6$$The intensities of two hills are only compared if both have an intensity value in a particular scan. Otherwise, the intensity is set to zero. Additionally, an overlap of at least three elements is required.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def correlate(scans_:np.ndarray, scans_2:np.ndarray, int_:np.ndarray, int_2:np.ndarray)->float:
"""Correlate two scans.
Args:
scans_ (np.ndarray): Masses of the first scan.
scans_2 (np.ndarray): Masses of the second scan.
int_ (np.ndarray): Intensity of the first scan.
int_2 (np.ndarray): Intensity of the second scan.
Returns:
float: Correlation.
"""
min_one, max_one = scans_[0], scans_[-1]
min_two, max_two = scans_2[0], scans_2[-1]
if min_one + 3 > max_two: # at least an overlap of 3 elements
corr = 0
elif min_two + 3 > max_one:
corr = 0
else:
min_s = min(min_one, min_two)
max_s = max(max_one, max_two)
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[scans_ - min_s] = int_
int_two_scaled[scans_2 - min_s] = int_2
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
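# Minimal sketch (not part of the original code): correlating two toy hills that share four scans
# (12-15); identical intensity shapes on the overlap give a high cosine correlation.
toy_scans_a = np.array([10, 11, 12, 13, 14, 15])
toy_int_a = np.array([1., 4., 9., 9., 4., 1.])
toy_scans_b = np.array([12, 13, 14, 15, 16, 17])
toy_int_b = np.array([9., 9., 4., 1., 0.5, 0.2])
assert correlate(toy_scans_a, toy_scans_b, toy_int_a, toy_int_b) > 0.9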
###Output
_____no_output_____
###Markdown
Extracting pre-Isotope Patterns Now having two criteria to check whether hills could, in principle, belong together, we define the functions `extract_edge` and `edge_correlation` to extract the connected hills. To minimize the number of comparisons we need to perform, we only compare hills that overlap in time (i.e., the start of one hill `rt_min` needs to be before the end of the other hill `rt_max`) and are less than the sum of $\Delta M$ and $\Delta S$ apart. To extract all hills that belong together, we again rely on the `NetworkX`-package to extract the connected components.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def extract_edge(stats:np.ndarray, idxs_upper:np.ndarray, runner:int, max_index:int, maximum_offset:float, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:int=5)->list:
"""Extract edges.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparing.
runner (int): Index.
max_index (int): Unused.
maximum_offset (float): Maximum offset when comparing edges.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
Returns:
list: List of edges.
"""
edges = []
mass1 = stats[runner, 0]
delta_mass1 = stats[runner, 1]
for j in range(runner+1, idxs_upper[runner]):
mass2 = stats[j, 0]
if np.abs(mass2 - mass1) <= maximum_offset:
delta_mass2 = stats[j, 1]
for charge in range(iso_charge_min, iso_charge_max + 1):
if check_isotope_pattern(mass1, mass2, delta_mass1, delta_mass2, charge, iso_mass_range):
edges.append((runner, j))
break
return edges
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def edge_correlation(idx:np.ndarray, to_keep:np.ndarray, sortindex_:np.ndarray, pre_edges:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float):
"""Correlates two edges and flag them it they should be kept.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
to_keep (np.ndarray): Array with indices which edges should be kept.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
pre_edges (np.ndarray): Array with pre edges.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
"""
edge = pre_edges[idx,:]
y = sortindex_[edge[0]]
start = hill_ptrs[y]
end = hill_ptrs[y + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
con = sortindex_[edge[1]]
start = hill_ptrs[con]
end = hill_ptrs[con + 1]
idx_2 = hill_data[start:end]
int_2 = int_data[idx_2]
scans_2 = scan_idx[idx_2]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
to_keep[idx] = 1
#export
import networkx as nx
def get_pre_isotope_patterns(stats:np.ndarray, idxs_upper:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, maximum_offset:float, iso_charge_min:int=1, iso_charge_max:int=6, iso_mass_range:float=5, cc_cutoff:float=0.6)->list:
"""Function to extract pre isotope patterns.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
idxs_upper (np.ndarray): Upper index for comparison.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
maximum_offset (float): Maximum offset when matching.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
cc_cutoff (float, optional): Correlation cutoff. Defaults to 0.6.
Returns:
list: List of pre isotope patterns.
"""
pre_edges = []
# Step 1
for runner in range(len(stats)):
pre_edges.extend(extract_edge(stats, idxs_upper, runner, idxs_upper[runner], maximum_offset, iso_charge_min, iso_charge_max, iso_mass_range))
to_keep = np.zeros(len(pre_edges), dtype='int')
pre_edges = np.array(pre_edges)
edge_correlation(range(len(to_keep)), to_keep, sortindex_, pre_edges, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
edges = pre_edges[to_keep.nonzero()]
G2 = nx.Graph()
for i in range(len(edges)):
G2.add_edge(edges[i][0], edges[i][1])
pre_isotope_patterns = [
sorted(list(c))
for c in sorted(nx.connected_components(G2), key=len, reverse=True)
]
return pre_isotope_patterns
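# Minimal sketch (not part of the original code): how connected components group pairwise hill
# edges into pre-isotope patterns, as done above with networkx.
toy_edges = [(0, 1), (1, 2), (5, 6)]
G_toy = nx.Graph()
G_toy.add_edges_from(toy_edges)
assert sorted(sorted(c) for c in nx.connected_components(G_toy)) == [[0, 1, 2], [5, 6]]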
###Output
_____no_output_____
###Markdown
Extracting Isotope Patterns The extracted pre-isotope patterns may not be consistent because their pair-wise mass differences may not correspond to the same charge. To extract isotope patterns from pre-isotope patterns, we need to ensure that they are consistent for a single charge. To do this, we start with the 100 most intense peaks from a pre-isotope pattern to be used as seeds. For each seed and charge we then try to extract the longest consistent isotope pattern. To check whether a hill is consistent with the seed, we employ a modified checking criterion (`check_isotope_pattern_directed`), which is as follows:$$\left | m-m_j-\frac{j\Delta M}{z} \right |\leq \sqrt{\left ( \frac{\Delta S}{z} \right )^{2}+\Delta {m}^{2} +\Delta {m_{j}}^{2}}$$Here $m$ is the mass of the seed peak, and $m_{j}$ refers to a peak relative to the seed. $j$ refers to the peaks to the left or right (negative or positive index) within the pattern. $j$ needs to run over consecutive values so that gaps are not allowed. Besides this consistency check, two hills are also required to have a cosine correlation of at least 0.6. Programmatically, this is implemented in `grow_trail` and `grow`. These functions greedily add matching hills to the seed on the left and right side until no more hills can be added.
###Code
#export
from numba.typed import List
@alphapept.performance.compile_function(compilation_mode="numba")
def check_isotope_pattern_directed(mass1:float, mass2:float, delta_mass1:float, delta_mass2:float, charge:int, index:int, iso_mass_range:float)->bool:
"""Check if two masses could belong to the same isotope pattern.
Args:
mass1 (float): Mass of the first pattern.
mass2 (float): Mass of the second pattern.
delta_mass1 (float): Delta mass of the first pattern.
delta_mass2 (float): Delta mass of the second pattern.
charge (int): Charge.
index (int): Index (unused).
iso_mass_range (float): Isotope mass ranges.
Returns:
bool: Flag if two isotope patterns belong together.
"""
delta_mass1 = delta_mass1 * iso_mass_range
delta_mass2 = delta_mass2 * iso_mass_range
left_side = np.abs(mass1 - mass2 - index * DELTA_M / charge)
right_side = np.sqrt((DELTA_S / charge) ** 2 + delta_mass1 ** 2 + delta_mass2 ** 2)
return left_side <= right_side
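# Minimal sketch (not part of the original code): a neighbour one isotope position away from the
# seed (index=1) for charge 2 must be offset by ~DELTA_M/2; here the offset is exact, so the
# directed check passes (bootstrapped errors of 1e-4 and an iso_mass_range of 5 are assumed).
toy_seed_mass = 800.0
toy_neighbour_mass = toy_seed_mass - DELTA_M / 2
assert check_isotope_pattern_directed(toy_seed_mass, toy_neighbour_mass, 1e-4, 1e-4, 2, 1, 5.0)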
@alphapept.performance.compile_function(compilation_mode="numba")
def grow(trail:List, seed:int, direction:int, relative_pos:int, index:int, stats:np.ndarray, pattern:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Grows isotope pattern based on a seed and direction.
Args:
trail (List): List of hills belonging to a pattern.
seed (int): Seed position.
direction (int): Direction in which to grow the trail
relative_pos (int): Relative position.
index (int): Index.
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: List of hills belonging to a pattern.
"""
x = pattern[seed] # This is the seed
mass1 = stats[x,0]
delta_mass1 = stats[x,1]
k = sortindex_[x]
start = hill_ptrs[k]
end = hill_ptrs[k + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
scans_ = scan_idx[idx_]
growing = True
while growing:
if direction == 1:
if seed + relative_pos == len(pattern):
growing = False
break
else:
if seed + relative_pos < 0:
growing = False
break
y = pattern[seed + relative_pos] # This is a reference peak
l = sortindex_[y]
mass2 = stats[y,0]
delta_mass2 = stats[y,1]
start = hill_ptrs[l]
end = hill_ptrs[l + 1]
idx_ = hill_data[start:end]
int_2 = int_data[idx_]
scans_2 = scan_idx[idx_]
if correlate(scans_, scans_2, int_, int_2) > cc_cutoff:
if check_isotope_pattern_directed(mass1, mass2, delta_mass1, delta_mass2, charge, -direction * index, iso_mass_range):
if direction == 1:
trail.append(y)
else:
trail.insert(0, y)
index += (
1
) # Greedy matching: Only one edge for a specific distance, will not affect the following matches
delta_mass = np.abs(mass1 - mass2)
if (delta_mass > (DELTA_M+DELTA_S) * index): # the pattern is sorted so there is a maximum to look back
break
relative_pos += direction
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def grow_trail(seed:int, pattern:np.ndarray, stats:np.ndarray, charge:int, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to grow an isotope pattern to the left and right side.
Args:
seed (int): Seed position.
pattern (np.ndarray): Isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge (int): Charge.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Isotope pattern.
"""
x = pattern[seed]
trail = List()
trail.append(x)
trail = grow(trail, seed, -1, -1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trail = grow(trail, seed, 1, 1, 1, stats, pattern, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
return trail
@alphapept.performance.compile_function(compilation_mode="numba")
def get_trails(seed:int, pattern:np.ndarray, stats:np.ndarray, charge_range:List, iso_mass_range:float, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, cc_cutoff:float)->List:
"""Wrapper to extract trails for a given charge range.
Args:
seed (int): Seed index.
pattern (np.ndarray): Pre isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
charge_range (List): Charge range.
iso_mass_range (float): Mass range for checking isotope patterns.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
cc_cutoff (float): Cutoff value for what is considered correlating.
Returns:
List: Trail of consistent hills.
"""
trails = []
for charge in charge_range:
trail = grow_trail(seed, pattern, stats, charge, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
trails.append(trail)
return trails
#export
def plot_pattern(pattern:np.ndarray, sorted_hills:np.ndarray, centroids:np.ndarray, hill_data:np.ndarray):
"""Helper function to plot a pattern.
Args:
pattern (np.ndarray): Pre isotope pattern.
sorted_hills (np.ndarray): Hills, sorted.
centroids (np.ndarray): 1D Array containing the masses of the centroids.
hill_data (np.ndarray): Array containing the indices to hills.
"""
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(10,10))
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
mzs = []
rts = []
ints = []
for entry in pattern:
hill = sorted_hills[entry]
hill_data = np.array([centroids[_[0]][_[1]] for _ in hill], dtype=centroid_dtype)
int_profile = hill_data["int"]
ax1.plot(hill_data["rt"], hill_data["int"])
ax2.scatter(hill_data["rt"], hill_data["mz"], s = hill_data["int"]/5e5 )
ax1.set_title('Pattern')
ax1.set_xlabel('RT (min)')
ax1.set_ylabel('Intensity')
ax2.set_xlabel('RT (min)')
ax2.set_ylabel('m/z')
plt.show()
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def get_minpos(y:np.ndarray, iso_split_level:float)->List:
"""Function to get a list of minima in a trace.
    A minimum is returned if the ratio of the lower of the surrounding maxima to the minimum is larger than the splitting factor.
Args:
y (np.ndarray): Input array.
iso_split_level (float): Isotope split level.
Returns:
List: List with min positions.
"""
minima = get_local_minima(y)
minima_list = List()
for minpos in minima:
minval = y[minpos]
left_max = (y[:minpos]).max()
right_max = (y[minpos:]).max()
minimum_max = min(left_max, right_max)
if minimum_max / minval >= iso_split_level:
minima_list.append(minpos)
return minima_list
@alphapept.performance.compile_function(compilation_mode="numba")
def get_local_minima(y:np.ndarray)->List:
"""Function to return all local minima of a array
Args:
y (np.ndarray): Input array.
Returns:
List: List with indices to minima.
"""
minima = List()
for i in range(1, len(y) - 1):
if is_local_minima(y, i):
minima.append(i)
return minima
@alphapept.performance.compile_function(compilation_mode="numba")
def is_local_minima(y:np.ndarray, i:int)->bool:
"""Check if position is a local minima.
Args:
y (np.ndarray): Input array.
i (int): Position to check.
Returns:
bool: Flag if position is minima or not.
"""
return (y[i - 1] > y[i]) & (y[i + 1] > y[i])
@alphapept.performance.compile_function(compilation_mode="numba")
def truncate(array:np.ndarray, intensity_profile:np.ndarray, seedpos:int, iso_split_level:float)->np.ndarray:
"""Function to truncate an intensity profile around its seedposition.
Args:
array (np.ndarray): Input array.
intensity_profile (np.ndarray): Intensities for the input array.
seedpos (int): Seedposition.
iso_split_level (float): Split level.
Returns:
np.ndarray: Truncated array.
"""
minima = int_list_to_array(get_minpos(intensity_profile, iso_split_level))
if len(minima) > 0:
left_minima = minima[minima < seedpos]
right_minima = minima[minima > seedpos]
# If the minimum is smaller than the seed
if len(left_minima) > 0:
minpos = left_minima[-1]
else:
minpos = 0
if len(right_minima) > 0:
maxpos = right_minima[0]
else:
maxpos = len(array)
array = array[minpos:maxpos+1]
return array
#hide
def test_get_minpos():
"""
Generate an intensity profile with local minima
Check that the minima are found
"""
intensity_profile = np.ones(20) * 10
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
minima = get_minpos(intensity_profile, 2)
minima_list = [_ for _ in minima]
assert minima_list == minima_ref
test_get_minpos()
###Output
_____no_output_____
###Markdown
Isolating Isotope Patterns The extraction of the longest consistent isotope pattern is implemented in `isolate_isotope_pattern`. Here, three additional checks for an isotope pattern are implemented. The first one is `truncate`. Here, one checks whether the seed position has a minimum to its left or right side. If a minimum is found, the isotope pattern is cut off at this position. The second one is a mass filter. If the seed has a mass smaller than 1000, the intensity maximum is detected, and all smaller masses are discarded. This reflects the averagine distribution for small masses, where no minimum on the left side can be found. The third one is `check_averagine`, which relies on `pattern_to_mz` and `cosine_averagine`. It is used to ensure that the extracted isotope pattern has a cosine correlation with the averagine isotope pattern of the same mass of at least 0.6. After the longest consistent isotope pattern is found, the hills are removed from the pre-isotope pattern, and the process is repeated until no more isotope patterns can be extracted from the pre-isotope patterns.
###Code
#export
from alphapept.chem import mass_to_dist
from alphapept.constants import averagine_aa, isotopes, Isotope
from numba.typed import Dict
@alphapept.performance.compile_function(compilation_mode="numba")
def check_averagine(stats:np.ndarray, pattern:np.ndarray, charge:int, averagine_aa:Dict, isotopes:Dict)->float:
"""Function to compare a pattern to an averagine model.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
Returns:
float: Averagine correlation.
"""
masses, intensity = pattern_to_mz(stats, pattern, charge)
spec_one = np.floor(masses).astype(np.int64)
int_one = intensity
spec_two, int_two = mass_to_dist(np.min(masses), averagine_aa, isotopes) # maybe change to no rounded version
spec_two = np.floor(spec_two).astype(np.int64)
return cosine_averagine(int_one, int_two, spec_one, spec_two)
@alphapept.performance.compile_function(compilation_mode="numba")
def pattern_to_mz(stats:np.ndarray, pattern:np.ndarray, charge:int)-> (np.ndarray, np.ndarray):
"""Function to calculate masses and intensities from pattern for a given charge.
Args:
stats (np.ndarray): Stats array that contains summary statistics of hills.
pattern (np.ndarray): Isotope pattern.
charge (int): Charge of the pattern.
Returns:
np.ndarray: masses
np.ndarray: intensity
"""
mzs = np.zeros(len(pattern))
ints = np.zeros(len(pattern))
for i in range(len(pattern)):
entry = pattern[i]
mzs[i] = mz_to_mass(stats[entry,0], charge)
ints[i] = stats[entry,2]
sortindex = np.argsort(mzs)
masses = mzs[sortindex]
intensity = ints[sortindex]
return masses, intensity
@alphapept.performance.compile_function(compilation_mode="numba")
def cosine_averagine(int_one:np.ndarray, int_two:np.ndarray, spec_one:np.ndarray, spec_two:np.ndarray)-> float:
"""Calculate the cosine correlation of two hills.
Args:
int_one (np.ndarray): Intensity of the first hill.
int_two (np.ndarray): Intensity of the second hill.
spec_one (np.ndarray): Scan numbers of the first hill.
spec_two (np.ndarray): Scan numbers of the second hill.
Returns:
float: Cosine
"""
min_one, max_one = spec_one[0], spec_one[-1]
min_two, max_two = spec_two[0], spec_two[-1]
min_s = np.min(np.array([min_one, min_two]))
max_s = np.max(np.array([max_one, max_two]))
int_one_scaled = np.zeros(int(max_s - min_s + 1))
int_two_scaled = np.zeros(int(max_s - min_s + 1))
int_one_scaled[spec_one - min_s] = int_one
int_two_scaled[spec_two - min_s] = int_two
corr = np.sum(int_one_scaled * int_two_scaled) / np.sqrt(
np.sum(int_one_scaled ** 2) * np.sum(int_two_scaled ** 2)
)
return corr
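# Minimal sketch (kept inert; plain NumPy) of the cosine correlation computed above once
# two intensity profiles have been padded onto a common integer axis:
if False:
    _a = np.array([50., 30., 10., 0.])
    _b = np.array([0., 40., 20., 10.])
    print(np.sum(_a * _b) / np.sqrt(np.sum(_a ** 2) * np.sum(_b ** 2)))  # ~0.52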
@alphapept.performance.compile_function(compilation_mode="numba")
def int_list_to_array(numba_list:List)->np.ndarray:
"""Numba compatbilte function to convert a numba list with integers to a numpy array
Args:
numba_list (List): Input numba-typed List.
Returns:
np.ndarray: Output numpy array.
"""
array = np.zeros(len(numba_list), dtype=np.int64)
for i in range(len(array)):
array[i] = numba_list[i]
return array
M_PROTON = mass_dict['Proton']
@alphapept.performance.compile_function(compilation_mode="numba")
def mz_to_mass(mz:float, charge:int)->float:
"""Function to calculate the mass from a mz value.
Args:
mz (float): M/z
charge (int): Charge.
Raises:
NotImplementedError: When a negative charge is used.
Returns:
float: mass
"""
if charge < 0:
raise NotImplementedError("Negative Charges not implemented.")
mass = mz * charge - charge * M_PROTON
return mass
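# Worked example (kept inert): a doubly charged ion at m/z 500 corresponds to a neutral
# mass of 2 * 500 - 2 * M_PROTON, i.e. roughly 997.99 Da (M_PROTON is ~1.00728 Da).
if False:
    print(mz_to_mass(500.0, 2))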
#hide
if False:
def test_truncate():
"""
Generate an intensity profile with local minima
        Check whether the profile is correctly truncated with respect to the seed
"""
array = np.arange(0, 20)
intensity_profile = np.ones(20) * 10
iso_split_level = 1.3
minima_ref = [3, 7, 10, 17]
for minimum in minima_ref:
intensity_profile[minimum] = 1
seedpos = 5
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([3, 4, 5, 6, 7]))
seedpos = 0
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([0, 1, 2, 3]))
seedpos = len(array)
truncated = truncate(array, intensity_profile, seedpos, iso_split_level)
assert np.all(truncated == np.array([17, 18, 19]))
test_truncate()
###Output
_____no_output_____
###Markdown
Isotope PatternsThe wrapper function `get_isotope_patterns` iterates over all pre-isotope patterns and repeatedly calls `isolate_isotope_pattern` on each of them until no further pattern can be extracted.
###Code
#export
@alphapept.performance.compile_function(compilation_mode="numba")
def isolate_isotope_pattern(pre_pattern:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, iso_mass_range:float, charge_range:List, averagine_aa:Dict, isotopes:Dict, iso_n_seeds:int, cc_cutoff:float, iso_split_level:float)->(np.ndarray, int):
"""Isolate isotope patterns.
Args:
pre_pattern (np.ndarray): Pre isotope pattern.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
iso_mass_range (float): Mass range for checking isotope patterns.
charge_range (List): Charge range.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_n_seeds (int): Number of seeds.
cc_cutoff (float): Cutoff value for what is considered correlating.
iso_split_level (float): Split level when isotopes are split.
Returns:
np.ndarray: Array with the best pattern.
int: Charge of the best pattern.
"""
longest_trace = 0
champion_trace = None
champion_charge = 0
champion_intensity = 0
    # Sort the pre-pattern by m/z (column 0 of stats)
    sortindex = np.argsort(stats[pre_pattern][:,0])
    sorted_pattern = pre_pattern[sortindex]
    # Use the iso_n_seeds most intense hills (column 2 of stats) as seeds
    massindex = np.argsort(stats[sorted_pattern][:,2])[::-1][:iso_n_seeds]
for seed in massindex: # Loop through all seeds
seed_global = sorted_pattern[seed]
trails = get_trails(seed, sorted_pattern, stats, charge_range, iso_mass_range, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, cc_cutoff)
for index, trail in enumerate(trails):
            if len(trail) >= longest_trace: # Needs to be at least as long as the current champion
arr = int_list_to_array(trail)
intensity_profile = stats[arr][:,2]
seedpos = np.nonzero(arr==seed_global)[0][0]
# truncate around the seed...
arr = truncate(arr, intensity_profile, seedpos, iso_split_level)
intensity_profile = stats[arr][:,2]
# Remove lower masses:
# Take the index of the maximum and remove all masses on the left side
if charge_range[index] * stats[seed_global, 0] < 1000:
maxpos = np.argmax(intensity_profile)
arr = arr[maxpos:]
intensity_profile = stats[arr][:,2]
if (len(arr) > longest_trace) | ((len(arr) == longest_trace) & (intensity_profile.sum() > champion_intensity)):
# Averagine check
cc = check_averagine(stats, arr, charge_range[index], averagine_aa, isotopes)
if cc > 0.6:
# Update the champion
champion_trace = arr
champion_charge = charge_range[index]
longest_trace = len(arr)
champion_intensity = intensity_profile.sum()
return champion_trace, champion_charge
if False:
def test_get_isotope_patterns():
test_centroids = [
[
(300, 50, 1, 1),
(300.501, 40, 1, 1),
(301.003, 30, 1, 1),
(301.504, 20, 1, 1),
(302.006, 10, 1, 1),
],
[
(300, 50, 2, 2),
(300.501, 40, 2, 2),
(301.003, 30, 2, 2),
(301.504, 20, 2, 2),
(302.006, 10, 2, 2),
],
[
(300, 50, 3, 3),
(300.501, 40, 3, 3),
(301.003, 30, 3, 3),
(301.504, 20, 3, 3),
(302.006, 10, 3, 3),
],
[
(300, 50, 4, 4),
(300.501, 40, 4, 4),
(301.003, 30, 4, 4),
(301.504, 20, 4, 4),
(302.006, 10, 4, 4),
],
[
(300, 50, 5, 5),
(300.501, 40, 5, 5),
(301.003, 30, 5, 5),
(301.504, 20, 5, 5),
(302.006, 10, 5, 5),
],
[(400, 10, 6, 6), (401, 10, 6, 6), (402, 10, 6, 6)],
[(400, 10, 7, 7), (401, 10, 7, 7), (402, 10, 7, 7)],
[(400, 10, 8, 8), (401, 10, 8, 8), (402, 10, 8, 8)],
[(400, 10, 9, 9), (401, 10, 9, 9), (402, 10, 9, 9)],
]
centroid_dtype = [("mz", float), ("int", float), ("scan_no", int), ("rt", float)]
test_centroids_tmp = [np.array(_, dtype=centroid_dtype) for _ in test_centroids]
test_centroids = List([_ for _ in test_centroids_tmp])
test_hills = get_hills(test_centroids)
sorted_hills, stats, data, hill_data, hill_ptrs = get_hill_data(test_hills, test_centroids)
pre_patterns = get_edges(stats, data)
isotope_patterns, isotope_charges = get_isotope_patterns(pre_patterns, stats, data, averagine_aa, isotopes)
assert np.all(isotope_patterns[0] == np.array([0, 1, 2, 3, 4]))
assert isotope_charges[0] == 2
assert np.all(isotope_patterns[1] == np.array([5,6,7]))
assert isotope_charges[1] == 1
test_get_isotope_patterns()
#export
from numba.typed import List
from typing import Callable, Union
def get_isotope_patterns(pre_isotope_patterns:list, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, scan_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, averagine_aa:Dict, isotopes:Dict, iso_charge_min:int = 1, iso_charge_max:int = 6, iso_mass_range:float = 5, iso_n_seeds:int = 100, cc_cutoff:float=0.6, iso_split_level:float = 1.3, callback:Union[Callable, None]=None) -> (np.ndarray, np.ndarray, np.ndarray):
"""Wrapper function to iterate over pre_isotope_patterns.
Args:
pre_isotope_patterns (list): List of pre-isotope patterns.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
scan_idx (np.ndarray): Array containing the scan index for a centroid.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
averagine_aa (Dict): Dict containing averagine masses.
isotopes (Dict): Dict containing isotopes.
iso_charge_min (int, optional): Minimum isotope charge. Defaults to 1.
iso_charge_max (int, optional): Maximum isotope charge. Defaults to 6.
iso_mass_range (float, optional): Mass search range. Defaults to 5.
iso_n_seeds (int, optional): Number of isotope seeds. Defaults to 100.
        cc_cutoff (float, optional): Cutoff for correlation. Defaults to 0.6.
        iso_split_level (float, optional): Isotope split level. Defaults to 1.3.
callback (Union[Callable, None], optional): Callback function for progress. Defaults to None.
Returns:
list: List of isotope patterns.
np.ndarray: Iso idx.
np.ndarray: Array containing isotope charges.
"""
    isotope_patterns = []
    isotope_charges = []
    charge_range = List()
    for i in range(iso_charge_min, iso_charge_max + 1):
        charge_range.append(i)
for idx, pre_pattern in enumerate(pre_isotope_patterns):
extract = True
while extract:
isotope_pattern, isotope_charge = isolate_isotope_pattern(np.array(pre_pattern), hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, iso_mass_range, charge_range, averagine_aa, isotopes, iso_n_seeds, cc_cutoff, iso_split_level)
if isotope_pattern is None:
length = 0
else:
length = len(isotope_pattern)
if length > 1:
isotope_charges.append(isotope_charge)
isotope_patterns.append(isotope_pattern)
pre_pattern = [_ for _ in pre_pattern if _ not in isotope_pattern]
if len(pre_pattern) <= 1:
extract = False
else:
extract = False
if callback:
callback((idx+1)/len(pre_isotope_patterns))
iso_patterns = np.zeros(sum([len(_) for _ in isotope_patterns]), dtype=np.int64)
iso_idx = np.zeros(len(isotope_patterns)+1, dtype='int')
start = 0
for idx, _ in enumerate(isotope_patterns):
iso_patterns[start:start+len(_)] = _
start += len(_)
iso_idx[idx+1] = start
return iso_patterns, iso_idx, np.array(isotope_charges)
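# Sketch (kept inert) of how the flat outputs encode the ragged list of patterns,
# assuming the return values are unpacked as in find_features below:
#   isotope_patterns, iso_idx, isotope_charges = get_isotope_patterns(...)
# Pattern k then consists of the hill indices isotope_patterns[iso_idx[k]:iso_idx[k+1]]
# and carries charge isotope_charges[k]; report_ below slices the patterns the same way.
if False:
    k = 0
    pattern_k = isotope_patterns[iso_idx[k]:iso_idx[k + 1]]
    charge_k = isotope_charges[k]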
#export
@alphapept.performance.performance_function(compilation_mode="numba-multithread")
def report_(idx:np.ndarray, isotope_charges:list, isotope_patterns:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray, int_data:np.ndarray, rt_:np.ndarray, rt_idx:np.ndarray, results:np.ndarray):
"""Function to extract summary statstics from a list of isotope patterns and charges.
Args:
idx (np.ndarray): Input index. Note that we are using the performance function so this is a range.
        isotope_charges (list): List with charges assigned to the isotope patterns.
        isotope_patterns (list): List containing isotope patterns (indices to hills).
iso_idx (np.ndarray): Index to isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
int_data (np.ndarray): Array containing the intensity to each centroid.
rt_ (np.ndarray): Array with retention time information for each scan.
rt_idx (np.ndarray): Lookup array to match centroid idx to rt.
results (np.ndarray): Recordarray with isotope pattern summary statistics.
"""
pattern = isotope_patterns[iso_idx[idx]:iso_idx[idx+1]]
isotope_data = stats[pattern]
mz = np.min(isotope_data[:, 0])
mz_std = np.mean(isotope_data[:, 1])
charge = isotope_charges[idx]
mass = mz_to_mass(mz, charge)
int_max_idx = np.argmax(isotope_data[:, 2])
mz_most_abundant = isotope_data[:, 0][int_max_idx]
int_max = isotope_data[:,2][int_max_idx]
rt_start = isotope_data[int_max_idx, 4] # This is the start of the most abundant trace
rt_end = isotope_data[int_max_idx, 5]
# better measurement of the peak with interpolation
rt_min_ = min(isotope_data[:, 4])
rt_max_ = max(isotope_data[:, 5])
rt_range = np.linspace(rt_min_, rt_max_, 100)
trace_sum = np.zeros_like(rt_range)
for k in pattern:
x = sortindex_[k]
start = hill_ptrs[x]
end = hill_ptrs[x + 1]
idx_ = hill_data[start:end]
int_ = int_data[idx_]
rts = rt_[rt_idx[idx_]]
interpolation = np.interp(rt_range, rts, int_)
#Filter
interpolation[:(rt_range < rts[0]).sum()] = 0
right_cut = (rt_range > rts[-1]).sum()
if right_cut > 0:
interpolation[-right_cut:]= 0
trace_sum += interpolation
rt_apex_idx = trace_sum.argmax()
rt_apex = rt_range[rt_apex_idx]
trace = trace_sum
half_max = trace.max()/2
if rt_apex_idx == 0:
left_apex = 0
else:
left_apex = np.abs(trace[:rt_apex_idx]-half_max).argmin()
right_apex = np.abs(trace[rt_apex_idx:]-half_max).argmin()+rt_apex_idx
int_apex = trace_sum[rt_apex_idx]
fwhm = rt_range[right_apex] - rt_range[left_apex]
n_isotopes = len(pattern)
    rt_cutoff = 0.95 # Peak boundaries are set where the trace falls to 5% of the apex intensity
if rt_apex_idx == 0:
rt_min_idx = 0
else:
rt_min_idx = np.abs(trace[:rt_apex_idx]-trace.max()*(1-rt_cutoff)).argmin()
rt_max_idx = np.abs(trace[rt_apex_idx:]-trace.max()*(1-rt_cutoff)).argmin()+rt_apex_idx
#plt.xlabel('rt')
#plt.ylabel('int')
#plt.show()
#plt.plot(rt_range, trace_sum)
#plt.plot([rt_range[left_apex], rt_range[right_apex]], [(trace[left_apex] + trace[right_apex])/2]*2, 'k:')
#plt.plot(rt_range[rt_apex_idx], trace[rt_apex_idx], 'k*')
#plt.plot(rt_range[rt_min_idx], trace[rt_min_idx], 'k*')
#plt.plot(rt_range[rt_max_idx], trace[rt_max_idx], 'k*')
#plt.show()
rt_start = rt_range[rt_min_idx]
rt_end = rt_range[rt_max_idx]
int_area = np.abs(np.trapz(trace_sum[rt_min_idx:rt_max_idx], rt_range[rt_min_idx:rt_max_idx]))
int_sum = trace_sum.sum()
results[idx,:] = np.array([mz, mz_std, mz_most_abundant, charge, rt_start, rt_apex, rt_end, fwhm, n_isotopes, mass, int_apex, int_area, int_sum])
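# Toy sketch (kept inert) of the FWHM estimate used in report_: find the points left and
# right of the apex that are closest to half of the maximum intensity.
if False:
    _rt = np.linspace(0, 10, 101)
    _trace = np.exp(-0.5 * (_rt - 5) ** 2)        # Gaussian peak with sigma = 1
    _apex = _trace.argmax()
    _half = _trace.max() / 2
    _left = np.abs(_trace[:_apex] - _half).argmin()
    _right = np.abs(_trace[_apex:] - _half).argmin() + _apex
    print(_rt[_right] - _rt[_left])               # ~2.4 on this grid, close to 2.355 * sigma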
#export
import pandas as pd
def feature_finder_report(query_data:dict, isotope_patterns:list, isotope_charges:list, iso_idx:np.ndarray, stats:np.ndarray, sortindex_:np.ndarray, hill_ptrs:np.ndarray, hill_data:np.ndarray)->pd.DataFrame:
"""Creates a report dataframe with summary statistics of the found isotope patterns.
Args:
query_data (dict): Data structure containing the query data.
isotope_patterns (list): List containing isotope patterns (indices to hills).
isotope_charges (list): List with charges assigned to the isotope patterns.
iso_idx (np.ndarray): Index to the isotope pattern.
stats (np.ndarray): Stats array that contains summary statistics of hills.
sortindex_ (np.ndarray): Sortindex to access the hills from stats.
hill_ptrs (np.ndarray): Array containing the bounds to the hill_data.
hill_data (np.ndarray): Array containing the indices to hills.
Returns:
pd.DataFrame: DataFrame with isotope pattern summary statistics.
"""
rt_ = np.array(query_data['rt_list_ms1'])
indices_ = np.array(query_data['indices_ms1'])
mass_data = np.array(query_data['mass_list_ms1'])
rt_idx = np.searchsorted(indices_, np.arange(len(mass_data)), side='right') - 1
int_data = np.array(query_data['int_list_ms1'])
results = np.zeros((len(isotope_charges), 13))
report_(range(len(isotope_charges)), isotope_charges, isotope_patterns, iso_idx, stats, sortindex_, hill_ptrs, hill_data, int_data, rt_, rt_idx, results)
df = pd.DataFrame(results, columns = ['mz','mz_std','mz_most_abundant','charge','rt_start','rt_apex','rt_end','fwhm','n_isotopes','mass','int_apex','int_area', 'int_sum'])
    df = df.sort_values(['rt_start','mz'])
return df
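# Sketch (kept inert, toy numbers) of the rt_idx lookup used above. Assuming indices_ms1
# stores the per-scan boundaries into the flat centroid arrays, searchsorted maps every
# centroid back to the scan it came from, and therefore to a retention time.
if False:
    _indices = np.array([0, 3, 7])   # scan 0 holds centroids 0-2, scan 1 holds 3-6, scan 2 the rest
    print(np.searchsorted(_indices, np.arange(9), side='right') - 1)
    # -> [0 0 0 1 1 1 1 2 2]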
###Output
_____no_output_____
###Markdown
Data OutputFor each feature that is found, we extract summary statistics and put them in tabular form to be used as a pandas DataFrame. PlottingFor quality control, we also employ a function to plot a feature in its local environment.
###Code
#export
def plot_isotope_pattern(index:int, df:pd.DataFrame, sorted_stats:np.ndarray, centroids:np.ndarray, scan_range:int=100, mz_range:float=2, plot_hills:bool = False):
"""Plot an isotope pattern in its local environment.
Args:
index (int): Index to the pattern.
df (pd.DataFrame): Pandas DataFrame containing the patterns.
sorted_stats (np.ndarray): Stats array that contains summary statistics of hills.
centroids (np.ndarray): 1D Array containing the masses of the centroids.
scan_range (int, optional): Scan range to plot. Defaults to 100.
mz_range (float, optional): MZ range to plot. Defaults to 2.
plot_hills (bool, optional): Flag to plot hills. Defaults to False.
"""
markersize = 10
plot_offset_mz = 1
plot_offset_rt = 2
feature = df.loc[index]
scan = rt_dict[feature['rt_apex']]
start_scan = scan-scan_range
end_scan = scan+scan_range
mz_min = feature['mz']-mz_range-plot_offset_mz
mz_max = feature['mz']+mz_range+plot_offset_mz
sub_data = np.hstack(centroids[start_scan:end_scan])
selection = sub_data[(sub_data['mz']>mz_min) & (sub_data['mz']<mz_max)]
min_rt = selection['rt'].min() - plot_offset_rt
max_rt = selection['rt'].max() + plot_offset_rt
hill_selection = sorted_stats[(sorted_stats['mz_avg']>mz_min) & (sorted_stats['mz_avg']<mz_max) & (sorted_stats['rt_max']<max_rt) & (sorted_stats['rt_min']>min_rt)]
plt.style.use('dark_background')
plt.figure(figsize=(15,15))
plt.scatter(selection['rt'], selection['mz'], c= np.log(selection['int']), marker='s', s=markersize, alpha=0.9)
plt.colorbar()
plt.grid(False)
plt.xlabel('RT (min)')
plt.ylabel('m/z')
box_height = mz_range/50
if plot_hills:
for hill in hill_selection:
bbox = [hill['rt_min'], hill['mz_avg']-box_height, hill['rt_max'], hill['mz_avg']+box_height]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='w', linewidth=1, alpha = 0.3)
plt.gca().add_patch(rect)
feature_selection = df[(df['mz']>mz_min) & (df['mz']<mz_max) & (df['rt_end']<max_rt) & (df['rt_start']>min_rt)]
for f_idx in feature_selection.index:
for c_idx in range(len(sorted_stats[isotope_patterns[f_idx]])-1):
start = sorted_stats[isotope_patterns[f_idx]][c_idx]
end = sorted_stats[isotope_patterns[f_idx]][c_idx+1]
start_mass = start['mz_avg']
start_rt = (start['rt_min']+start['rt_max'])/2
end_mass = end['mz_avg']
end_rt = (end['rt_min']+end['rt_max'])/2
plt.plot([start_rt, end_rt], [start_mass, end_mass], '+', color='y')
plt.plot([start_rt, end_rt], [start_mass, end_mass], ':', color='y')
if plot_hills:
for hill_idx in isotope_patterns[f_idx]:
hill = sorted_stats[hill_idx]
bbox = [hill['rt_min'], hill['mz_avg']-box_height, hill['rt_max'], hill['mz_avg']+box_height]
rect = plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='g', linewidth=1, alpha = 0.8)
plt.gca().add_patch(rect)
plt.xlim([min_rt+plot_offset_rt, max_rt-plot_offset_rt])
plt.ylim([mz_min+plot_offset_mz, mz_max-plot_offset_mz])
plt.title('Pattern')
plt.show()
plt.style.use('ggplot')
###Output
_____no_output_____
###Markdown
External Feature FinderTo utilize the command-line Feature Finder from Bruker `4DFF-3.13` - `uff-cmdline2.exe`, we call it via a subprocess and wait until completion.
###Code
#export
import subprocess
import os
import platform
def extract_bruker(file:str, base_dir:str = "ext/bruker/FF", config:str = "proteomics_4d.config"):
"""Call Bruker Feautre Finder via subprocess.
Args:
file (str): Filename for feature finding.
        base_dir (str, optional): Base directory where the feature finder is stored. Defaults to "ext/bruker/FF".
config (str, optional): Config file for feature finder. Defaults to "proteomics_4d.config".
Raises:
NotImplementedError: Unsupported operating system.
FileNotFoundError: Feature finder not found.
FileNotFoundError: Config file not found.
FileNotFoundError: Feature file not found.
"""
feature_path = file + '/'+ os.path.split(file)[-1] + '.features'
base_dir = os.path.join(os.path.dirname(__file__), base_dir)
operating_system = platform.system()
if operating_system == 'Linux':
ff_dir = os.path.join(base_dir, 'linux64','uff-cmdline2')
logging.info('Using Linux FF')
elif operating_system == 'Windows':
ff_dir = os.path.join(base_dir, 'win64','uff-cmdline2.exe')
logging.info('Using Windows FF')
else:
raise NotImplementedError(f"System {operating_system} not supported.")
if os.path.exists(feature_path):
return feature_path
else:
if not os.path.isfile(ff_dir):
raise FileNotFoundError(f'Bruker feature finder cmd not found here {ff_dir}.')
config_path = base_dir + '/'+ config
if not os.path.isfile(config_path):
raise FileNotFoundError(f'Config file not found here {config_path}.')
if operating_system == 'Windows':
FF_parameters = [ff_dir,'--ff 4d',f'--readconfig "{config_path}"', f'--analysisDirectory "{file}"']
process = subprocess.Popen(' '.join(FF_parameters), stdout=subprocess.PIPE)
for line in iter(process.stdout.readline, b''):
logtxt = line.decode('utf8')
logging.info(logtxt[48:].rstrip()) #Remove logging info from FF
elif operating_system == 'Linux':
FF_parameters = [
ff_dir,
'--ff',
'4d',
'--readconfig',
config_path,
'--analysisDirectory',
file
]
process = subprocess.run(FF_parameters, stdout=subprocess.PIPE)
if os.path.exists(feature_path):
return feature_path
else:
raise FileNotFoundError(f"Feature file {feature_path} does not exist.")
import sqlalchemy as db
def convert_bruker(feature_path:str)->pd.DataFrame:
"""Reads feature table and converts to feature table to be used with AlphaPept.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
feature_table = pd.read_sql_table('LcTimsMsFeature', engine_featurefile)
from alphapept.constants import mass_dict
M_PROTON = mass_dict['Proton']
feature_table['Mass'] = feature_table['MZ'].values * feature_table['Charge'].values - feature_table['Charge'].values*M_PROTON
feature_table = feature_table.rename(columns={"MZ": "mz","Mass": "mass", "RT": "rt_apex", "RT_lower":"rt_start", "RT_upper":"rt_end", "Mobility": "mobility", "Mobility_lower": "mobility_lower", "Mobility_upper": "mobility_upper", "Charge":"charge","Intensity":'int_sum',"ClusterCount":'n_isotopes'})
feature_table['rt_apex'] = feature_table['rt_apex']/60
feature_table['rt_start'] = feature_table['rt_start']/60
feature_table['rt_end'] = feature_table['rt_end']/60
return feature_table
def map_bruker(feature_path:str, feature_table:pd.DataFrame, query_data:dict)->pd.DataFrame:
"""Map Ms1 to Ms2 via Table FeaturePrecursorMapping from Bruker FF.
Args:
feature_path (str): Path to the feature file from Bruker FF (.features-file).
feature_table (pd.DataFrame): Pandas DataFrame containing the features.
query_data (dict): Data structure containing the query data.
Returns:
pd.DataFrame: DataFrame containing features information.
"""
engine_featurefile = db.create_engine('sqlite:///{}'.format(feature_path))
mapping = pd.read_sql_table('FeaturePrecursorMapping', engine_featurefile)
mapping = mapping.set_index('PrecursorId')
feature_table= feature_table.set_index('Id')
query_prec_id = query_data['prec_id']
#Now look up the feature for each precursor
mass_matched = []
mz_matched = []
rt_matched = []
query_idx = []
f_idx = []
for idx, prec_id in tqdm(enumerate(query_prec_id)):
try:
f_id = mapping.loc[prec_id]['FeatureId']
all_matches = feature_table.loc[f_id]
if type(f_id) == np.int64:
match = all_matches
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
else:
for k in range(len(all_matches)):
match = all_matches.iloc[k]
mz_matched.append(match['mz'])
rt_matched.append(match['rt_apex'])
mass_matched.append(match['mass'])
query_idx.append(idx)
f_idx.append(match['FeatureId'])
except KeyError:
pass
features = pd.DataFrame(np.array([mass_matched, mz_matched, rt_matched, query_idx, f_idx]).T, columns = ['mass_matched', 'mz_matched', 'rt_matched', 'query_idx', 'feature_idx'])
features['query_idx'] = features['query_idx'].astype('int')
return features
###Output
_____no_output_____
###Markdown
Wrapper
###Code
#export
import numpy as np
import logging
import os
from alphapept.search import query_data_to_features
import alphapept.io
import functools
def find_features(to_process:tuple, callback:Union[Callable, None] = None, parallel:bool = False)-> Union[str, bool]:
"""Wrapper for feature finding.
Args:
        to_process (tuple): to_process tuple, to be used from a process pool.
callback (Union[Callable, None], optional): Optional callback function. Defaults to None.
parallel (bool, optional): Flag to use parallel processing. Currently unused. Defaults to False.
Raises:
NotImplementedError: Error if the file extension is not understood.
Returns:
        Union[str, bool]: Returns True if the function was successful, otherwise the exception as a string.
"""
try:
index, settings = to_process
file_name = settings['experiment']['file_paths'][index]
base, ext = os.path.splitext(file_name)
if ext.lower() == '.raw':
datatype='thermo'
elif ext.lower() == '.d':
datatype='bruker'
elif ext.lower() == '.mzml':
datatype='mzml'
else:
raise NotImplementedError('File extension {} not understood.'.format(ext))
out_file = f"{base}.ms_data.hdf"
skip = True
if os.path.isfile(out_file):
try:
alphapept.io.MS_Data_File(
out_file
).read(dataset_name="features")
logging.info(
'Found *.hdf with features for {}'.format(out_file)
)
except KeyError:
logging.info(
'No *.hdf file with features found for {}. Adding to feature finding list.'.format(out_file)
)
skip = False
if not skip:
ms_file = alphapept.io.MS_Data_File(out_file, is_read_only=False)
query_data = ms_file.read_DDA_query_data()
if not settings['workflow']["find_features"]:
features = query_data_to_features(query_data)
else:
if datatype in ['thermo','mzml']:
from alphapept.constants import averagine_aa, isotopes
f_settings = settings['features']
max_gap = f_settings['max_gap']
centroid_tol = f_settings['centroid_tol']
hill_split_level = f_settings['hill_split_level']
iso_split_level = f_settings['iso_split_level']
window = f_settings['hill_smoothing']
hill_check_large = f_settings['hill_check_large']
iso_charge_min = f_settings['iso_charge_min']
iso_charge_max = f_settings['iso_charge_max']
iso_n_seeds = f_settings['iso_n_seeds']
hill_nboot_max = f_settings['hill_nboot_max']
hill_nboot = f_settings['hill_nboot']
iso_mass_range = f_settings['iso_mass_range']
iso_corr_min = f_settings['iso_corr_min']
logging.info('Feature finding on {}'.format(file_name))
logging.info(f'Hill extraction with centroid_tol {centroid_tol} and max_gap {max_gap}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, centroid_tol)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
logging.info(f'Repeating hill extraction with centroid_tol {score_median+score_std*3:.2f}')
hill_ptrs, hill_data, path_node_cnt, score_median, score_std = extract_hills(query_data, max_gap, score_median+score_std*3)
logging.info(f'Number of hills {len(hill_ptrs):,}, len = {np.mean(path_node_cnt):.2f}')
int_data = np.array(query_data['int_list_ms1'])
                    hill_ptrs = split_hills(hill_ptrs, hill_data, int_data, hill_split_level=hill_split_level, window = window) # hill length is in there already
logging.info(f'After split hill_ptrs {len(hill_ptrs):,}')
hill_data, hill_ptrs = filter_hills(hill_data, hill_ptrs, int_data, hill_check_large =hill_check_large, window=window)
logging.info(f'After filter hill_ptrs {len(hill_ptrs):,}')
stats, sortindex_, idxs_upper, scan_idx, hill_data, hill_ptrs = get_hill_data(query_data, hill_ptrs, hill_data, hill_nboot_max = hill_nboot_max, hill_nboot = hill_nboot)
logging.info('Extracting hill stats complete')
pre_isotope_patterns = get_pre_isotope_patterns(stats, idxs_upper, sortindex_, hill_ptrs, hill_data, int_data, scan_idx, maximum_offset, iso_charge_min=iso_charge_min, iso_charge_max=iso_charge_max, iso_mass_range=iso_mass_range, cc_cutoff=iso_corr_min)
logging.info('Found {:,} pre isotope patterns.'.format(len(pre_isotope_patterns)))
isotope_patterns, iso_idx, isotope_charges = get_isotope_patterns(pre_isotope_patterns, hill_ptrs, hill_data, int_data, scan_idx, stats, sortindex_, averagine_aa, isotopes, iso_charge_min = iso_charge_min, iso_charge_max = iso_charge_max, iso_mass_range = iso_mass_range, iso_n_seeds = iso_n_seeds, cc_cutoff = iso_corr_min, iso_split_level=iso_split_level, callback=None)
logging.info('Extracted {:,} isotope patterns.'.format(len(isotope_charges)))
feature_table = feature_finder_report(query_data, isotope_patterns, isotope_charges, iso_idx, stats, sortindex_, hill_ptrs, hill_data)
logging.info('Report complete.')
elif datatype == 'bruker':
logging.info('Feature finding on {}'.format(file_name))
feature_path = extract_bruker(file_name)
feature_table = convert_bruker(feature_path)
                logging.info('Bruker feature finder complete. Extracted {:,} features.'.format(len(feature_table)))
# Calculate additional params
feature_table['rt_length'] = feature_table['rt_end'] - feature_table['rt_start']
feature_table['rt_right'] = feature_table['rt_end'] - feature_table['rt_apex']
feature_table['rt_left'] = feature_table['rt_apex'] - feature_table['rt_start']
feature_table['rt_tail'] = feature_table['rt_right'] / feature_table['rt_left']
logging.info('Matching features to query data.')
features = map_ms2(feature_table, query_data, **settings['features'])
logging.info('Saving feature table.')
ms_file.write(feature_table, dataset_name="feature_table")
logging.info('Feature table saved to {}'.format(out_file))
logging.info('Saving features.')
ms_file.write(features, dataset_name="features")
logging.info(f'Feature finding of file {file_name} complete.')
return True
except Exception as e:
logging.error(f'Feature finding of file {file_name} failed. Exception {e}')
return f"{e}" #Can't return exception object, cast as string
###Output
_____no_output_____
###Markdown
MappingMapping MS1 to MS2
###Code
#export
from sklearn.neighbors import KDTree
import pandas as pd
import numpy as np
def replace_infs(array:np.ndarray)->np.ndarray:
"""Replace nans and infs with 0
Args:
array (np.ndarray): Input array.
Returns:
np.ndarray: Output array without nans and infs.
"""
array[array == -np.inf] = 0
array[array == np.inf] = 0
array[np.isnan(array)] = 0
return array
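# Quick illustration (kept inert) of the helper above:
if False:
    print(replace_infs(np.array([1.0, np.inf, -np.inf, np.nan])))  # -> [1. 0. 0. 0.]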
def map_ms2(feature_table:pd.DataFrame, query_data:dict, map_mz_range:float = 1, map_rt_range:float = 0.5, map_mob_range:float = 0.3, map_n_neighbors:int=5, search_unidentified:bool = False, **kwargs)->pd.DataFrame:
"""Map MS1 features to MS2 based on rt and mz.
    If mobility (CCS) information is present, it is mapped as well.
Args:
feature_table (pd.DataFrame): Pandas DataFrame with features.
query_data (dict): Data structure containing the query data.
map_mz_range (float, optional): Mapping range for mz (Da). Defaults to 1.
map_rt_range (float, optional): Mapping range for rt (min). Defaults to 0.5.
map_mob_range (float, optional): Mapping range for mobility (%). Defaults to 0.3.
map_n_neighbors (int, optional): Maximum number of neighbors to be extracted. Defaults to 5.
search_unidentified (bool, optional): Flag to perform search on features that have no isotope pattern. Defaults to False.
Returns:
pd.DataFrame: Table with features.
"""
feature_table['rt'] = feature_table['rt_apex']
range_dict = {}
range_dict['mz'] = ('mono_mzs2', map_mz_range)
range_dict['rt'] = ('rt_list_ms2', map_rt_range)
range_dict['mobility'] = ('mobility', map_mob_range)
query_dict = {}
query_dict['rt'] = 'rt_list_ms2'
query_dict['mass'] = 'prec_mass_list2'
query_dict['mz'] = 'mono_mzs2'
query_dict['charge'] = 'charge2'
query_dict['mobility'] = 'mobility'
if 'mobility' not in feature_table.columns:
del range_dict['mobility']
del query_dict['mobility']
use_mob = False
else:
use_mob = True
tree_points = feature_table[list(range_dict.keys())].values
for i, key in enumerate(range_dict):
tree_points[:,i] = tree_points[:,i]/range_dict[key][1]
matching_tree = KDTree(tree_points, metric="minkowski")
ref_points = np.array([query_data[range_dict[_][0]] / range_dict[_][1] for _ in range_dict]).T
ref_points = replace_infs(ref_points)
dist, idx = matching_tree.query(ref_points, k=map_n_neighbors)
ref_matched = np.zeros(ref_points.shape[0], dtype=np.bool_)
all_df = []
for neighbor in range(map_n_neighbors):
ref_df = pd.DataFrame(np.array([query_data[query_dict[_]] for _ in query_dict]).T, columns = query_dict.keys())
for _ in query_dict:
ref_df[_+'_matched'] = feature_table.iloc[idx[:,neighbor]][_].values
ref_df[_+'_offset'] = ref_df[_+'_matched'] - ref_df[_]
ref_df['query_idx'] = ref_df.index
ref_df['feature_idx'] = idx[:,neighbor]
for field in ['int_sum','int_apex','rt_start','rt_apex','rt_end','fwhm','mobility_lower','mobility_upper']:
if field in feature_table.keys():
ref_df[field] = feature_table.iloc[idx[:,neighbor]][field].values
rt_check = (ref_df['rt_start'] <= ref_df['rt']) & (ref_df['rt'] <= ref_df['rt_end'])
# check isolation window (win=3)
mass_check = np.abs(ref_df['mz_offset'].values) <= 3
_check = rt_check & mass_check
if use_mob:
mob_check = (ref_df['mobility_lower'] <= ref_df['mobility']) & (ref_df['mobility'] <= ref_df['mobility_upper'])
_check &= mob_check
ref_matched |= _check
ref_df['dist'] = dist[:,neighbor]
ref_df = ref_df[_check]
all_df.append(ref_df)
if search_unidentified:
if use_mob:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2'], query_data['mobility']]).T, columns=['rt', 'mass', 'mz', 'charge','mobility'])
else:
unmatched_ref = pd.DataFrame(np.array([query_data['rt_list_ms2'], query_data['prec_mass_list2'], query_data['mono_mzs2'], query_data['charge2']]).T, columns=['rt', 'mass', 'mz', 'charge'])
unmatched_ref = unmatched_ref[~ref_matched]
unmatched_ref['mass_matched'] = unmatched_ref['mass']
unmatched_ref['mass_offset'] = 0
unmatched_ref['rt_matched'] = unmatched_ref['rt']
unmatched_ref['rt_offset'] = 0
unmatched_ref['mz_matched'] = unmatched_ref['mz']
unmatched_ref['mz_offset'] = 0
unmatched_ref['charge_matched'] = unmatched_ref['charge']
unmatched_ref['query_idx'] = unmatched_ref.index
unmatched_ref['feature_idx'] = np.nan
if use_mob:
            unmatched_ref['mobility_matched'] = unmatched_ref['mobility']
            unmatched_ref['mobility_offset'] = np.nan
for field in ['int_sum','int_apex','rt_start','rt_apex','rt_end','fwhm']:
if field in feature_table.keys():
unmatched_ref[field] = np.nan
unmatched_ref['dist'] = np.nan
all_df.append(unmatched_ref)
features = pd.concat(all_df)
features = features.sort_values('mass_matched', ascending=True)
features = features.reset_index(drop=True)
return features
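# Sketch (kept inert, hypothetical toy feature table) of the scaling trick used in map_ms2:
# dividing every coordinate by its mapping range makes one unit of KDTree distance
# correspond to one tolerance in each dimension, so a single query covers mz, rt (and
# mobility, if present) at once.
if False:
    _feat = pd.DataFrame({'mz': [500.2, 600.1], 'rt': [10.5, 22.0]})
    _ranges = np.array([1.0, 0.5])                          # map_mz_range, map_rt_range
    _tree = KDTree(_feat[['mz', 'rt']].values / _ranges, metric="minkowski")
    _dist, _idx = _tree.query(np.array([[500.3, 10.4]]) / _ranges, k=1)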
#hide
from nbdev.showdoc import *
#hide
from nbdev.export import *
notebook2script()
###Output
Converted 00_settings.ipynb.
Converted 01_chem.ipynb.
Converted 02_io.ipynb.
Converted 03_fasta.ipynb.
Converted 04_feature_finding.ipynb.
Converted 05_search.ipynb.
Converted 06_score.ipynb.
Converted 07_recalibration.ipynb.
Converted 08_quantification.ipynb.
Converted 09_matching.ipynb.
Converted 10_constants.ipynb.
Converted 11_interface.ipynb.
Converted 12_performance.ipynb.
Converted 13_export.ipynb.
Converted additional_code.ipynb.
Converted contributing.ipynb.
Converted file_formats.ipynb.
Converted index.ipynb.
|
tutorials/wrn_chunking_strategies.ipynb | ###Markdown
Chunking strategies for a Wide-ResNetThis tutorial shows how to utilize a hypernet container [HContainer](../hnets/hnet_container.py) and class [StructuredHMLP](../hnets/structured_mlp_hnet.py) (a certain kind of hypernetwork that allows *smart* chunking) in combination with a Wide-ResNet [WRN](../mnets/wide_resnet.py).
###Code
# Ensure code of repository is visible to this tutorial.
import sys
sys.path.insert(0, '..')
import numpy as np
import torch
from hnets.structured_hmlp_examples import wrn_chunking
from hnets import HContainer, StructuredHMLP
from mnets import WRN
###Output
_____no_output_____
###Markdown
Instantiate a WRN-28-10-B(3,3)First, we instantiate a WRN-28-10 (i.e., a WRN containing $28$ convolutional layers (and an additional fully-connected output layer) and a widening factor $k=10$) with no internal weights (`no_weights=True`). Thus, its weights are expected to originate externally (in our case from a hypernetwork) and to be passed to its `forward` method.In particular, we are interested in instantiating a network that matches the one used in the study [Sacramento et al., "Economical ensembles with hypernetworks", 2020](https://arxiv.org/abs/2007.12927) (accessed August 18th, 2020). Therefore, the convolutional layers won't have bias terms (but the final fully-connected layer will).
###Code
net = WRN(in_shape=(32, 32, 3), num_classes=10, n=4, k=10,
num_feature_maps=(16, 16, 32, 64), use_bias=False,
use_fc_bias=True, no_weights=False, use_batch_norm=True,
dropout_rate=-1)
###Output
Creating a WideResnet "WRN-28-10-B(3,3)" with 36479194 weights. The network uses batchnorm.
###Markdown
Reproduce the chunking strategy from Sacramento et al.We first design a hypernetwork that matches the chunking strategy described in [Sacramento et al.](https://arxiv.org/abs/2007.12927). Thus, not all parameters are produced by a hypernetwork. Batchnorm weights will be shared among conditions (in their case, each condition represents one ensemble member), while the output layer weights will be condition-specific (ensemble-member-specific). The remaining weight are produced via linear hypernetworks (no bias terms in the hypernets) using a specific chunking strategy, which is described in the paper and in the docstring of function [wrn_chunking](../hnets/structured_hmlp_examples.py). To realize the mixture between shared weights (batchnorm), condition-specific weights (output weights) and hypernetwork-produced weights, we employ the special hypernetwork class [HContainer](../hnets/hnet_container.py).We first create an instance of class [StructuredHMLP](../hnets/structured_mlp_hnet.py) for all hypernetwork-produced weights.
###Code
# Number of conditions (ensemble members). Arbitrarily chosen!
num_conds = 10
# Split the network's parameter shapes into shapes corresponding to batchnorm-weights,
# hypernet-produced weights and output weights.
# Here, we make use of implementation specific knowledge, which could also be retrieved
# via the network's "param_shapes_meta" attribute, which contains meta information
# about all parameters.
bn_shapes = net.param_shapes[:2*len(net.batchnorm_layers)] # Batchnorm weight shapes
hnet_shapes = net.param_shapes[2*len(net.batchnorm_layers):-2] # Conv layer weight shapes
out_shapes = net.param_shapes[-2:] # Output layer weight shapes
# This function already defines the network chunking in the same way the paper
# specifies it.
chunk_shapes, num_per_chunk, assembly_fct = wrn_chunking(net, ignore_bn_weights=True,
ignore_out_weights=True,
gcd_chunking=False)
# Taken from table S1 in the paper.
chunk_emb_sizes = [10, 7, 14, 14, 14, 7, 7, 7]
# Important, the underlying hypernetworks should be linear, i.e., no hidden layers:
# ``layers': []``
# They also should not use bias vectors -> hence, weights are simply generated via a
# matrix vector product (chunk embedding input times hypernet, which is a weight matrix).
# Note, we make the chunk embeddings conditional and tell the hypernetwork, that
# it doesn't have to expect any other input except those learned condition-specific
# embeddings.
shnet = StructuredHMLP(hnet_shapes, chunk_shapes, num_per_chunk, chunk_emb_sizes,
{'layers': [], 'use_bias': False}, assembly_fct,
cond_chunk_embs=True, uncond_in_size=0,
cond_in_size=0, num_cond_embs=num_conds)
###Output
Created Structured Chunked MLP Hypernet.
It manages 8 full hypernetworks internally that produce 42 chunks in total.
The internal hypernetworks have a combined output size of 2816432 compared to 36454832 weights produced by this network.
Hypernetwork with 37462680 weights and 36454832 outputs (compression ratio: 1.03).
The network consists of 37457120 unconditional weights (37457120 internally maintained) and 5560 conditional weights (5560 internally maintained).
###Markdown
Now, we combine the above produce `shnet` with shared batchnorm weights and condition-specific output weights in an instance of class [HContainer](../hnets/hnet_container.py), which will represent the final hypernetwork.
###Code
# We first have to create a simple function handle that tells the `HContainer` how to
# recombine the batchnorm-weights, hypernet-produced weights and output weights.
def simple_assembly_func(list_of_hnet_tensors, uncond_tensors, cond_tensors):
# `list_of_hnet_tensors`: Contains outputs of all linear hypernets (conv
# layer weights).
# `uncond_tensors`: Contains the single set of shared batchnorm weights.
# `cond_tensors`: Contains the condition-specific output weights.
return uncond_tensors + list_of_hnet_tensors[0] + cond_tensors
hnet = HContainer(net.param_shapes, simple_assembly_func, hnets=[shnet],
uncond_param_shapes=bn_shapes, cond_param_shapes=out_shapes,
num_cond_embs=num_conds)
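# Optional sanity check (a sketch; kept inert): the container should return one weight
# tensor per parameter tensor of the WRN for every requested condition.
if False:
    _w = hnet.forward(cond_id=[0])[0]
    assert len(_w) == len(net.param_shapes)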
###Output
Created Hypernet Container for 1 hypernet(s). Container maintains 50 plain unconditional parameter tensors. Container maintains 2 plain conditional parameter tensors for each of 10 condiditions.
Hypernetwork with 37544732 weights and 36479194 outputs (compression ratio: 1.03).
The network consists of 37475072 unconditional weights (37475072 internally maintained) and 69660 conditional weights (69660 internally maintained).
###Markdown
Create sample predictions for 3 different ensemble members.
###Code
# Batch of inputs.
batch_size = 1
x = torch.rand((batch_size, 32*32*3))
# Which ensemble members to consider?
cond_ids = [2,3,7]
# Generate weights for ensemble members defined above.
weights = hnet.forward(cond_id=cond_ids)
# Compute prediction for each ensemble member.
for i in range(len(cond_ids)):
pred = net.forward(x, weights=weights[i])
# Apply softmax.
pred = torch.nn.functional.softmax(pred, dim=1).cpu().detach().numpy()
print('Prediction of ensemble member %d: %s' \
% (cond_ids[i], np.array2string(pred, precision=3, separator=', ')))
###Output
Prediction of ensemble member 2: [[0.102, 0.102, 0.1 , 0.098, 0.099, 0.1 , 0.098, 0.097, 0.1 , 0.105]]
Prediction of ensemble member 3: [[0.103, 0.098, 0.101, 0.099, 0.102, 0.096, 0.098, 0.101, 0.104, 0.098]]
Prediction of ensemble member 7: [[0.102, 0.101, 0.097, 0.101, 0.099, 0.101, 0.097, 0.1 , 0.102, 0.101]]
###Markdown
Create a batch-ensemble networkNow, we consider the special case where all parameters are shared except for batchnorm weights and output weights. Thus, no hypernetworks are required. Still, we use the class [HContainer](../hnets/hnet_container.py) for convenience.
###Code
def simple_assembly_func2(list_of_hnet_tensors, uncond_tensors, cond_tensors):
# `list_of_hnet_tensors`: None
# `uncond_tensors`: Contains all conv layer weights.
# `cond_tensors`: Contains the condition-specific batchnorm and output weights.
return cond_tensors[:-2] + uncond_tensors + cond_tensors[-2:]
hnet2 = HContainer(net.param_shapes, simple_assembly_func2, hnets=None,
uncond_param_shapes=hnet_shapes,
cond_param_shapes=bn_shapes+out_shapes,
num_cond_embs=num_conds)
# Batch of inputs.
batch_size = 1
x = torch.rand((batch_size, 32*32*3))
# Which ensemble members to consider?
cond_ids = [2,3,7]
# Generate weights for ensemble members defined above.
weights = hnet2.forward(cond_id=cond_ids)
# Compute prediction for each ensemble member.
for i in range(len(cond_ids)):
pred = net.forward(x, weights=weights[i])
# Apply softmax.
pred = torch.nn.functional.softmax(pred, dim=1).cpu().detach().numpy()
print('Prediction of ensemble member %d: %s' \
% (cond_ids[i], np.array2string(pred, precision=3, separator=', ')))
###Output
Prediction of ensemble member 2: [[0.101, 0.102, 0.1 , 0.1 , 0.1 , 0.103, 0.101, 0.098, 0.098, 0.097]]
Prediction of ensemble member 3: [[0.098, 0.1 , 0.101, 0.104, 0.1 , 0.1 , 0.101, 0.1 , 0.097, 0.098]]
Prediction of ensemble member 7: [[0.098, 0.099, 0.101, 0.101, 0.1 , 0.102, 0.099, 0.101, 0.098, 0.101]]
|
notebooks/shop_analysis.ipynb | ###Markdown
UberEats Store Data Analysis (around Musashi-Nakahara)
###Code
import pandas as pd
import datetime
import seaborn as sns
from pyproj import Geod
import japanize_matplotlib
FILE_PATH = './../data/shop_master.csv'
master = pd.read_csv(FILE_PATH, index_col='id')
len(master)
master.head(3)
df = master.assign(
shop_position_id = master.apply(lambda x: str(x.latitude) + "-" + str(x.longitude), axis=1)).drop(["address", "url"], axis=1)
df.head(3)
###Output
_____no_output_____
###Markdown
Classify stores by nearest station Stores are classified into Musashi-Nakahara, Musashi-Shinjo, Musashi-Kosugi, Musashi-Mizonokuchi, and Other. A station within a 1 km radius is defined as the nearest station.
###Code
kosugi_lat, kosugi_lng = 35.5766335,139.6572773 # Musashi-Kosugi Station
nakahara_lat, nakahara_lng = 35.5807143,139.6399183 # Musashi-Nakahara Station
shinjo_lat, shinjo_lng = 35.58718,139.6276537 # Musashi-Shinjo Station
mizonokuchi_lat, mizonokuchi_lng = 35.5991055,139.6087969 # Musashi-Mizonokuchi Station
stations = [
["武蔵小杉", [kosugi_lat, kosugi_lng]],
["武蔵中原", [nakahara_lat, nakahara_lng]],
["武蔵新城", [shinjo_lat, shinjo_lng]],
["武蔵溝ノ口", [mizonokuchi_lat, mizonokuchi_lng]]
]
def is_near_station(station_lat, station_lng, shop_lat, shop_lng):
    grs80 = Geod(ellps='GRS80') # GRS80 ellipsoid
_, _, distance = grs80.inv(station_lng, station_lat, shop_lng, shop_lat)
return distance <= 1000
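# Illustrative example (kept inert): distance in meters between Musashi-Kosugi and
# Musashi-Nakahara stations using the same GRS80 geodesic as above; roughly 1.6 km.
if False:
    _, _, d = Geod(ellps='GRS80').inv(kosugi_lng, kosugi_lat, nakahara_lng, nakahara_lat)
    print(round(d))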
def get_near_station_name(station, shop_lat, shop_lng, v):
station_name = station[0]
station_position = station[1]
if is_near_station(station_position[0], station_position[1], shop_lat, shop_lng):
return station_name
else:
if v != "その他":
return v
else:
return "その他"
df["near_station"] = "その他"
for station in stations:
df["near_station"] = df.apply(lambda x: get_near_station_name(station, x.latitude, x.longitude, x.near_station), axis=1)
output = pd.DataFrame(df.groupby("near_station").count()["name"])
output.index.name = "最寄り駅"
output = output.rename(columns = {"name": "店舗数"})
sns.barplot(x="最寄り駅", y="店舗数", data=output.reset_index())
###Output
_____no_output_____
###Markdown
Store count summary
###Code
output
###Output
_____no_output_____
###Markdown
Review count and rating summary
###Code
output2 = pd.DataFrame(df.groupby("near_station").sum()[["reviews", "point"]])
output2
###Output
_____no_output_____
###Markdown
Kosugi has fewer reviews than Mizonokuchi yet a higher total rating score. Does that mean Kosugi has more good stores than Mizonokuchi? Investigating chain stores
###Code
def is_chain(x):
if "店" in x:
return True
else:
return False
df["is_chain"] = df["name"].apply(is_chain)
pd.crosstab(df['near_station'], df['is_chain'])
print("武蔵小杉: {}%".format(round((25/48)*100)))
print("武蔵溝ノ口: {}%".format(round((32/53)*100)))
###Output
武蔵小杉: 52%
武蔵溝ノ口: 60%
|
testing/notebooks/helm_cluster_wide_tests.ipynb | ###Markdown
Example Seldon Core Deployments using Helm PrerequisitesYou will need - [Git clone of Seldon Core](https://github.com/SeldonIO/seldon-core) - A running Kubernetes cluster with kubectl authenticated - [python grpc tools](https://grpc.io/docs/quickstart/python.html) - [Helm client](https://helm.sh/) Creating a Kubernetes ClusterFollow the [Kubernetes documentation to create a cluster](https://kubernetes.io/docs/setup/).Once created, ensure ```kubectl``` is authenticated against the running cluster. Setup
###Code
!kubectl create namespace seldon
!kubectl config set-context $(kubectl config current-context) --namespace=seldon
!kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
###Output
_____no_output_____
###Markdown
Install Helm
###Code
!kubectl -n kube-system create sa tiller
!kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
!helm init --service-account tiller
!kubectl rollout status deploy/tiller-deploy -n kube-system
###Output
_____no_output_____
###Markdown
Start seldon-core
###Code
!helm install ../../helm-charts/seldon-core-crd --name seldon-core-crd --set usage_metrics.enabled=true
!helm install ../../helm-charts/seldon-core --name seldon-core --namespace seldon --set ambassador.enabled=true --set single_namespace=false
!kubectl rollout status deploy/seldon-core-seldon-cluster-manager
!kubectl rollout status deploy/seldon-core-seldon-apiserver
!kubectl rollout status deploy/seldon-core-ambassador
###Output
_____no_output_____
###Markdown
Set up REST and gRPC methods**Ensure you port forward ambassador and API gateway**:```kubectl port-forward $(kubectl get pods -n seldon -l service=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080``````kubectl port-forward $(kubectl get pods -n seldon -l app=seldon-apiserver-container-app -o jsonpath='{.items[0].metadata.name}') -n seldon 8002:8080``````kubectl port-forward $(kubectl get pods -n seldon -l app=seldon-apiserver-container-app -o jsonpath='{.items[0].metadata.name}') -n seldon 8004:5000``` Install gRPC modules for the prediction protos.
###Code
!cp ../../proto/prediction.proto ./proto
!cd ../../proto/tensorflow && make create_protos
!cp -vr ../../proto/tensorflow/tensorflow .
!python -m grpc.tools.protoc -I. --python_out=. --grpc_python_out=. ./proto/prediction.proto
###Output
_____no_output_____
###Markdown
Serve Single Model
###Code
!kubectl create namespace test1
!helm install ../../helm-charts/seldon-single-model --name mymodel --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1
!kubectl rollout status deploy/mymodel-mymodel-7cd068f -n test1
###Output
_____no_output_____
###Markdown
Get predictions
###Code
import sys
sys.path.append("../../notebooks")
from seldon_utils import *
API_AMBASSADOR="localhost:8003"
API_GATEWAY_REST="localhost:8002"
API_GATEWAY_GRPC="localhost:8004"
###Output
_____no_output_____
###Markdown
REST Request
###Code
r = rest_request_api_gateway("oauth-key","oauth-secret","test1",API_GATEWAY_REST)
print(r.text)
r = rest_request_ambassador("mymodel","test1",API_AMBASSADOR)
print(r.text)
###Output
_____no_output_____
###Markdown
gRPC Request
###Code
grpc_request_ambassador("mymodel","test1",API_AMBASSADOR)
grpc_request_api_gateway("oauth-key","oauth-secret","test1",rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
!helm delete mymodel --purge
###Output
_____no_output_____
###Markdown
Serve AB Test
###Code
!helm install ../../helm-charts/seldon-abtest --name myabtest --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1
!kubectl rollout status deploy/myabtest-abtest-41de5b8 -n test1
!kubectl rollout status deploy/myabtest-abtest-df66c5c -n test1
###Output
_____no_output_____
###Markdown
Get predictions
###Code
r = rest_request_api_gateway("oauth-key","oauth-secret","test1",API_GATEWAY_REST)
print(r.text)
r = rest_request_ambassador("myabtest","test1",API_AMBASSADOR)
print(r.text)
###Output
_____no_output_____
###Markdown
gRPC Request
###Code
grpc_request_ambassador("myabtest","test1",API_AMBASSADOR)
grpc_request_api_gateway("oauth-key","oauth-secret","test1",rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
!helm delete myabtest --purge
###Output
_____no_output_____
###Markdown
Serve Multi-Armed Bandit
###Code
!helm install ../../helm-charts/seldon-mab --name mymab --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1
!kubectl rollout status deploy/mymab-abtest-41de5b8 -n test1
!kubectl rollout status deploy/mymab-abtest-b8038b2 -n test1
!kubectl rollout status deploy/mymab-abtest-df66c5c -n test1
###Output
_____no_output_____
###Markdown
Get predictions
###Code
r = rest_request_api_gateway("oauth-key","oauth-secret","test1",API_GATEWAY_REST)
print(r.text)
r = rest_request_ambassador("mymab","test1",API_AMBASSADOR)
print(r.text)
###Output
_____no_output_____
###Markdown
gRPC Request
###Code
grpc_request_ambassador("mymab","test1",API_AMBASSADOR)
grpc_request_api_gateway("oauth-key","oauth-secret","test1",rest_endpoint=API_GATEWAY_REST,grpc_endpoint=API_GATEWAY_GRPC)
!helm delete mymab --purge
###Output
_____no_output_____
###Markdown
Example Seldon Core Deployments using Helm PrerequisitesYou will need - [Git clone of Seldon Core](https://github.com/SeldonIO/seldon-core) - A running Kubernetes cluster with kubectl authenticated - [python grpc tools](https://grpc.io/docs/quickstart/python.html) - [Helm client](https://helm.sh/) Creating a Kubernetes ClusterFollow the [Kubernetes documentation to create a cluster](https://kubernetes.io/docs/setup/).Once created, ensure ```kubectl``` is authenticated against the running cluster. Setup
###Code
!kubectl create namespace seldon
!kubectl config set-context $(kubectl config current-context) --namespace=seldon
!kubectl create clusterrolebinding kube-system-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default
###Output
_____no_output_____
###Markdown
Install Helm
###Code
!kubectl -n kube-system create sa tiller
!kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
!helm init --service-account tiller
!kubectl rollout status deploy/tiller-deploy -n kube-system
###Output
_____no_output_____
###Markdown
Start seldon-core
###Code
!helm install ../../helm-charts/seldon-core-crd --name seldon-core-crd --set usage_metrics.enabled=true
!helm install ../../helm-charts/seldon-core --name seldon-core --namespace seldon --set ambassador.enabled=true --set single_namespace=false --set ambassador.rbac.namespaced=false
!kubectl rollout status deploy/seldon-core-seldon-cluster-manager
!kubectl rollout status deploy/seldon-core-seldon-apiserver
!kubectl rollout status deploy/seldon-core-ambassador
###Output
_____no_output_____
###Markdown
Set up REST and gRPC methods**Ensure you port forward ambassador and API gateway**:```kubectl port-forward $(kubectl get pods -n seldon -l app.kubernetes.io/name=ambassador -o jsonpath='{.items[0].metadata.name}') -n seldon 8003:8080``````kubectl port-forward $(kubectl get pods -n seldon -l app=seldon-apiserver-container-app -o jsonpath='{.items[0].metadata.name}') -n seldon 8002:8080``````kubectl port-forward $(kubectl get pods -n seldon -l app=seldon-apiserver-container-app -o jsonpath='{.items[0].metadata.name}') -n seldon 8004:5000``` Serve Single Model
###Code
!kubectl create namespace test1
!helm install ../../helm-charts/seldon-single-model --name mymodel --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1
!kubectl rollout status deploy/mymodel-mymodel-7cd068f -n test1
###Output
_____no_output_____
###Markdown
Get predictions
###Code
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="mymodel",namespace="test1",oauth_key="oauth-key",oauth_secret="oauth-secret")
###Output
_____no_output_____
###Markdown
REST Request
###Code
p = sc.predict(gateway="seldon",transport="rest")
print(p)
p = sc.predict(gateway="ambassador",transport="rest")
print(p)
###Output
_____no_output_____
###Markdown
gRPC Request
###Code
p = sc.predict(gateway="ambassador",transport="grpc")
print(p)
p = sc.predict(gateway="seldon",transport="grpc")
print(p)
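# Optional sketch: send an explicit payload instead of the client's auto-generated one.
# This assumes this version of seldon_core's SeldonClient.predict accepts a `data` keyword
# taking a numpy array, and that the array shape matches the deployed model's input --
# both are assumptions to verify against your seldon_core version.
import numpy as np
payload = np.random.rand(1, 4)  # arbitrary illustrative shape/values
p = sc.predict(gateway="ambassador", transport="rest", data=payload)
print(p)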
!helm delete mymodel --purge
###Output
_____no_output_____
###Markdown
Serve AB Test
###Code
!helm install ../../helm-charts/seldon-abtest --name myabtest --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1
!kubectl rollout status deploy/myabtest-abtest-41de5b8 -n test1
!kubectl rollout status deploy/myabtest-abtest-df66c5c -n test1
###Output
_____no_output_____
###Markdown
Get predictions
###Code
sc = SeldonClient(deployment_name="myabtest",namespace="test1",oauth_key="oauth-key",oauth_secret="oauth-secret")
r = sc.predict(gateway="seldon",transport="rest")
print(r)
r = sc.predict(gateway="ambassador",transport="rest")
print(r)
###Output
_____no_output_____
###Markdown
gRPC Request
###Code
r = sc.predict(gateway="ambassador",transport="grpc")
print(r)
r = sc.predict(gateway="seldon",transport="grpc")
print(r)
!helm delete myabtest --purge
###Output
_____no_output_____
###Markdown
Serve Multi-Armed Bandit
###Code
!helm install ../../helm-charts/seldon-mab --name mymab --set oauth.key=oauth-key --set oauth.secret=oauth-secret --namespace test1
!kubectl rollout status deploy/mymab-abtest-41de5b8 -n test1
!kubectl rollout status deploy/mymab-abtest-b8038b2 -n test1
!kubectl rollout status deploy/mymab-abtest-df66c5c -n test1
###Output
_____no_output_____
###Markdown
Get predictions
###Code
sc = SeldonClient(deployment_name="mymab",namespace="test1",oauth_key="oauth-key",oauth_secret="oauth-secret")
r = sc.predict(gateway="seldon",transport="rest")
print(r)
r = sc.predict(gateway="ambassador",transport="rest")
print(r)
###Output
_____no_output_____
###Markdown
gRPC Request
###Code
r = sc.predict(gateway="ambassador",transport="grpc")
print(r)
r = sc.predict(gateway="seldon",transport="grpc")
print(r)
!helm delete mymab --purge
###Output
_____no_output_____ |
dev_notebooks/expipe/data-management-workshop-NRSN-DLN-2019-master/expipe/expipe-tutorial.ipynb | ###Markdown
Making a projectWhen making a project we provide a path to where we want the project to reside
###Code
import expipe  # assumed import; expipe is used throughout this notebook
project = expipe.require_project('ERT plant')
###Output
_____no_output_____
###Markdown
Adding user info in a module
###Code
project.modules['user_info'] = {
'Benjamin': {
'institute': 'UNIPD',
'sex': 'M'
},
'Guillaume': {
'institute': 'Liege',
'sex': 'M'
}
}
project.modules
###Output
_____no_output_____
###Markdown
Entity
###Code
rhizotron = project.require_entity('rhizotron')
rhizotron.users = ['Benjamin', 'Guillaume']
project.entities
###Output
_____no_output_____
###Markdown
Adding a module with templateIt can be tedious to add all the keys and corresponding information every time we make a module, so we use templates. In order to use templates we must add our desired templates to project_path/templates.
###Code
rhizotron.modules['info'] = {
'weight': '10kg',
'soil': 'water'
}
###Output
_____no_output_____
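###Markdown
A minimal sketch of how the same metadata could come from a template instead of a hand-written dictionary, reusing the `create_template`/`create_module` calls that appear later in this notebook; whether entities expose `create_module` exactly like actions do is an assumption here.
###Code
# Hypothetical template whose keys mirror the dictionary above
rhizotron_info = {
    'identifier': 'rhizotron_info',
    'description': 'Basic rhizotron metadata',
    'weight': '',
    'soil': ''
}
project.create_template('rhizotron_info', contents=rhizotron_info)
info = rhizotron.create_module(template='rhizotron_info')  # assumed to work as it does for actions
info['weight'] = '10kg'
info['soil'] = 'water'
###Output
_____no_output_____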
###Markdown
ActionNow we are ready to start adding actions Adding one experimentThe experiment starts with an ERT where we use a Syscal Pro in the laboratory
###Code
project.actions['measurements'] = {
'type': 'ERT',
'entities': ['rhizotron'],
'users': ['Benjamin'],
'tags': ['Syscal Pro', 'laboratory']
}
###Output
_____no_output_____
###Markdown
To see which templates are available we invoke the template browser
###Code
project.actions
project.actions['measurements'].attributes
###Output
_____no_output_____
###Markdown
Adding a message to recording
###Code
rhizotron.create_message('Successful recording, albeit a bit noisy', user='Benjamin')
###Output
_____no_output_____
###Markdown
Creating a template
###Code
import quantities as pq  # assumed: the python-quantities package provides the pq.um units used below
probe_location = {
'identifier': 'probe_location',
'description': 'v1l, v1r represents primary visual cortex left and right respectively.',
'v1l': {
'probe_0': ''
},
'v1r': {
'probe_0': ''
}
}
project.create_template('probe_location', contents=probe_location)
# Note: this assumes an action named 'ecephys_2' already exists in the project
probe_location_2 = project.actions['ecephys_2'].create_module(template='probe_location')
probe_location_2['v1l']['probe_0'] = 1000 * pq.um
probe_location_2['v1r']['probe_0'] = 1000 * pq.um
project.actions['ecephys_2'].modules
###Output
_____no_output_____
###Markdown
Using the browser
###Code
expipe.Browser(project.path).display()
import pandas as pd  # assumed import; needed for read_csv below
df = pd.read_csv(project.actions['all'].data_path('sessions'))
df
###Output
_____no_output_____ |
notebooks/Northwind Blows - oh how it blows.ipynb | ###Markdown
Explore data files on local disk
###Code
import pandas as pd
import numpy
dataroot = "/data/data-files/graph-data/northwind/data"
orders = pd.read_csv(dataroot + "/orders.csv")
order_details = pd.read_csv(dataroot + "/order-details.csv")
customers = pd.read_csv(dataroot + "/customers.csv")
reps = pd.read_csv(dataroot + "/employees.csv")
order_details.dtypes
order_details.describe().round()
order_details.info()
order_details.head(20)
customers['Region'].unique()
%%bash
# rm -fv orders.db
orders.info()
orders[orders['CommissionAmt'].notnull()]
# conn = sqlite3.connect('orders.db')
# orders.to_sql('orders', conn, if_exists='replace')
# order_details.to_sql('order_details', conn, if_exists='replace')
# conn.close()
reps['BirthDate']
###Output
_____no_output_____
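###Markdown
A small exploratory sketch that could follow here: join the order headers to their line items and compute revenue per order. The column names (`OrderID`, `UnitPrice`, `Quantity`, `Discount`) are assumed from the classic Northwind schema and should be checked against these particular CSVs.
###Code
# Hypothetical join -- verify the key and column names against orders.csv and order-details.csv first
line_items = order_details.merge(orders, on="OrderID", how="inner")
line_items["LineTotal"] = line_items["UnitPrice"] * line_items["Quantity"] * (1 - line_items["Discount"])
revenue_per_order = line_items.groupby("OrderID")["LineTotal"].sum().sort_values(ascending=False)
revenue_per_order.head(10)
###Output
_____no_output_____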
###Markdown
Connect to the Northwind Graph using PyTigerGraph
###Code
# import sqlite3
import pyTigerGraph as tg
# host = "http://medsales.34.106.148.222.nip.io:14240"
host = "https://homelab-k3s.172.16.17.201.nip.io" # must include the protocol http or https
username = "tigergraph"
password = "Tigergraph"
graphName = "Northwind" # leave blank to use Global
restppPort = 9000 # default 9000
gsPort = 14240 # default 14240
mysecret = "ll5k45jrrvu0b4gmk4f7fokilgbgam4g"
# First establish a basic connection using a secret. Do *not* do this if you already have a token
# host = "http://34.106.148.222.nip.io"
# token="o4luvshk47rticuhqi0cdokiq739o8i9"
conn = tg.TigerGraphConnection(host=host, restppPort=restppPort, gsPort=gsPort, graphname=graphName, password=password)
token = conn.getToken(mysecret, setToken=True, lifetime=None)
# Next use the new token to establish a full access connection for use with GSQL
conn = tg.TigerGraphConnection(host=host, restppPort=restppPort, gsPort=gsPort, graphname=graphName, password=password, apiToken=token[0])
token[0]
conn.getVertexTypes()
# conn.getVertexTypes()
# conn.runInstalledQuery('floors')
print(conn.gsql('show edge *', options=[]))
conn.gsql('drop graph gort', options=[])
print(conn.gsql('show graph *', options=[]))
###Output
_____no_output_____ |
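###Markdown
A hedged sketch of a possible next step once the connection works: counting vertices and pushing one of the pandas frames into the graph. The vertex type `Customer`, its id column `CustomerID`, and the attribute names are assumptions about the Northwind graph schema, which is not shown here.
###Code
# Assumes the Northwind graph defines a vertex type "Customer" keyed by CustomerID
print(conn.getVertexCount("Customer"))
# upsertVertexDataFrame maps graph attributes (keys) to dataframe columns (values); the mapping here is illustrative
conn.upsertVertexDataFrame(customers, "Customer", v_id="CustomerID",
                           attributes={"companyName": "CompanyName", "region": "Region"})
###Output
_____no_output_____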
notebooks/Alternative-Combos-Of-Parameter-Values.ipynb | ###Markdown
Alternative Combinations of Parameter ValuesPlease write the names and email addresses of everyone who worked on this notebook on the line below.YOUR NAMES HERE IntroductionThe notebook "Micro-and-Macro-Implications-of-Very-Impatient-HHs" is an exercise that demonstrates the consequences of changing a key parameter of the [cstwMPC](http://econ.jhu.edu/people/ccarroll/papers/cstwMPC) model, the time preference factor $\beta$.The [REMARK](https://github.com/econ-ark/REMARK) `SolvingMicroDSOPs` reproduces the last figure in the [SolvingMicroDSOPs](http://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs) lecture notes, which shows that there are classes of alternate values of $\beta$ and $\rho$ that fit the data almost as well as the exact 'best fit' combination.Inspired by this comparison, this notebook asks you to examine the consequences for:* The consumption function* The distribution of wealthOf _joint_ changes in $\beta$ and $\rho$ together. One way you can do this is to construct a list of alternative values of $\rho$ (say, values that range upward from the default value of $\rho$, in increments of 0.2, all the way to $\rho=5$). Then for each of these values of $\rho$ you will find the value of $\beta$ that leads the same value for target market resources, $\check{m}$.As a reminder, $\check{m}$ is defined as the value of $m$ at which the optimal value of ${c}$ is the value such that, at that value of ${c}$, the expected level of ${m}$ next period is the same as its current value:$\mathbb{E}_{t}[{m}_{t+1}] = {m}_{t}$ Other notes:* The cstwMPC model solves and simulates the problems of consumers with 7 different values of $\beta$ * You should do your exercise using the middle value of $\beta$ from that exercise: * `DiscFac_mean = 0.9855583`* You are likely to run into the problem, as you experiment with parameter values, that you have asked HARK to solve a model that does not satisfy one of the impatience conditions required for the model to have a solution. Those conditions are explained intuitively in the [TractableBufferStock](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/TractableBufferStock/) model. The versions of the impatience conditions that apply to the $\texttt{IndShockConsumerType}$ model can be found in the paper [BufferStockTheory](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory), table 2. * The conditions that need to be satisfied are: * The Growth Impatience Condition (GIC) * The Return Impatience Condition (RIC)* Please accumulate the list of solved consumers' problems in a list called `MyTypes` * For compatibility with a further part of the assignment below
###Code
# This cell merely imports and sets up some basic functions and packages
%matplotlib inline
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath('../lib'))
from util import log_progress
import numpy as np
from copy import deepcopy
import HARK # Prevents import error from Demos repo
from HARK.utilities import plotFuncs
# Import IndShockConsumerType
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
# Define a dictionary with calibrated parameters
cstwMPC_calibrated_parameters = {
"CRRA":1.0, # Coefficient of relative risk aversion
"Rfree":1.01/(1.0 - 1.0/160.0), # Survival probability,
"PermGroFac":[1.000**0.25], # Permanent income growth factor (no perm growth),
"PermGroFacAgg":1.0,
"BoroCnstArt":0.0,
"CubicBool":False,
"vFuncBool":False,
"PermShkStd":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income
"PermShkCount":5, # Number of points in permanent income shock grid
"TranShkStd":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income,
"TranShkCount":5, # Number of points in transitory income shock grid
"UnempPrb":0.07, # Probability of unemployment while working
"IncUnemp":0.15, # Unemployment benefit replacement rate
"UnempPrbRet":None,
"IncUnempRet":None,
"aXtraMin":0.00001, # Minimum end-of-period assets in grid
"aXtraMax":40, # Maximum end-of-period assets in grid
"aXtraCount":32, # Number of points in assets grid
"aXtraExtra":[None],
"aXtraNestFac":3, # Number of times to 'exponentially nest' when constructing assets grid
"LivPrb":[1.0 - 1.0/160.0], # Survival probability
"DiscFac":0.97, # Default intertemporal discount factor; dummy value, will be overwritten
"cycles":0,
"T_cycle":1,
"T_retire":0,
'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)
'T_age': 400,
'IndL': 10.0/9.0, # Labor supply per individual (constant),
'aNrmInitMean':np.log(0.00001),
'aNrmInitStd':0.0,
'pLvlInitMean':0.0,
'pLvlInitStd':0.0,
'AgentCount':10000,
}
###Output
_____no_output_____
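###Markdown
A minimal sketch, not a definitive solution, of one way to build `MyTypes`: for each candidate $\rho$, search for the discount factor whose (approximate) target $\check{m}$ matches the baseline's. The law of motion used below, $\mathbb{E}_t[m_{t+1}] \approx (R/\Gamma)\,(m - c(m)) + 1$, ignores the shock structure, and the search brackets are guesses; both may need adjusting, especially where the GIC/RIC start to fail.
###Code
from scipy.optimize import brentq

DiscFac_mean = 0.9855583

def approx_target_m(agent):
    # Solve the agent, then find m where (approximately) E[m_{t+1}] = m_t
    agent.solve()
    cFunc = agent.solution[0].cFunc
    R = agent.Rfree
    Gamma = agent.PermGroFac[0]
    gap = lambda m: (R / Gamma) * (m - cFunc(m)) + 1.0 - m
    return brentq(gap, 0.1, 1000.0)  # upper bound may need to be raised for very patient types

baseline = IndShockConsumerType(**cstwMPC_calibrated_parameters)
baseline.DiscFac = DiscFac_mean
m_check = approx_target_m(baseline)

MyTypes = []
for rho in np.arange(1.0, 5.2, 0.2):
    def beta_gap(beta, rho=rho):
        candidate = IndShockConsumerType(**cstwMPC_calibrated_parameters)
        candidate.CRRA = rho        # depending on the HARK version you may need candidate.update()
        candidate.DiscFac = beta
        return approx_target_m(candidate) - m_check
    try:
        beta_star = brentq(beta_gap, 0.90, 0.99)  # bracket around DiscFac_mean is a guess
    except Exception:
        continue  # no root in the bracket or the solver failed (impatience conditions); skip this rho
    NewType = IndShockConsumerType(**cstwMPC_calibrated_parameters)
    NewType.CRRA = rho
    NewType.DiscFac = beta_star
    MyTypes.append(NewType)
###Output
_____no_output_____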
###Markdown
Simulating the Distribution of Wealth for Alternative CombinationsYou should now have constructed a list of consumer types all of whom have the same _target_ level of market resources $\check{m}$. But the fact that everyone has the same target ${m}$ does not mean that the _distribution_ of ${m}$ will be the same for all of these consumer types.In the code block below, fill in the contents of the loop to solve and simulate each agent type for many periods. To do this, you should invoke the methods $\texttt{solve}$, $\texttt{initializeSim}$, and $\texttt{simulate}$ in that order. Simulating for 1200 quarters (300 years) will approximate the long run distribution of wealth in the population.
###Code
for ThisType in log_progress(MyTypes, every=1):
ThisType.solve()
ThisType.initializeSim()
ThisType.simulate()
###Output
_____no_output_____
###Markdown
Now that you have solved and simulated these consumers, make a plot that shows the relationship between your alternative values of $\rho$ and the mean level of assets
###Code
# To help you out, we have given you the command needed to construct a list of the levels of assets for all consumers
aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# You should take the mean of aLvl for each consumer in MyTypes, divide it by the mean across all simulations
# and then plot the ratio of the values of mean(aLvl) for each group against the value of $\rho$
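# A possible sketch (one of several valid approaches), assuming each consumer in MyTypes
# carries the CRRA it was constructed with:
rho_vals = [ThisType.CRRA for ThisType in MyTypes]
mean_aLvl_by_type = np.array([np.mean(ThisType.aLvlNow) for ThisType in MyTypes])
plt.plot(rho_vals, mean_aLvl_by_type / np.mean(aLvl_all), 'o-')
plt.xlabel(r'$\rho$')
plt.ylabel('mean assets relative to overall mean')
plt.show()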
###Output
_____no_output_____
###Markdown
InterpretHere, you should attempt to give an intuitive explanation of the results you see in the figure you just constructed. The Distribution of Wealth...Your next exercise is to show how the distribution of wealth differs for the different parameter values
###Code
from HARK.utilities import getLorenzShares, getPercentiles
# Finish filling in this function to calculate the Euclidean distance between the simulated and actual Lorenz curves.
def calcLorenzDistance(SomeTypes):
'''
Calculates the Euclidean distance between the simulated and actual (from SCF data) Lorenz curves at the
20th, 40th, 60th, and 80th percentiles.
Parameters
----------
SomeTypes : [AgentType]
List of AgentTypes that have been solved and simulated. Current levels of individual assets should
be stored in the attribute aLvlNow.
Returns
-------
lorenz_distance : float
Euclidean distance (square root of sum of squared differences) between simulated and actual Lorenz curves.
'''
# Define empirical Lorenz curve points
lorenz_SCF = np.array([-0.00183091, 0.0104425 , 0.0552605 , 0.1751907 ])
# Extract asset holdings from all consumer types
aLvl_sim = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# Calculate simulated Lorenz curve points
lorenz_sim = getLorenzShares(aLvl_sim,percentiles=[0.2,0.4,0.6,0.8])
# Calculate the Euclidean distance between the simulated and actual Lorenz curves
lorenz_distance = np.sqrt(np.sum((lorenz_SCF - lorenz_sim)**2))
# Return the Lorenz distance
return lorenz_distance
###Output
_____no_output_____
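###Markdown
A hedged sketch (not required by the assignment) of how the simulated Lorenz curves could be plotted for each parameter combination, using the `getLorenzShares` helper imported above; the percentile grid is an arbitrary choice.
###Code
pctiles = np.linspace(0.001, 0.999, 100)
for ThisType in MyTypes:
    lorenz = getLorenzShares(ThisType.aLvlNow, percentiles=pctiles)
    plt.plot(pctiles, lorenz, label=r'$\rho=$' + str(ThisType.CRRA))
plt.plot(pctiles, pctiles, 'k--', label='perfect equality')
plt.xlabel('wealth percentile')
plt.ylabel('cumulative wealth share')
plt.legend()
plt.show()
###Output
_____no_output_____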
###Markdown
...and the Marginal Propensity to ConsumeNow let's look at the aggregate MPC. In the code block below, write a function that produces text output of the following form:$\texttt{The 35th percentile of the MPC is 0.15623}$Your function should take two inputs: a list of types of consumers and an array of percentiles (numbers between 0 and 1). It should return no outputs, merely print to screen one line of text for each requested percentile. The model is calibrated at a quarterly frequency, but Carroll et al report MPCs at an annual frequency. To convert, use the formula:$\kappa_{Y} \approx 1.0 - (1.0 - \kappa_{Q})^4$
###Code
# Write a function to tell us about the distribution of the MPC in this code block, then test it!
# You will almost surely find it useful to use a for loop in this function.
def describeMPCdstn(SomeTypes,percentiles):
MPC_sim = np.concatenate([ThisType.MPCnow for ThisType in SomeTypes])
MPCpercentiles_quarterly = getPercentiles(MPC_sim,percentiles=percentiles)
MPCpercentiles_annual = 1.0 - (1.0 - MPCpercentiles_quarterly)**4
for j in range(len(percentiles)):
print('The ' + str(100*percentiles[j]) + 'th percentile of the MPC is ' + str(MPCpercentiles_annual[j]))
describeMPCdstn(MyTypes,np.linspace(0.05,0.95,19))
###Output
_____no_output_____
###Markdown
Alternative Combinations of Parameter Values[](https://econ-ark.org/materials/alternative-combos-of-parameter-valueslaunch)Please write the names and email addresses of everyone who worked on this notebook on the line below.YOUR NAMES HERE IntroductionThe notebook "Micro-and-Macro-Implications-of-Very-Impatient-HHs" is an exercise that demonstrates the consequences of changing a key parameter of the [cstwMPC](http://econ.jhu.edu/people/ccarroll/papers/cstwMPC) model, the time preference factor $\beta$.The [REMARK](https://github.com/econ-ark/REMARK) `SolvingMicroDSOPs` reproduces the last figure in the [SolvingMicroDSOPs](http://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs) lecture notes, which shows that there are classes of alternate values of $\beta$ and $\rho$ that fit the data almost as well as the exact 'best fit' combination.Inspired by this comparison, this notebook asks you to examine the consequences for:* The consumption function* The distribution of wealthOf _joint_ changes in $\beta$ and $\rho$ together. One way you can do this is to construct a list of alternative values of $\rho$ (say, values that range upward from the default value of $\rho$, in increments of 0.2, all the way to $\rho=5$). Then for each of these values of $\rho$ you will find the value of $\beta$ that leads the same value for target market resources, $\check{m}$.As a reminder, $\check{m}$ is defined as the value of $m$ at which the optimal value of ${c}$ is the value such that, at that value of ${c}$, the expected level of ${m}$ next period is the same as its current value:$\mathbb{E}_{t}[{m}_{t+1}] = {m}_{t}$ Other notes:* The cstwMPC model solves and simulates the problems of consumers with 7 different values of $\beta$ * You should do your exercise using the middle value of $\beta$ from that exercise: * `DiscFac_mean = 0.9855583`* You are likely to run into the problem, as you experiment with parameter values, that you have asked HARK to solve a model that does not satisfy one of the impatience conditions required for the model to have a solution. Those conditions are explained intuitively in the [TractableBufferStock](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/TractableBufferStock/) model. The versions of the impatience conditions that apply to the $\texttt{IndShockConsumerType}$ model can be found in the paper [BufferStockTheory](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory), table 2. * The conditions that need to be satisfied are: * The Growth Impatience Condition (GIC) * The Return Impatience Condition (RIC)* Please accumulate the list of solved consumers' problems in a list called `MyTypes` * For compatibility with a further part of the assignment below
###Code
# This cell merely imports and sets up some basic functions and packages
%matplotlib inline
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
from copy import deepcopy
import HARK # Prevents import error from Demos repo
from HARK.utilities import plot_funcs
# Import IndShockConsumerType
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
# Define a dictionary with calibrated parameters
cstwMPC_calibrated_parameters = {
"CRRA":1.0, # Coefficient of relative risk aversion
"Rfree":1.01/(1.0 - 1.0/160.0), # Survival probability,
"PermGroFac":[1.000**0.25], # Permanent income growth factor (no perm growth),
"PermGroFacAgg":1.0,
"BoroCnstArt":0.0,
"CubicBool":False,
"vFuncBool":False,
"PermShkStd":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income
"PermShkCount":5, # Number of points in permanent income shock grid
"TranShkStd":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income,
"TranShkCount":5, # Number of points in transitory income shock grid
"UnempPrb":0.07, # Probability of unemployment while working
"IncUnemp":0.15, # Unemployment benefit replacement rate
"UnempPrbRet":None,
"IncUnempRet":None,
"aXtraMin":0.00001, # Minimum end-of-period assets in grid
"aXtraMax":40, # Maximum end-of-period assets in grid
"aXtraCount":32, # Number of points in assets grid
"aXtraExtra":[None],
"aXtraNestFac":3, # Number of times to 'exponentially nest' when constructing assets grid
"LivPrb":[1.0 - 1.0/160.0], # Survival probability
"DiscFac":0.97, # Default intertemporal discount factor; dummy value, will be overwritten
"cycles":0,
"T_cycle":1,
"T_retire":0,
'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)
'T_age': 400,
'IndL': 10.0/9.0, # Labor supply per individual (constant),
'aNrmInitMean':np.log(0.00001),
'aNrmInitStd':0.0,
'pLvlInitMean':0.0,
'pLvlInitStd':0.0,
'AgentCount':10000,
}
# Construct a list of solved consumers' problems, IndShockConsumerType is just a place holder
MyTypes = [IndShockConsumerType(verbose=0, **cstwMPC_calibrated_parameters)]
###Output
_____no_output_____
###Markdown
Simulating the Distribution of Wealth for Alternative CombinationsYou should now have constructed a list of consumer types all of whom have the same _target_ level of market resources $\check{m}$. But the fact that everyone has the same target ${m}$ does not mean that the _distribution_ of ${m}$ will be the same for all of these consumer types.In the code block below, fill in the contents of the loop to solve and simulate each agent type for many periods. To do this, you should invoke the methods $\texttt{solve}$, $\texttt{initialize_sim}$, and $\texttt{simulate}$ in that order. Simulating for 1200 quarters (300 years) will approximate the long run distribution of wealth in the population.
###Code
for ThisType in tqdm(MyTypes):
ThisType.solve()
ThisType.initialize_sim()
ThisType.simulate()
###Output
_____no_output_____
###Markdown
Now that you have solved and simulated these consumers, make a plot that shows the relationship between your alternative values of $\rho$ and the mean level of assets
###Code
# To help you out, we have given you the command needed to construct a list of the levels of assets for all consumers
aLvl_all = np.concatenate([ThisType.state_now["aLvl"] for ThisType in MyTypes])
# You should take the mean of aLvl for each consumer in MyTypes, divide it by the mean across all simulations
# and then plot the ratio of the values of mean(aLvl) for each group against the value of $\rho$
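# A possible sketch under the same assumption that each type records the CRRA it was built with:
rho_vals = [ThisType.CRRA for ThisType in MyTypes]
mean_aLvl_by_type = np.array([np.mean(ThisType.state_now["aLvl"]) for ThisType in MyTypes])
plt.plot(rho_vals, mean_aLvl_by_type / np.mean(aLvl_all), 'o-')
plt.xlabel(r'$\rho$')
plt.ylabel('mean assets relative to overall mean')
plt.show()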
###Output
_____no_output_____
###Markdown
InterpretHere, you should attempt to give an intuitive explanation of the results you see in the figure you just constructed. The Distribution of Wealth...Your next exercise is to show how the distribution of wealth differs for the different parameter values
###Code
from HARK.utilities import get_lorenz_shares, get_percentiles
# Finish filling in this function to calculate the Euclidean distance between the simulated and actual Lorenz curves.
def calcLorenzDistance(SomeTypes):
'''
Calculates the Euclidean distance between the simulated and actual (from SCF data) Lorenz curves at the
20th, 40th, 60th, and 80th percentiles.
Parameters
----------
SomeTypes : [AgentType]
List of AgentTypes that have been solved and simulated. Current levels of individual assets should
be stored in the attribute aLvl.
Returns
-------
lorenz_distance : float
Euclidean distance (square root of sum of squared differences) between simulated and actual Lorenz curves.
'''
# Define empirical Lorenz curve points
lorenz_SCF = np.array([-0.00183091, 0.0104425 , 0.0552605 , 0.1751907 ])
# Extract asset holdings from all consumer types
aLvl_sim = np.concatenate([ThisType.aLvl for ThisType in MyTypes])
# Calculate simulated Lorenz curve points
lorenz_sim = get_lorenz_shares(aLvl_sim,percentiles=[0.2,0.4,0.6,0.8])
# Calculate the Euclidean distance between the simulated and actual Lorenz curves
lorenz_distance = np.sqrt(np.sum((lorenz_SCF - lorenz_sim)**2))
# Return the Lorenz distance
return lorenz_distance
###Output
_____no_output_____
###Markdown
...and the Marginal Propensity to ConsumeNow let's look at the aggregate MPC. In the code block below, write a function that produces text output of the following form:$\texttt{The 35th percentile of the MPC is 0.15623}$Your function should take two inputs: a list of types of consumers and an array of percentiles (numbers between 0 and 1). It should return no outputs, merely print to screen one line of text for each requested percentile. The model is calibrated at a quarterly frequency, but Carroll et al report MPCs at an annual frequency. To convert, use the formula:$\kappa_{Y} \approx 1.0 - (1.0 - \kappa_{Q})^4$
###Code
# Write a function to tell us about the distribution of the MPC in this code block, then test it!
# You will almost surely find it useful to use a for loop in this function.
def describeMPCdstn(SomeTypes,percentiles):
MPC_sim = np.concatenate([ThisType.MPCnow for ThisType in SomeTypes])
MPCpercentiles_quarterly = get_percentiles(MPC_sim,percentiles=percentiles)
MPCpercentiles_annual = 1.0 - (1.0 - MPCpercentiles_quarterly)**4
for j in range(len(percentiles)):
print('The ' + str(100*percentiles[j]) + 'th percentile of the MPC is ' + str(MPCpercentiles_annual[j]))
describeMPCdstn(MyTypes,np.linspace(0.05,0.95,19))
###Output
_____no_output_____
###Markdown
Alternative Combinations of Parameter ValuesPlease write the names and email addresses of everyone who worked on this notebook on the line below.YOUR NAMES HERE IntroductionThe notebook "Micro-and-Macro-Implications-of-Very-Impatient-HHs" is an exercise that demonstrates the consequences of changing a key parameter of the [cstwMPC](http://econ.jhu.edu/people/ccarroll/papers/cstwMPC) model, the time preference factor $\beta$.The [REMARK](https://github.com/econ-ark/REMARK) `SolvingMicroDSOPs` reproduces the last figure in the [SolvingMicroDSOPs](http://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs) lecture notes, which shows that there are classes of alternate values of $\beta$ and $\rho$ that fit the data almost as well as the exact 'best fit' combination.Inspired by this comparison, this notebook asks you to examine the consequences for:* The consumption function* The distribution of wealthOf _joint_ changes in $\beta$ and $\rho$ together. One way you can do this is to construct a list of alternative values of $\rho$ (say, values that range upward from the default value of $\rho$, in increments of 0.2, all the way to $\rho=5$). Then for each of these values of $\rho$ you will find the value of $\beta$ that leads the same value for target market resources, $\check{m}$.As a reminder, $\check{m}$ is defined as the value of $m$ at which the optimal value of ${c}$ is the value such that, at that value of ${c}$, the expected level of ${m}$ next period is the same as its current value:$\mathbb{E}_{t}[{m}_{t+1}] = {m}_{t}$ Other notes:* The cstwMPC model solves and simulates the problems of consumers with 7 different values of $\beta$ * You should do your exercise using the middle value of $\beta$ from that exercise: * `DiscFac_mean = 0.9855583`* You are likely to run into the problem, as you experiment with parameter values, that you have asked HARK to solve a model that does not satisfy one of the impatience conditions required for the model to have a solution. Those conditions are explained intuitively in the [TractableBufferStock](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/TractableBufferStock/) model. The versions of the impatience conditions that apply to the $\texttt{IndShockConsumerType}$ model can be found in the paper [BufferStockTheory](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory), table 2. * The conditions that need to be satisfied are: * The Growth Impatience Condition (GIC) * The Return Impatience Condition (RIC)* Please accumulate the list of solved consumers' problems in a list called `MyTypes` * For compatibility with a further part of the assignment below
###Code
# This cell merely imports and sets up some basic functions and packages
%matplotlib inline
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath('../lib'))
from util import log_progress
import numpy as np
from copy import deepcopy
import HARK # Prevents import error from Demos repo
from HARK.utilities import plotFuncs
# Import IndShockConsumerType
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
# Define a dictionary with calibrated parameters
cstwMPC_calibrated_parameters = {
"CRRA":1.0, # Coefficient of relative risk aversion
"Rfree":1.01/(1.0 - 1.0/160.0), # Survival probability,
"PermGroFac":[1.000**0.25], # Permanent income growth factor (no perm growth),
"PermGroFacAgg":1.0,
"BoroCnstArt":0.0,
"CubicBool":False,
"vFuncBool":False,
"PermShkStd":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income
"PermShkCount":5, # Number of points in permanent income shock grid
"TranShkStd":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income,
"TranShkCount":5, # Number of points in transitory income shock grid
"UnempPrb":0.07, # Probability of unemployment while working
"IncUnemp":0.15, # Unemployment benefit replacement rate
"UnempPrbRet":None,
"IncUnempRet":None,
"aXtraMin":0.00001, # Minimum end-of-period assets in grid
"aXtraMax":40, # Maximum end-of-period assets in grid
"aXtraCount":32, # Number of points in assets grid
"aXtraExtra":[None],
"aXtraNestFac":3, # Number of times to 'exponentially nest' when constructing assets grid
"LivPrb":[1.0 - 1.0/160.0], # Survival probability
"DiscFac":0.97, # Default intertemporal discount factor; dummy value, will be overwritten
"cycles":0,
"T_cycle":1,
"T_retire":0,
'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)
'T_age': 400,
'IndL': 10.0/9.0, # Labor supply per individual (constant),
'aNrmInitMean':np.log(0.00001),
'aNrmInitStd':0.0,
'pLvlInitMean':0.0,
'pLvlInitStd':0.0,
'AgentCount':10000,
}
###Output
_____no_output_____
###Markdown
Simulating the Distribution of Wealth for Alternative CombinationsYou should now have constructed a list of consumer types all of whom have the same _target_ level of market resources $\check{m}$. But the fact that everyone has the same target ${m}$ does not mean that the _distribution_ of ${m}$ will be the same for all of these consumer types.In the code block below, fill in the contents of the loop to solve and simulate each agent type for many periods. To do this, you should invoke the methods $\texttt{solve}$, $\texttt{initializeSim}$, and $\texttt{simulate}$ in that order. Simulating for 1200 quarters (300 years) will approximate the long run distribution of wealth in the population.
###Code
for ThisType in log_progress(MyTypes, every=1):
ThisType.solve()
ThisType.initializeSim()
ThisType.simulate()
###Output
_____no_output_____
###Markdown
Now that you have solved and simulated these consumers, make a plot that shows the relationship between your alternative values of $\rho$ and the mean level of assets
###Code
# To help you out, we have given you the command needed to construct a list of the levels of assets for all consumers
aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# You should take the mean of aLvl for each consumer in MyTypes, divide it by the mean across all simulations
# and then plot the ratio of the values of mean(aLvl) for each group against the value of $\rho$
###Output
_____no_output_____
###Markdown
InterpretHere, you should attempt to give an intuitive explanation of the results you see in the figure you just constructed. The Distribution of Wealth...Your next exercise is to show how the distribution of wealth differs for the different parameter values
###Code
from HARK.utilities import getLorenzShares, getPercentiles
# Finish filling in this function to calculate the Euclidean distance between the simulated and actual Lorenz curves.
def calcLorenzDistance(SomeTypes):
'''
Calculates the Euclidean distance between the simulated and actual (from SCF data) Lorenz curves at the
20th, 40th, 60th, and 80th percentiles.
Parameters
----------
SomeTypes : [AgentType]
List of AgentTypes that have been solved and simulated. Current levels of individual assets should
be stored in the attribute aLvlNow.
Returns
-------
lorenz_distance : float
Euclidean distance (square root of sum of squared differences) between simulated and actual Lorenz curves.
'''
# Define empirical Lorenz curve points
lorenz_SCF = np.array([-0.00183091, 0.0104425 , 0.0552605 , 0.1751907 ])
# Extract asset holdings from all consumer types
aLvl_sim = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# Calculate simulated Lorenz curve points
lorenz_sim = getLorenzShares(aLvl_sim,percentiles=[0.2,0.4,0.6,0.8])
# Calculate the Euclidean distance between the simulated and actual Lorenz curves
lorenz_distance = np.sqrt(np.sum((lorenz_SCF - lorenz_sim)**2))
# Return the Lorenz distance
return lorenz_distance
###Output
_____no_output_____
###Markdown
...and the Marginal Propensity to ConsumeNow let's look at the aggregate MPC. In the code block below, write a function that produces text output of the following form:$\texttt{The 35th percentile of the MPC is 0.15623}$Your function should take two inputs: a list of types of consumers and an array of percentiles (numbers between 0 and 1). It should return no outputs, merely print to screen one line of text for each requested percentile. The model is calibrated at a quarterly frequency, but Carroll et al report MPCs at an annual frequency. To convert, use the formula:$\kappa_{Y} \approx 1.0 - (1.0 - \kappa_{Q})^4$
###Code
# Write a function to tell us about the distribution of the MPC in this code block, then test it!
# You will almost surely find it useful to use a for loop in this function.
def describeMPCdstn(SomeTypes,percentiles):
MPC_sim = np.concatenate([ThisType.MPCnow for ThisType in SomeTypes])
MPCpercentiles_quarterly = getPercentiles(MPC_sim,percentiles=percentiles)
MPCpercentiles_annual = 1.0 - (1.0 - MPCpercentiles_quarterly)**4
for j in range(len(percentiles)):
print('The ' + str(100*percentiles[j]) + 'th percentile of the MPC is ' + str(MPCpercentiles_annual[j]))
describeMPCdstn(MyTypes,np.linspace(0.05,0.95,19))
###Output
_____no_output_____
###Markdown
Alternative Combinations of Parameter Values[](https://mybinder.org/v2/gh/econ-ark/DemArk/master?filepath=notebooks%2FAlternative-Combos-Of-Parameter-Values.ipynb)Please write the names and email addresses of everyone who worked on this notebook on the line below.YOUR NAMES HERE IntroductionThe notebook "Micro-and-Macro-Implications-of-Very-Impatient-HHs" is an exercise that demonstrates the consequences of changing a key parameter of the [cstwMPC](http://econ.jhu.edu/people/ccarroll/papers/cstwMPC) model, the time preference factor $\beta$.The [REMARK](https://github.com/econ-ark/REMARK) `SolvingMicroDSOPs` reproduces the last figure in the [SolvingMicroDSOPs](http://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs) lecture notes, which shows that there are classes of alternate values of $\beta$ and $\rho$ that fit the data almost as well as the exact 'best fit' combination.Inspired by this comparison, this notebook asks you to examine the consequences for:* The consumption function* The distribution of wealthOf _joint_ changes in $\beta$ and $\rho$ together. One way you can do this is to construct a list of alternative values of $\rho$ (say, values that range upward from the default value of $\rho$, in increments of 0.2, all the way to $\rho=5$). Then for each of these values of $\rho$ you will find the value of $\beta$ that leads the same value for target market resources, $\check{m}$.As a reminder, $\check{m}$ is defined as the value of $m$ at which the optimal value of ${c}$ is the value such that, at that value of ${c}$, the expected level of ${m}$ next period is the same as its current value:$\mathbb{E}_{t}[{m}_{t+1}] = {m}_{t}$ Other notes:* The cstwMPC model solves and simulates the problems of consumers with 7 different values of $\beta$ * You should do your exercise using the middle value of $\beta$ from that exercise: * `DiscFac_mean = 0.9855583`* You are likely to run into the problem, as you experiment with parameter values, that you have asked HARK to solve a model that does not satisfy one of the impatience conditions required for the model to have a solution. Those conditions are explained intuitively in the [TractableBufferStock](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/TractableBufferStock/) model. The versions of the impatience conditions that apply to the $\texttt{IndShockConsumerType}$ model can be found in the paper [BufferStockTheory](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory), table 2. * The conditions that need to be satisfied are: * The Growth Impatience Condition (GIC) * The Return Impatience Condition (RIC)* Please accumulate the list of solved consumers' problems in a list called `MyTypes` * For compatibility with a further part of the assignment below
###Code
# This cell merely imports and sets up some basic functions and packages
%matplotlib inline
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath('../lib'))
from util import log_progress
import numpy as np
from copy import deepcopy
import HARK # Prevents import error from Demos repo
from HARK.utilities import plotFuncs
# Import IndShockConsumerType
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
# Define a dictionary with calibrated parameters
cstwMPC_calibrated_parameters = {
"CRRA":1.0, # Coefficient of relative risk aversion
"Rfree":1.01/(1.0 - 1.0/160.0), # Survival probability,
"PermGroFac":[1.000**0.25], # Permanent income growth factor (no perm growth),
"PermGroFacAgg":1.0,
"BoroCnstArt":0.0,
"CubicBool":False,
"vFuncBool":False,
"PermShkStd":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income
"PermShkCount":5, # Number of points in permanent income shock grid
"TranShkStd":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income,
"TranShkCount":5, # Number of points in transitory income shock grid
"UnempPrb":0.07, # Probability of unemployment while working
"IncUnemp":0.15, # Unemployment benefit replacement rate
"UnempPrbRet":None,
"IncUnempRet":None,
"aXtraMin":0.00001, # Minimum end-of-period assets in grid
"aXtraMax":40, # Maximum end-of-period assets in grid
"aXtraCount":32, # Number of points in assets grid
"aXtraExtra":[None],
"aXtraNestFac":3, # Number of times to 'exponentially nest' when constructing assets grid
"LivPrb":[1.0 - 1.0/160.0], # Survival probability
"DiscFac":0.97, # Default intertemporal discount factor; dummy value, will be overwritten
"cycles":0,
"T_cycle":1,
"T_retire":0,
'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)
'T_age': 400,
'IndL': 10.0/9.0, # Labor supply per individual (constant),
'aNrmInitMean':np.log(0.00001),
'aNrmInitStd':0.0,
'pLvlInitMean':0.0,
'pLvlInitStd':0.0,
'AgentCount':10000,
}
###Output
_____no_output_____
###Markdown
Simulating the Distribution of Wealth for Alternative CombinationsYou should now have constructed a list of consumer types all of whom have the same _target_ level of market resources $\check{m}$. But the fact that everyone has the same target ${m}$ does not mean that the _distribution_ of ${m}$ will be the same for all of these consumer types.In the code block below, fill in the contents of the loop to solve and simulate each agent type for many periods. To do this, you should invoke the methods $\texttt{solve}$, $\texttt{initializeSim}$, and $\texttt{simulate}$ in that order. Simulating for 1200 quarters (300 years) will approximate the long run distribution of wealth in the population.
###Code
for ThisType in log_progress(MyTypes, every=1):
ThisType.solve()
ThisType.initializeSim()
ThisType.simulate()
###Output
_____no_output_____
###Markdown
Now that you have solved and simulated these consumers, make a plot that shows the relationship between your alternative values of $\rho$ and the mean level of assets
###Code
# To help you out, we have given you the command needed to construct a list of the levels of assets for all consumers
aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# You should take the mean of aLvl for each consumer in MyTypes, divide it by the mean across all simulations
# and then plot the ratio of the values of mean(aLvl) for each group against the value of $\rho$
###Output
_____no_output_____
###Markdown
InterpretHere, you should attempt to give an intuitive explanation of the results you see in the figure you just constructed. The Distribution of Wealth...Your next exercise is to show how the distribution of wealth differs for the different parameter values
###Code
from HARK.utilities import getLorenzShares, getPercentiles
# Finish filling in this function to calculate the Euclidean distance between the simulated and actual Lorenz curves.
def calcLorenzDistance(SomeTypes):
'''
Calculates the Euclidean distance between the simulated and actual (from SCF data) Lorenz curves at the
20th, 40th, 60th, and 80th percentiles.
Parameters
----------
SomeTypes : [AgentType]
List of AgentTypes that have been solved and simulated. Current levels of individual assets should
be stored in the attribute aLvlNow.
Returns
-------
lorenz_distance : float
Euclidean distance (square root of sum of squared differences) between simulated and actual Lorenz curves.
'''
# Define empirical Lorenz curve points
lorenz_SCF = np.array([-0.00183091, 0.0104425 , 0.0552605 , 0.1751907 ])
# Extract asset holdings from all consumer types
aLvl_sim = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# Calculate simulated Lorenz curve points
lorenz_sim = getLorenzShares(aLvl_sim,percentiles=[0.2,0.4,0.6,0.8])
# Calculate the Euclidean distance between the simulated and actual Lorenz curves
lorenz_distance = np.sqrt(np.sum((lorenz_SCF - lorenz_sim)**2))
# Return the Lorenz distance
return lorenz_distance
###Output
_____no_output_____
###Markdown
...and the Marginal Propensity to ConsumeNow let's look at the aggregate MPC. In the code block below, write a function that produces text output of the following form:$\texttt{The 35th percentile of the MPC is 0.15623}$Your function should take two inputs: a list of types of consumers and an array of percentiles (numbers between 0 and 1). It should return no outputs, merely print to screen one line of text for each requested percentile. The model is calibrated at a quarterly frequency, but Carroll et al report MPCs at an annual frequency. To convert, use the formula:$\kappa_{Y} \approx 1.0 - (1.0 - \kappa_{Q})^4$
###Code
# Write a function to tell us about the distribution of the MPC in this code block, then test it!
# You will almost surely find it useful to use a for loop in this function.
def describeMPCdstn(SomeTypes,percentiles):
MPC_sim = np.concatenate([ThisType.MPCnow for ThisType in SomeTypes])
MPCpercentiles_quarterly = getPercentiles(MPC_sim,percentiles=percentiles)
MPCpercentiles_annual = 1.0 - (1.0 - MPCpercentiles_quarterly)**4
for j in range(len(percentiles)):
print('The ' + str(100*percentiles[j]) + 'th percentile of the MPC is ' + str(MPCpercentiles_annual[j]))
describeMPCdstn(MyTypes,np.linspace(0.05,0.95,19))
###Output
_____no_output_____
###Markdown
Alternative Combinations of Parameter ValuesPlease write the names and email addresses of everyone who worked on this notebook on the line below.YOUR NAMES HERE IntroductionThe notebook "Micro-and-Macro-Implications-of-Very-Impatient-HHs" is an exercise that demonstrates the consequences of changing a key parameter of the [cstwMPC](http://econ.jhu.edu/people/ccarroll/papers/cstwMPC) model, the time preference factor $\beta$.The [REMARK](https://github.com/econ-ark/REMARK) `SolvingMicroDSOPs` reproduces the last figure in the [SolvingMicroDSOPs](http://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs) lecture notes, which shows that there are classes of alternate values of $\beta$ and $\rho$ that fit the data almost as well as the exact 'best fit' combination.Inspired by this comparison, this notebook asks you to examine the consequences for:* The consumption function* The distribution of wealthOf _joint_ changes in $\beta$ and $\rho$ together. One way you can do this is to construct a list of alternative values of $\rho$ (say, values that range upward from the default value of $\rho$, in increments of 0.2, all the way to $\rho=5$). Then for each of these values of $\rho$ you will find the value of $\beta$ that leads the same value for target market resources, $\check{m}$.As a reminder, $\check{m}$ is defined as the value of $m$ at which the optimal value of ${c}$ is the value such that, at that value of ${c}$, the expected level of ${m}$ next period is the same as its current value:$\mathbb{E}_{t}[{m}_{t+1}] = {m}_{t}$ Other notes:* The cstwMPC model solves and simulates the problems of consumers with 7 different values of $\beta$ * You should do your exercise using the middle value of $\beta$ from that exercise: * `DiscFac_mean = 0.9855583`* You are likely to run into the problem, as you experiment with parameter values, that you have asked HARK to solve a model that does not satisfy one of the impatience conditions required for the model to have a solution. Those conditions are explained intuitively in the [TractableBufferStock](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/TractableBufferStock/) model. The versions of the impatience conditions that apply to the $\texttt{IndShockConsumerType}$ model can be found in the paper [BufferStockTheory](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory), table 2. * The conditions that need to be satisfied are: * The Growth Impatience Condition (GIC) * The Return Impatience Condition (RIC)* Please accumulate the list of solved consumers' problems in a list called `MyTypes` * For compatibility with a further part of the assignment below
###Code
# This cell merely imports and sets up some basic functions and packages
%matplotlib inline
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
from copy import deepcopy
import HARK # Prevents import error from Demos repo
from HARK.utilities import plotFuncs
# Import IndShockConsumerType
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
# Define a dictionary with calibrated parameters
cstwMPC_calibrated_parameters = {
"CRRA":1.0, # Coefficient of relative risk aversion
"Rfree":1.01/(1.0 - 1.0/160.0), # Survival probability,
"PermGroFac":[1.000**0.25], # Permanent income growth factor (no perm growth),
"PermGroFacAgg":1.0,
"BoroCnstArt":0.0,
"CubicBool":False,
"vFuncBool":False,
"PermShkStd":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income
"PermShkCount":5, # Number of points in permanent income shock grid
"TranShkStd":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income,
"TranShkCount":5, # Number of points in transitory income shock grid
"UnempPrb":0.07, # Probability of unemployment while working
"IncUnemp":0.15, # Unemployment benefit replacement rate
"UnempPrbRet":None,
"IncUnempRet":None,
"aXtraMin":0.00001, # Minimum end-of-period assets in grid
"aXtraMax":40, # Maximum end-of-period assets in grid
"aXtraCount":32, # Number of points in assets grid
"aXtraExtra":[None],
"aXtraNestFac":3, # Number of times to 'exponentially nest' when constructing assets grid
"LivPrb":[1.0 - 1.0/160.0], # Survival probability
"DiscFac":0.97, # Default intertemporal discount factor; dummy value, will be overwritten
"cycles":0,
"T_cycle":1,
"T_retire":0,
'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)
'T_age': 400,
'IndL': 10.0/9.0, # Labor supply per individual (constant),
'aNrmInitMean':np.log(0.00001),
'aNrmInitStd':0.0,
'pLvlInitMean':0.0,
'pLvlInitStd':0.0,
'AgentCount':10000,
}
# Construct a list of solved consumers' problems, IndShockConsumerType is just a place holder
MyTypes = [IndShockConsumerType(**cstwMPC_calibrated_parameters)]
###Output
_____no_output_____
###Markdown
Simulating the Distribution of Wealth for Alternative CombinationsYou should now have constructed a list of consumer types all of whom have the same _target_ level of market resources $\check{m}$. But the fact that everyone has the same target ${m}$ does not mean that the _distribution_ of ${m}$ will be the same for all of these consumer types.In the code block below, fill in the contents of the loop to solve and simulate each agent type for many periods. To do this, you should invoke the methods $\texttt{solve}$, $\texttt{initializeSim}$, and $\texttt{simulate}$ in that order. Simulating for 1200 quarters (300 years) will approximate the long run distribution of wealth in the population.
###Code
for ThisType in tqdm(MyTypes):
ThisType.solve()
ThisType.initializeSim()
ThisType.simulate()
###Output
0%| | 0/1 [00:00<?, ?it/s]
###Markdown
Now that you have solved and simulated these consumers, make a plot that shows the relationship between your alternative values of $\rho$ and the mean level of assets
###Code
# To help you out, we have given you the command needed to construct a list of the levels of assets for all consumers
aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# You should take the mean of aLvl for each consumer in MyTypes, divide it by the mean across all simulations
# and then plot the ratio of the values of mean(aLvl) for each group against the value of $\rho$
###Output
_____no_output_____
###Markdown
InterpretHere, you should attempt to give an intuitive explanation of the results you see in the figure you just constructed. The Distribution of Wealth...Your next exercise is to show how the distribution of wealth differs for the different parameter values
###Code
from HARK.utilities import getLorenzShares, getPercentiles
# Finish filling in this function to calculate the Euclidean distance between the simulated and actual Lorenz curves.
def calcLorenzDistance(SomeTypes):
'''
Calculates the Euclidean distance between the simulated and actual (from SCF data) Lorenz curves at the
20th, 40th, 60th, and 80th percentiles.
Parameters
----------
SomeTypes : [AgentType]
List of AgentTypes that have been solved and simulated. Current levels of individual assets should
be stored in the attribute aLvlNow.
Returns
-------
lorenz_distance : float
Euclidean distance (square root of sum of squared differences) between simulated and actual Lorenz curves.
'''
# Define empirical Lorenz curve points
lorenz_SCF = np.array([-0.00183091, 0.0104425 , 0.0552605 , 0.1751907 ])
# Extract asset holdings from all consumer types
aLvl_sim = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# Calculate simulated Lorenz curve points
lorenz_sim = getLorenzShares(aLvl_sim,percentiles=[0.2,0.4,0.6,0.8])
# Calculate the Euclidean distance between the simulated and actual Lorenz curves
lorenz_distance = np.sqrt(np.sum((lorenz_SCF - lorenz_sim)**2))
# Return the Lorenz distance
return lorenz_distance
###Output
_____no_output_____
###Markdown
...and the Marginal Propensity to ConsumeNow let's look at the aggregate MPC. In the code block below, write a function that produces text output of the following form:$\texttt{The 35th percentile of the MPC is 0.15623}$Your function should take two inputs: a list of types of consumers and an array of percentiles (numbers between 0 and 1). It should return no outputs, merely print to screen one line of text for each requested percentile. The model is calibrated at a quarterly frequency, but Carroll et al report MPCs at an annual frequency. To convert, use the formula:$\kappa_{Y} \approx 1.0 - (1.0 - \kappa_{Q})^4$
###Code
# Write a function to tell us about the distribution of the MPC in this code block, then test it!
# You will almost surely find it useful to use a for loop in this function.
def describeMPCdstn(SomeTypes,percentiles):
MPC_sim = np.concatenate([ThisType.MPCnow for ThisType in SomeTypes])
MPCpercentiles_quarterly = getPercentiles(MPC_sim,percentiles=percentiles)
MPCpercentiles_annual = 1.0 - (1.0 - MPCpercentiles_quarterly)**4
for j in range(len(percentiles)):
print('The ' + str(100*percentiles[j]) + 'th percentile of the MPC is ' + str(MPCpercentiles_annual[j]))
describeMPCdstn(MyTypes,np.linspace(0.05,0.95,19))
###Output
The 5.0th percentile of the MPC is 0.3830226479018113
The 10.0th percentile of the MPC is 0.41900980317343206
The 15.0th percentile of the MPC is 0.4598470116058171
The 20.0th percentile of the MPC is 0.4598470116058171
The 25.0th percentile of the MPC is 0.4598470116058171
The 30.0th percentile of the MPC is 0.49791664149541504
The 35.0th percentile of the MPC is 0.49791664149541504
The 40.0th percentile of the MPC is 0.49791664149541504
The 44.99999999999999th percentile of the MPC is 0.5372418610399285
The 49.99999999999999th percentile of the MPC is 0.5372418610399285
The 54.99999999999999th percentile of the MPC is 0.5821887061768998
The 60.0th percentile of the MPC is 0.5821887061768998
The 65.0th percentile of the MPC is 0.5821887061768998
The 70.0th percentile of the MPC is 0.6345373126858305
The 75.0th percentile of the MPC is 0.6345373126858305
The 80.0th percentile of the MPC is 0.7267307307276051
The 85.0th percentile of the MPC is 0.7799255201452849
The 90.0th percentile of the MPC is 0.8208530902866044
The 95.0th percentile of the MPC is 0.8592556334047874
###Markdown
Alternative Combinations of Parameter ValuesPlease write the names and email addresses of everyone who worked on this notebook on the line below.YOUR NAMES HERE IntroductionThe notebook "Micro-and-Macro-Implications-of-Very-Impatient-HHs" is an exercise that demonstrates the consequences of changing a key parameter of the [cstwMPC](http://econ.jhu.edu/people/ccarroll/papers/cstwMPC) model, the time preference factor $\beta$.The [REMARK](https://github.com/econ-ark/REMARK) `SolvingMicroDSOPs` reproduces the last figure in the [SolvingMicroDSOPs](http://econ.jhu.edu/people/ccarroll/SolvingMicroDSOPs) lecture notes, which shows that there are classes of alternate values of $\beta$ and $\rho$ that fit the data almost as well as the exact 'best fit' combination.Inspired by this comparison, this notebook asks you to examine the consequences for:* The consumption function* The distribution of wealthOf _joint_ changes in $\beta$ and $\rho$ together. One way you can do this is to construct a list of alternative values of $\rho$ (say, values that range upward from the default value of $\rho$, in increments of 0.2, all the way to $\rho=5$). Then for each of these values of $\rho$ you will find the value of $\beta$ that leads the same value for target market resources, $\check{m}$.As a reminder, $\check{m}$ is defined as the value of $m$ at which the optimal value of ${c}$ is the value such that, at that value of ${c}$, the expected level of ${m}$ next period is the same as its current value:$\mathbb{E}_{t}[{m}_{t+1}] = {m}_{t}$ Other notes:* The cstwMPC model solves and simulates the problems of consumers with 7 different values of $\beta$ * You should do your exercise using the middle value of $\beta$ from that exercise: * `DiscFac_mean = 0.9855583`* You are likely to run into the problem, as you experiment with parameter values, that you have asked HARK to solve a model that does not satisfy one of the impatience conditions required for the model to have a solution. Those conditions are explained intuitively in the [TractableBufferStock](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/TractableBufferStock/) model. The versions of the impatience conditions that apply to the $\texttt{IndShockConsumerType}$ model can be found in the paper [BufferStockTheory](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory), table 2. * The conditions that need to be satisfied are: * The Growth Impatience Condition (GIC) * The Return Impatience Condition (RIC)* Please accumulate the list of solved consumers' problems in a list called `MyTypes` * For compatibility with a further part of the assignment below
###Code
# This cell merely imports and sets up some basic functions and packages
%matplotlib inline
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
from copy import deepcopy
import HARK # Prevents import error from Demos repo
from HARK.utilities import plotFuncs
# Import IndShockConsumerType
from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
# Define a dictionary with calibrated parameters
cstwMPC_calibrated_parameters = {
"CRRA":1.0, # Coefficient of relative risk aversion
"Rfree":1.01/(1.0 - 1.0/160.0), # Survival probability,
"PermGroFac":[1.000**0.25], # Permanent income growth factor (no perm growth),
"PermGroFacAgg":1.0,
"BoroCnstArt":0.0,
"CubicBool":False,
"vFuncBool":False,
"PermShkStd":[(0.01*4/11)**0.5], # Standard deviation of permanent shocks to income
"PermShkCount":5, # Number of points in permanent income shock grid
"TranShkStd":[(0.01*4)**0.5], # Standard deviation of transitory shocks to income,
"TranShkCount":5, # Number of points in transitory income shock grid
"UnempPrb":0.07, # Probability of unemployment while working
"IncUnemp":0.15, # Unemployment benefit replacement rate
"UnempPrbRet":None,
"IncUnempRet":None,
"aXtraMin":0.00001, # Minimum end-of-period assets in grid
"aXtraMax":40, # Maximum end-of-period assets in grid
"aXtraCount":32, # Number of points in assets grid
"aXtraExtra":[None],
"aXtraNestFac":3, # Number of times to 'exponentially nest' when constructing assets grid
"LivPrb":[1.0 - 1.0/160.0], # Survival probability
"DiscFac":0.97, # Default intertemporal discount factor; dummy value, will be overwritten
"cycles":0,
"T_cycle":1,
"T_retire":0,
'T_sim':1200, # Number of periods to simulate (idiosyncratic shocks model, perpetual youth)
'T_age': 400,
'IndL': 10.0/9.0, # Labor supply per individual (constant),
'aNrmInitMean':np.log(0.00001),
'aNrmInitStd':0.0,
'pLvlInitMean':0.0,
'pLvlInitStd':0.0,
'AgentCount':10000,
}
# Construct a list of solved consumers' problems, IndShockConsumerType is just a place holder
MyTypes = [IndShockConsumerType(**cstwMPC_calibrated_parameters)]
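# A minimal sketch (one possible approach, not the official solution) of building
# MyTypes over a grid of rho values. The betas below are placeholders: for each
# rho you still need to search for the beta that delivers the same target market
# resources m-check (e.g. with a root-finder), which is the point of this exercise.
rho_values = np.arange(1.0, 5.01, 0.2)     # from the default rho upward in steps of 0.2
DiscFac_mean = 0.9855583                   # middle beta from cstwMPC (see instructions above)
beta_for_rho = {rho: DiscFac_mean for rho in rho_values}   # placeholder betas, to be replaced
MyTypes = []
for rho in rho_values:
    NewType = IndShockConsumerType(**cstwMPC_calibrated_parameters)
    NewType.CRRA = rho                     # alternative coefficient of relative risk aversion
    NewType.DiscFac = beta_for_rho[rho]    # discount factor matched to this rho (placeholder here)
    MyTypes.append(NewType)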
###Output
_____no_output_____
###Markdown
Simulating the Distribution of Wealth for Alternative CombinationsYou should now have constructed a list of consumer types all of whom have the same _target_ level of market resources $\check{m}$. But the fact that everyone has the same target ${m}$ does not mean that the _distribution_ of ${m}$ will be the same for all of these consumer types.In the code block below, fill in the contents of the loop to solve and simulate each agent type for many periods. To do this, you should invoke the methods $\texttt{solve}$, $\texttt{initializeSim}$, and $\texttt{simulate}$ in that order. Simulating for 1200 quarters (300 years) will approximate the long run distribution of wealth in the population.
###Code
for ThisType in tqdm(MyTypes):
ThisType.solve()
ThisType.initializeSim()
ThisType.simulate()
###Output
0%| | 0/1 [00:00<?, ?it/s]
###Markdown
Now that you have solved and simulated these consumers, make a plot that shows the relationship between your alternative values of $\rho$ and the mean level of assets
###Code
# To help you out, we have given you the command needed to construct a list of the levels of assets for all consumers
aLvl_all = np.concatenate([ThisType.aLvlNow for ThisType in MyTypes])
# You should take the mean of aLvl for each consumer in MyTypes, divide it by the mean across all simulations
# and then plot the ratio of the values of mean(aLvl) for each group against the value of $\rho$
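# A possible sketch (an assumption about one way to do it, not the official solution):
# recover rho from each solved type and plot its mean asset level relative to the overall mean.
rho_values = np.array([ThisType.CRRA for ThisType in MyTypes])
aLvl_mean_by_type = np.array([np.mean(ThisType.aLvlNow) for ThisType in MyTypes])
aLvl_ratio = aLvl_mean_by_type / np.mean(aLvl_all)
plt.plot(rho_values, aLvl_ratio, 'o-')
plt.xlabel(r'$\rho$')
plt.ylabel('mean(aLvl) relative to overall mean')
plt.show()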
###Output
_____no_output_____
###Markdown
InterpretHere, you should attempt to give an intuitive explanation of the results you see in the figure you just constructed. The Distribution of Wealth...Your next exercise is to show how the distribution of wealth differs for the different parameter values
###Code
from HARK.utilities import getLorenzShares, getPercentiles
# Finish filling in this function to calculate the Euclidean distance between the simulated and actual Lorenz curves.
def calcLorenzDistance(SomeTypes):
'''
Calculates the Euclidean distance between the simulated and actual (from SCF data) Lorenz curves at the
20th, 40th, 60th, and 80th percentiles.
Parameters
----------
SomeTypes : [AgentType]
List of AgentTypes that have been solved and simulated. Current levels of individual assets should
be stored in the attribute aLvlNow.
Returns
-------
lorenz_distance : float
Euclidean distance (square root of sum of squared differences) between simulated and actual Lorenz curves.
'''
# Define empirical Lorenz curve points
lorenz_SCF = np.array([-0.00183091, 0.0104425 , 0.0552605 , 0.1751907 ])
# Extract asset holdings from all consumer types
    aLvl_sim = np.concatenate([ThisType.aLvlNow for ThisType in SomeTypes])
# Calculate simulated Lorenz curve points
lorenz_sim = getLorenzShares(aLvl_sim,percentiles=[0.2,0.4,0.6,0.8])
# Calculate the Euclidean distance between the simulated and actual Lorenz curves
lorenz_distance = np.sqrt(np.sum((lorenz_SCF - lorenz_sim)**2))
# Return the Lorenz distance
return lorenz_distance
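# Example usage (assuming the types in MyTypes have already been solved and simulated):
print('Lorenz distance from SCF data:', calcLorenzDistance(MyTypes))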
###Output
_____no_output_____
###Markdown
...and the Marginal Propensity to ConsumeNow let's look at the aggregate MPC. In the code block below, write a function that produces text output of the following form:$\texttt{The 35th percentile of the MPC is 0.15623}$Your function should take two inputs: a list of types of consumers and an array of percentiles (numbers between 0 and 1). It should return no outputs, merely print to screen one line of text for each requested percentile. The model is calibrated at a quarterly frequency, but Carroll et al report MPCs at an annual frequency. To convert, use the formula:$\kappa_{Y} \approx 1.0 - (1.0 - \kappa_{Q})^4$
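For example, a quarterly MPC of $\kappa_{Q}=0.2$ converts to an annual MPC of roughly $1.0 - (1.0-0.2)^4 \approx 0.59$.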
###Code
# Write a function to tell us about the distribution of the MPC in this code block, then test it!
# You will almost surely find it useful to use a for loop in this function.
def describeMPCdstn(SomeTypes,percentiles):
MPC_sim = np.concatenate([ThisType.MPCnow for ThisType in SomeTypes])
MPCpercentiles_quarterly = getPercentiles(MPC_sim,percentiles=percentiles)
MPCpercentiles_annual = 1.0 - (1.0 - MPCpercentiles_quarterly)**4
for j in range(len(percentiles)):
print('The ' + str(100*percentiles[j]) + 'th percentile of the MPC is ' + str(MPCpercentiles_annual[j]))
describeMPCdstn(MyTypes,np.linspace(0.05,0.95,19))
###Output
The 5.0th percentile of the MPC is 0.3830226479018113
The 10.0th percentile of the MPC is 0.41900980317343206
The 15.0th percentile of the MPC is 0.4598470116058171
The 20.0th percentile of the MPC is 0.4598470116058171
The 25.0th percentile of the MPC is 0.4598470116058171
The 30.0th percentile of the MPC is 0.49791664149541504
The 35.0th percentile of the MPC is 0.49791664149541504
The 40.0th percentile of the MPC is 0.49791664149541504
The 44.99999999999999th percentile of the MPC is 0.5372418610399285
The 49.99999999999999th percentile of the MPC is 0.5372418610399285
The 54.99999999999999th percentile of the MPC is 0.5821887061768998
The 60.0th percentile of the MPC is 0.5821887061768998
The 65.0th percentile of the MPC is 0.5821887061768998
The 70.0th percentile of the MPC is 0.6345373126858305
The 75.0th percentile of the MPC is 0.6345373126858305
The 80.0th percentile of the MPC is 0.7267307307276051
The 85.0th percentile of the MPC is 0.7799255201452849
The 90.0th percentile of the MPC is 0.8208530902866044
The 95.0th percentile of the MPC is 0.8592556334047874
|
notebooks/Analysis/Water_consumption.ipynb | ###Markdown
This script shows how to use the existing code in opengrid to create a water consumption benchmark.
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from opengrid_dev import config
c = config.Config()
%matplotlib inline
plt.rcParams['figure.figsize'] = 16,8
from opengrid_dev.library.houseprint import houseprint
###Output
_____no_output_____
###Markdown
Load the Houseprint and sync all data
###Code
hp = houseprint.Houseprint()
hp.sync_tmpos()
###Output
_____no_output_____
###Markdown
Create a dataframe containing a column for each water sensor with data of exactly one year (up till now)
###Code
df = hp.get_data(sensortype='water', head=pd.Timestamp('now') - pd.Timedelta(days=365))
#retain only positive values (sometimes there are meter resets)
df = df[df > 0]
###Output
_____no_output_____
###Markdown
Plot results Plot timeseries and load duration for each retained sensor
###Code
for sensor in df.columns:
plt.figure()
ts = df[sensor].dropna()
ax1=plt.subplot(121)
plt.plot_date(ts.index, ts, '-', label=sensor)
plt.ylabel('water consumption [l/min]')
plt.legend()
ax2=plt.subplot(122)
plt.plot(np.sort(ts)[::-1], label=sensor)
    plt.ylabel('water load duration [l/min]')
plt.legend()
###Output
_____no_output_____
###Markdown
Let's refine the load duration into a histogram
###Code
for sensor in df.columns:
plt.figure()
ts = df[sensor]
ts=ts[ts>1]
ts=ts[ts<30]
ts.hist(bins=100)
plt.title(hp.find_sensor(sensor).device.key + ' - ' + hp.find_sensor(sensor).description + ' - ' + sensor)
for sensor in df.columns:
plt.figure()
ts = df[sensor]
ts=ts[ts>0.1]
ts=ts[ts<30]
hst, edges = np.histogram(ts, np.arange(0,20,0.5))
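    # weight each bin's count (minutes spent at that flow rate) by the bin's flow rate to approximate litres per bin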
hst_weighted = hst * edges[1:]
plt.plot(np.arange(0,20,0.5)[1:], hst_weighted)
#plt.plot(np.arange(0,20,0.25)[1:], hst)
plt.title(hp.find_sensor(sensor).device.key + ' - ' + sensor)
plt.ylabel('Water consumption (liter)')
###Output
_____no_output_____ |
notebooks/MR/f_create_undersampled_kspace.ipynb | ###Markdown
Create undersampled k-spaceThis demonstration shows how to create different undersampled k-space data which can be used either directly for image reconstruction or used to simulate MR data acquisition of a new object.This demo is a 'script', i.e. intended to be run step by step in a Python notebook such as Jupyter. It is organised in 'cells'. Jupyter displays these cells nicely and allows you to run each cell on its own. First version: 18th of June 2021Author: Christoph KolbitschCCP SyneRBI Synergistic Image Reconstruction Framework (SIRF). Copyright 2015 - 2021 Rutherford Appleton Laboratory STFC. Copyright 2015 - 2021 University College London. Copyright 2015 - 2021 Physikalisch-Technische Bundesanstalt.This is software developed for the Collaborative Computational Project in Synergistic Reconstruction for Biomedical Imaging (http://www.ccpsynerbi.ac.uk/).Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions and limitations under the License. In the previous MR notebooks such as `e_advanced_recon` we used an existing MR raw data file with regular Cartesian sampling and an undersampling factor (R) of 4. If we would like to repeat the notebook for a dataset with R = 6, we would need to go to the MR scanner and acquire new raw data. This of course is one of the strengths of __SIRF__ because it means all our developed code can be applied to real data in a straightforward way, but sometimes it would be nice to switch from R = 4 to R = 6 by adapting a parameter in our script. Or go from regular Cartesian undersampling to random Cartesian undersampling without having to implement a new trajectory on the MR scanner.This notebook will show how we can achieve this at least to a certain degree. The idea is to start with a fully sampled Cartesian data set and then select only a subset of acquired k-space lines for the image reconstruction. We will use a 2D Cartesian data set and hence we can select a different subset of $k_y$ points. Of course this approach has several limitations. We cannot go from a Cartesian to a non-Cartesian (e.g. radial) sampling pattern and we cannot adapt the overall FOV of the scan, but it is a start. So let's think about what we need to do. * (A) We need a fully sampled Cartesian data set. Let's take `ptb_resolutionphantom_fully_ismrmrd.h5` which is in `exercises_data_path('MR', 'PTB_ACRPhantom_GRAPPA')`. We need to load the data and we will already call `preprocess_acquisition_data()` for this data. * (B) Then we need to find out which $k_y$ points have been acquired and where the centre of k-space is (i.e. $k_y$ = 0), because for any undersampled MR acquisition it is a good idea to have a small fully sampled region around the k-space centre and carry out the undersampling in the higher k-space frequencies. * (C) Define a subset of the original $k_y$ points. * (D) Create a new `AcquisitionData` object with only the subset defined in (C). * (E) Do a simple reconstruction to check we did the right thing. After completing all the previous notebooks you are already MR reconstruction experts. Therefore, you should be able to do these steps by yourself. 
Give it a try and create a new k-space with an undersampling factor of 4 and a fully sampled central region of 10 $k_y$ points. Only have a look at the example solution below if you are stuck. A few hints to get you started: * Details on how to get information about the acquired k-space (e.g. which $k_y$ points have been acquired) can be found in the notebook `d_undersampled_reconstructions`. * We can define an empty `AcquisitionData` object using `acq_new = preprocessed_data.new_acquisition_data(empty=True)`. * We can select an `Acquisition` object from an existing `AcquisitionData` object using `cacq = existing_acq_data.acquisition(acq_index)`. * Finally we can add this `Acquisition` object to our so far empty `AcquisitionData` object using `acq_new.append_acquisition(cacq)`. * Once we have added all our `Acquisition` objects, we have to sort the data again: `acq_new.sort()`. * In order to find out how to do a simple reconstruction, have a look at the MR part of the introductory notebook `acquisition_model_mr_pet_ct`.
###Code
#%% make sure figures appears inline and animations works
%matplotlib notebook
__version__ = '0.1.1'
import notebook_setup
import numpy
# import engine module
import sirf.Gadgetron as mr
from sirf_exercises import exercises_data_path
# import further modules
import os
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
You should already have called the following script. If not, uncomment BOTH lines and run it now!
###Code
#%%bash
#bash ../../scripts/download_data.sh -m
###Output
_____no_output_____
###Markdown
Utilities
###Code
# First define some handy function definitions
# To make subsequent code cleaner, we have a few functions here. You can ignore
# ignore them when you first see this demo.
def plot_2d_image(idx,vol,title,clims=None,cmap="viridis"):
"""Customized version of subplot to plot 2D image"""
plt.subplot(*idx)
plt.imshow(vol,cmap=cmap)
if not clims is None:
plt.clim(clims)
plt.colorbar()
plt.title(title)
plt.axis("off")
def crop_and_fill(templ_im, vol):
"""Crop volumetric image data and replace image content in template image object"""
# Get size of template image and crop
idim_orig = templ_im.as_array().shape
idim = (1,)*(3-len(idim_orig)) + idim_orig
offset = (numpy.array(vol.shape) - numpy.array(idim)) // 2
vol = vol[offset[0]:offset[0]+idim[0], offset[1]:offset[1]+idim[1], offset[2]:offset[2]+idim[2]]
# Make a copy of the template to ensure we do not overwrite it
templ_im_out = templ_im.copy()
# Fill image content
templ_im_out.fill(numpy.reshape(vol, idim_orig))
return(templ_im_out)
###Output
_____no_output_____
###Markdown
(A) Fully sampled k-space data Load in fully sampled k-space data and preprocess it.
###Code
# Load MR AcquisitionData
mr_acq = mr.AcquisitionData(exercises_data_path('MR', 'PTB_ACRPhantom_GRAPPA')
+ '/ptb_resolutionphantom_fully_ismrmrd.h5' )
preprocessed_data = mr.preprocess_acquisition_data(mr_acq)
# Calculate image
recon = mr.FullySampledReconstructor()
recon.set_input(preprocessed_data)
recon.process()
im_mr = recon.get_output()
# Display it
plt.figure();
plot_2d_image([1,1,1], numpy.abs(im_mr.as_array())[0,:,:], 'Original image', cmap="Greys_r")
###Output
_____no_output_____
###Markdown
(B) Find out which k-space points have been acquired We will get the information about the $k_y$ position for each `Acquisition`. Because we have to go through all the acquired data, this can take a bit of time.
###Code
ky_index = preprocessed_data.get_ISMRMRD_info('kspace_encode_step_1')
print(ky_index)
###Output
_____no_output_____
###Markdown
So we have got 256 phase encoding points $k_y$. Because this is a fully sampled Cartesian acquisition we can safely assume that the k-space centre is located bang in the middle, i.e. $k_y$ = 0 for `ky_index` = 128. (C) Define a subset of k-space data Let's start with something easy. Define a subset for a regular undersampling factor R = 4 but with a fully sampled central k-space region of 10 $k_y$ points.
###Code
# Define an undersampling factor
R = 4
# Define the number of fully sampled k-space points in the k-space centre
N_ctr = 10
# and your k-space centre was in the middle, so
ky0_index = len(ky_index)//2
###Output
_____no_output_____
###Markdown
Let's first select the fully sampled k-space centre
###Code
ky_index_subset = numpy.arange(ky0_index-N_ctr//2, ky0_index+N_ctr//2)
###Output
_____no_output_____
###Markdown
Now we can add the rest of the data with an undersampling factor of R
###Code
ky_index_subset = numpy.concatenate((ky_index_subset, numpy.arange(start=0, stop=len(ky_index), step=R)), axis=0)
###Output
_____no_output_____
###Markdown
Of course, now we might have added points from the centre of k-space again. To make sure that no $k_y$ index occurs twice, we simply call `numpy.unique`
###Code
ky_index_subset = numpy.unique(ky_index_subset)
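# As an aside, the same bookkeeping also covers the random Cartesian undersampling
# mentioned in the introduction. A minimal sketch (an addition, not part of the
# original exercise): keep the fully sampled centre and draw the remaining ky lines at random.
n_random = (len(ky_index) - N_ctr) // R    # roughly as many lines as regular R-fold undersampling
ky_random = numpy.random.choice(len(ky_index), size=n_random, replace=False)
ky_index_subset_random = numpy.unique(numpy.concatenate(
    (numpy.arange(ky0_index - N_ctr//2, ky0_index + N_ctr//2), ky_random)))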
###Output
_____no_output_____
###Markdown
Now we can plot the original fully sampled and the new undersampled indices. Hint: zoom into the figure.
###Code
plt.figure()
plt.plot(ky_index, numpy.ones(ky_index.shape), 'bo')
plt.plot(ky_index[ky_index_subset], numpy.ones(ky_index[ky_index_subset].shape), 'r.');
###Output
_____no_output_____
###Markdown
(D) Create new acquisition data Now we know which k-space points to select, we need to select them and create a new `AcquisitionData` object. We will follow the steps detailed at the beginning of the notebook.
###Code
acq_new = preprocessed_data.new_acquisition_data(empty=True)
# Create raw data
for jnd in range(len(ky_index_subset)):
cacq = preprocessed_data.acquisition(ky_index_subset[jnd])
acq_new.append_acquisition(cacq)
acq_new.sort()
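# Optional sanity check (assuming AcquisitionData.number() is available in your SIRF version):
# the new container should hold one readout per selected ky index.
print('Selected %d of %d acquisitions' % (acq_new.number(), preprocessed_data.number()))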
###Output
_____no_output_____
###Markdown
(E) Simple reconstruction Now we will do a simple reconstruction by defining an `AcquisitionModel` based on the `AcquisitionData` and then calling `backward()` (i.e. Fourier transform).
###Code
# Original data
csm_orig = mr.CoilSensitivityData()
csm_orig.smoothness = 200
csm_orig.calculate(preprocessed_data)
A_orig = mr.AcquisitionModel(preprocessed_data, im_mr)
A_orig.set_coil_sensitivity_maps(csm_orig)
im_orig = A_orig.backward(preprocessed_data)
# Undersampled data
csm_new = mr.CoilSensitivityData()
csm_new.smoothness = 200
csm_new.calculate(acq_new)
A_new = mr.AcquisitionModel(acq_new, im_mr)
A_new.set_coil_sensitivity_maps(csm_orig)
im_new = A_new.backward(acq_new)  # use the acquisition model that matches the undersampled data
# Display it
plt.figure();
plot_2d_image([1,2,1], numpy.abs(im_orig.as_array())[0,:,:], 'Original image', cmap="Greys_r")
plot_2d_image([1,2,2], numpy.abs(im_new.as_array())[0,:,:], 'Undersampled image', cmap="Greys_r")
###Output
_____no_output_____
###Markdown
Create undersampled k-spaceThis demonstration shows how to create different undersampled k-space data which can be used either directly for image reconstruction or used to simulate MR data acquisition of a new object.This demo is a 'script', i.e. intended to be run step by step in a Python notebook such as Jupyter. It is organised in 'cells'. Jupyter displays these cells nicely and allows you to run each cell on its own. First version: 18th of June 2021Author: Christoph KolbitschCCP SyneRBI Synergistic Image Reconstruction Framework (SIRF). Copyright 2015 - 2021 Rutherford Appleton Laboratory STFC. Copyright 2015 - 2021 University College London. Copyright 2015 - 2021 Physikalisch-Technische Bundesanstalt.This is software developed for the Collaborative Computational Project in Synergistic Reconstruction for Biomedical Imaging (http://www.ccpsynerbi.ac.uk/).Licensed under the Apache License, Version 2.0 (the "License");you may not use this file except in compliance with the License.You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.See the License for the specific language governing permissions and limitations under the License. In the previous MR notebooks such as `e_advanced_recon` we used an existing MR raw data file with regular Cartesian sampling and an undersampling factor (R) of 4. If we would like to repeat the notebook for a dataset with R = 6, we would need to go to the MR scanner and acquire new raw data. This of course is one of the strengths of __SIRF__ because it means all our developed code can be applied to real data in a straightforward way, but sometimes it would be nice to switch from R = 4 to R = 6 by adapting a parameter in our script. Or go from regular Cartesian undersampling to random Cartesian undersampling without having to implement a new trajectory on the MR scanner.This notebook will show how we can achieve this at least to a certain degree. The idea is to start with a fully sampled Cartesian data set and then select only a subset of acquired k-space lines for the image reconstruction. We will use a 2D Cartesian data set and hence we can select a different subset of $k_y$ points. Of course this approach has several limitations. We cannot go from a Cartesian to a non-Cartesian (e.g. radial) sampling pattern and we cannot adapt the overall FOV of the scan, but it is a start. So let's think about what we need to do. * (A) We need a fully sampled Cartesian data set. Let's take `ptb_resolutionphantom_fully_ismrmrd.h5` which is in `exercises_data_path('MR', 'PTB_ACRPhantom_GRAPPA')`. We need to load the data and we will already call `preprocess_acquisition_data()` for this data. * (B) Then we need to find out which $k_y$ points have been acquired and where the centre of k-space is (i.e. $k_y$ = 0), because for any undersampled MR acquisition it is a good idea to have a small fully sampled region around the k-space centre and carry out the undersampling in the higher k-space frequencies. * (C) Define a subset of the original $k_y$ points. * (D) Create a new `AcquisitionData` object with only the subset defined in (C). * (E) Do a simple reconstruction to check we did the right thing. After completing all the previous notebooks you are already MR reconstruction experts. Therefore, you should be able to do these steps by yourself. 
Give it a try and create a new k-space with an undersampling factor of 4 and a fully sampled central region of 10 $k_y$ points. Only have a look at the example solution below if you are stuck. A few hints to get you started: * Details on how to get information about the acquired k-space (e.g. which $k_y$ points have been acquired) can be found in the notebook `d_undersampled_reconstructions`. * We can define an empty `AcquisitionData` object using `acq_new = preprocessed_data.new_acquisition_data(empty=True)`. * We can select an `Acquisition` object from an existing `AcquisitionData` object using `cacq = existing_acq_data.acquisition(acq_index)`. * Finally we can add this `Acquisition` object to our so far empty `AcquisitionData` object using `acq_new.append_acquisition(cacq)`. * Once we have added all our `Acquisition` objects, we have to sort the data again: `acq_new.sort()`. * In order to find out how to do a simple reconstruction, have a look at the MR part of the introductory notebook `acquisition_model_mr_pet_ct`.
###Code
#%% make sure figures appears inline and animations works
%matplotlib notebook
__version__ = '0.1.1'
import notebook_setup
import numpy
# import engine module
import sirf.Gadgetron as mr
from sirf_exercises import exercises_data_path
# import further modules
import os
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
You should already have called the following script. If not, uncomment BOTH lines and run it now!
###Code
#%%bash
#bash ../../scripts/download_data.sh -m
###Output
_____no_output_____
###Markdown
Utilities
###Code
# First define some handy function definitions
# To make subsequent code cleaner, we have a few functions here. You can ignore
# ignore them when you first see this demo.
def plot_2d_image(idx,vol,title,clims=None,cmap="viridis"):
"""Customized version of subplot to plot 2D image"""
plt.subplot(*idx)
plt.imshow(vol,cmap=cmap)
if not clims is None:
plt.clim(clims)
plt.colorbar()
plt.title(title)
plt.axis("off")
def crop_and_fill(templ_im, vol):
"""Crop volumetric image data and replace image content in template image object"""
# Get size of template image and crop
idim_orig = templ_im.as_array().shape
idim = (1,)*(3-len(idim_orig)) + idim_orig
offset = (numpy.array(vol.shape) - numpy.array(idim)) // 2
vol = vol[offset[0]:offset[0]+idim[0], offset[1]:offset[1]+idim[1], offset[2]:offset[2]+idim[2]]
# Make a copy of the template to ensure we do not overwrite it
templ_im_out = templ_im.copy()
# Fill image content
templ_im_out.fill(numpy.reshape(vol, idim_orig))
return(templ_im_out)
###Output
_____no_output_____
###Markdown
(A) Fully sampled k-space data Load in fully sampled k-space data and preprocess it.
###Code
# Load MR AcquisitionData
mr_acq = mr.AcquisitionData(exercises_data_path('MR', 'PTB_ACRPhantom_GRAPPA')
+ '/ptb_resolutionphantom_fully_ismrmrd.h5' )
preprocessed_data = mr.preprocess_acquisition_data(mr_acq)
# Calculate image
recon = mr.FullySampledReconstructor()
recon.set_input(preprocessed_data)
recon.process()
im_mr = recon.get_output()
# Display it
plt.figure();
plot_2d_image([1,1,1], numpy.abs(im_mr.as_array())[0,:,:], 'Original image', cmap="Greys_r")
###Output
_____no_output_____
###Markdown
(B) Find out which k-space points have been acquired We will get the information about the $k_y$ position for each `Acquisition`. Because we have to go through all the acquired data, this can take a bit of time.
###Code
ky_index = preprocessed_data.parameter_info('kspace_encode_step_1')
print(ky_index)
###Output
_____no_output_____
###Markdown
So we have got 256 phase encoding points $k_y$. Because this is a fully sampled Cartesian acquisition we can safely assume that the k-space centre is located bang in the middle, i.e. $k_y$ = 0 for `ky_index` = 128. (C) Define a subset of k-space data Let's start with something easy. Define a subset for a regular undersampling factor R = 4 but with a fully sampled central k-space region of 10 $k_y$ points.
###Code
# Define an undersampling factor
R = 4
# Define the number of fully sampled k-space points in the k-space centre
N_ctr = 10
# and your k-space centre was in the middle, so
ky0_index = len(ky_index)//2
###Output
_____no_output_____
###Markdown
Let's first select the fully sampled k-space centre
###Code
ky_index_subset = numpy.arange(ky0_index-N_ctr//2, ky0_index+N_ctr//2)
###Output
_____no_output_____
###Markdown
Now we can add the rest of the data with an undersampling factor of R
###Code
ky_index_subset = numpy.concatenate((ky_index_subset, numpy.arange(start=0, stop=len(ky_index), step=R)), axis=0)
###Output
_____no_output_____
###Markdown
Of course, now we might have added points from the centre of k-space again. To make sure that no $k_y$ index occurs twice, we simply call `numpy.unique`
###Code
ky_index_subset = numpy.unique(ky_index_subset)
###Output
_____no_output_____
###Markdown
Now we can plot the original fully sampled and the new undersampled indices. Hint: zoom into the figure.
###Code
plt.figure()
plt.plot(ky_index, numpy.ones(ky_index.shape), 'bo')
plt.plot(ky_index[ky_index_subset], numpy.ones(ky_index[ky_index_subset].shape), 'r.');
###Output
_____no_output_____
###Markdown
(D) Create new acquisition data Now we know which k-space points to select, we need to select them and create a new `AcquisitionData` object. We will follow the steps detailed at the beginning of the notebook.
###Code
acq_new = preprocessed_data.new_acquisition_data(empty=True)
# Create raw data
for jnd in range(len(ky_index_subset)):
cacq = preprocessed_data.acquisition(ky_index_subset[jnd])
acq_new.append_acquisition(cacq)
acq_new.sort()
###Output
_____no_output_____
###Markdown
(E) Simple reconstruction Now we will do a simple reconstruction by defining an `AcquisitionModel` based on the `AcquisitionData` and then calling `backward()` (i.e. Fourier transform).
###Code
# Original data
csm_orig = mr.CoilSensitivityData()
csm_orig.smoothness = 200
csm_orig.calculate(preprocessed_data)
A_orig = mr.AcquisitionModel(preprocessed_data, im_mr)
A_orig.set_coil_sensitivity_maps(csm_orig)
im_orig = A_orig.backward(preprocessed_data)
# Undersampled data
csm_new = mr.CoilSensitivityData()
csm_new.smoothness = 200
csm_new.calculate(acq_new)
A_new = mr.AcquisitionModel(acq_new, im_mr)
A_new.set_coil_sensitivity_maps(csm_orig)
im_new = A_new.backward(acq_new)  # use the acquisition model that matches the undersampled data
# Display it
plt.figure();
plot_2d_image([1,2,1], numpy.abs(im_orig.as_array())[0,:,:], 'Original image', cmap="Greys_r")
plot_2d_image([1,2,2], numpy.abs(im_new.as_array())[0,:,:], 'Undersampled image', cmap="Greys_r")
###Output
_____no_output_____ |
assignment1/.ipynb_checkpoints/svm-checkpoint.ipynb | ###Markdown
Multiclass Support Vector Machine exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*In this exercise you will: - implement a fully-vectorized **loss function** for the SVM- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** using numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
# Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
try:
del X_train, y_train
del X_test, y_test
print('Clear previously loaded data.')
except:
pass
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
###Output
[130.64189796 135.98173469 132.47391837 130.05569388 135.34804082
131.75402041 130.96055102 136.14328571 132.47636735 131.48467347]
###Markdown
SVM ClassifierYour code for this section will all be written inside `cs231n/classifiers/linear_svm.py`. As you can see, we have prefilled the function `svm_loss_naive` which uses for loops to evaluate the multiclass SVM loss function.
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.000005)
print('loss: %f' % (loss, ))
###Output
loss: 8.427748
###Markdown
The `grad` returned from the function above is right now all zero. Derive and implement the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function.To check that you have correctly implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
numerical: 23.482586 analytic: 23.482586, relative error: 6.011186e-12
numerical: -2.081071 analytic: -2.081071, relative error: 7.484549e-11
numerical: -9.158171 analytic: -9.158171, relative error: 2.215943e-11
numerical: 36.832362 analytic: 36.736058, relative error: 1.309029e-03
numerical: 5.270509 analytic: 5.270509, relative error: 7.969603e-11
numerical: -17.688289 analytic: -17.688289, relative error: 6.833861e-12
numerical: 2.537516 analytic: 2.537516, relative error: 3.824743e-12
numerical: -7.891247 analytic: -7.891247, relative error: 3.774477e-11
numerical: 6.742901 analytic: 6.742901, relative error: 4.475165e-11
numerical: 8.621325 analytic: 8.621325, relative error: 1.118366e-11
numerical: 19.785503 analytic: 19.785503, relative error: 1.160928e-11
numerical: 30.142421 analytic: 30.142421, relative error: 3.576001e-12
numerical: 9.635986 analytic: 9.577686, relative error: 3.034299e-03
numerical: 12.656478 analytic: 12.656478, relative error: 7.713022e-12
numerical: -44.082079 analytic: -44.082079, relative error: 1.018182e-11
numerical: -0.767301 analytic: -0.767301, relative error: 3.208565e-10
numerical: 8.190569 analytic: 8.190569, relative error: 3.821971e-11
numerical: -1.203050 analytic: -1.203050, relative error: 1.143488e-10
numerical: -1.714670 analytic: -1.714670, relative error: 2.486826e-10
numerical: 22.495652 analytic: 22.495652, relative error: 4.345966e-12
###Markdown
**Inline Question 1**It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? How would changing the margin affect the frequency of this happening? *Hint: the SVM loss function is not strictly speaking differentiable*$\color{blue}{\textit Your Answer:}$ It's possible. Such a discrepancy stems from the non-differentiability of the max (hinge) function at zero. The SVM loss function involves the max function, which is not differentiable at zero. This means that we may see this kind of discrepancy whenever a margin term sits at (or numerically very close to) the kink. Thus, within a small range, changing the margin will not cause a significant change in the frequency of such discrepancies, but when the margin becomes so large that the decision plane falls off the data manifold, then we will not see any discrepancies, since the checking points (X_dev, y_dev) are drawn from the training data.
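###Code
# A small 1-D illustration of the answer above (an added sketch, not part of the assignment):
# f(x) = max(0, x) has a kink at x = 0, so a centred numerical gradient there
# returns 0.5 while any chosen analytic (sub)gradient is 0 or 1.
f = lambda x: np.maximum(0.0, x)
h = 1e-5
x = 0.0
numeric_grad = (f(x + h) - f(x - h)) / (2 * h)  # evaluates to 0.5 at the kink
analytic_grad = 1.0 if x > 0 else 0.0           # one common subgradient convention
print('numerical: %f analytic: %f' % (numeric_grad, analytic_grad))
###Output
_____no_output_____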
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# The losses should match but your vectorized implementation should be much faster.
print('difference: %f' % (loss_naive - loss_vectorized))
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss and gradient: computed in %fs' % (toc - tic))
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss and gradient: computed in %fs' % (toc - tic))
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('difference: %f' % difference)
###Output
Naive loss and gradient: computed in 0.091530s
Vectorized loss and gradient: computed in 0.003625s
difference: 0.000000
###Markdown
Stochastic Gradient DescentWe now have vectorized and efficient expressions for the loss, the gradient and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss. Your code for this part will be written inside `cs231n/classifiers/linear_classifier.py`.
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
num_iters=1500, verbose=True)
toc = time.time()
print('That took %fs' % (toc - tic))
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = svm.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.39 on the validation set.
# Note: you may see runtime/overflow warnings during hyper-parameter search.
# This may be caused by extreme values, and is not a bug.
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
# Provided as a reference. You may or may not want to change these hyperparameters
learning_rates = [3e-7,4e-7,5e-7]
regularization_strengths = [1e4, 2e4, 3e4, 5e4, 7e4, 1e5]
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
for learning_rate in learning_rates:
for reg in regularization_strengths:
# train and predict
svm = LinearSVM()
svm.train(X_train, y_train, learning_rate = learning_rate, reg=reg)
y_train_pred = svm.predict(X_train)
y_val_pred = svm.predict(X_val)
# store and update results
train_accuracy = np.mean(y_train==y_train_pred)
val_accuracy = np.mean(y_val==y_val_pred)
results[(learning_rate,reg)]=(train_accuracy,val_accuracy)
if val_accuracy > best_val:
best_val = val_accuracy
best_svm = svm
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Visualize the cross-validation results
import math
import pdb
# pdb.set_trace()
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.tight_layout(pad=3)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors, cmap=plt.cm.coolwarm)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors, cmap=plt.cm.coolwarm)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____
###Markdown
Multiclass Support Vector Machine exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*In this exercise you will: - implement a fully-vectorized **loss function** for the SVM- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** using numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from loadmydata import get_data_set_label_and_data, get_training_and_test_sets
from __future__ import print_function
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
# cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
# X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
filepath = '/Users/adolfovaldivieso/Library/Application Support/Tullpi/WilliamVision/Datasets/balanced_letter_data_set_simplechars_nPixels_28_method_INTER_CUBIC_nSamples_60635_noisy_False.txt'
labels, data = get_data_set_label_and_data(filepath, -1)
(X_train, y_train), (X_test, y_test) = get_training_and_test_sets(labels, data, 0.85)
y_train = y_train.astype(int)
y_test = y_test.astype(int)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
# print(X_train[0].reshape((28,28))*255)
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
rescaled = X_train[idx].reshape((28,28)) * 256 + np.ones((28,28)) * 128
plt.imshow(rescaled.astype('uint8'), cmap='gray')
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 50000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
mean_image_rescaled = mean_image.reshape((28,28)) * 256 + np.ones((28,28)) * 128
plt.imshow(mean_image_rescaled.astype('uint8'), cmap='gray') # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
###Output
(50000, 785) (1000, 785) (1000, 785) (500, 785)
###Markdown
SVM ClassifierYour code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. As you can see, we have prefilled the function `svm_loss_naive` which uses for loops to evaluate the multiclass SVM loss function.
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(785, 181) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.000005)
print('loss: %f' % (loss, ))
# print(grad[:10,1])
###Output
loss: 179.985554
###Markdown
The `grad` returned from the function above is right now all zero. Derive and implement the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function.To check that you have correctly implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
numerical: 0.297766 analytic: 0.297766, relative error: 3.009617e-08
numerical: 0.778961 analytic: 0.778961, relative error: 7.323072e-09
numerical: -0.136687 analytic: -0.136687, relative error: 6.356233e-08
numerical: -0.009187 analytic: -0.009187, relative error: 2.845602e-08
numerical: 0.145242 analytic: 0.145242, relative error: 2.352126e-08
numerical: 0.418984 analytic: 0.418984, relative error: 2.048416e-08
numerical: -0.229734 analytic: -0.229734, relative error: 4.357224e-09
numerical: -0.158148 analytic: -0.158148, relative error: 5.700345e-08
numerical: 0.097734 analytic: 0.097734, relative error: 3.114900e-09
numerical: -0.055742 analytic: -0.055742, relative error: 1.359367e-07
numerical: -0.390616 analytic: -0.390616, relative error: 2.015425e-08
numerical: -0.101577 analytic: -0.101577, relative error: 1.956386e-09
numerical: -0.208174 analytic: -0.208174, relative error: 2.084789e-08
numerical: -0.091333 analytic: -0.091333, relative error: 6.766472e-08
numerical: -0.500969 analytic: -0.500969, relative error: 1.286073e-08
numerical: -0.139017 analytic: -0.139017, relative error: 3.164754e-08
numerical: 0.399977 analytic: 0.399977, relative error: 4.206750e-10
numerical: 0.765758 analytic: 0.765758, relative error: 1.122580e-08
numerical: -0.070041 analytic: -0.070041, relative error: 6.052118e-08
numerical: -0.387532 analytic: -0.387532, relative error: 2.566886e-08
###Markdown
Inline Question 1:It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable***Your Answer:** *fill this in.*Differentiability implies continuity and smoothness of the function, and the hinge function is not smooth at one point (in each dimension), so technically the gradient is not defined at that point. As a result, the numerical gradient check can be unstable there.
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# The losses should match but your vectorized implementation should be much faster.
print('difference: %f' % (loss_naive - loss_vectorized))
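# (Sketch, for reference only.) One common fully-vectorized form of the hinge loss; the
# function name is illustrative and the regularization convention (reg * ||W||^2 here,
# sometimes 0.5 * reg * ||W||^2) may differ from your linear_svm.py.
def svm_loss_vectorized_sketch(W, X, y, reg):
    num_train = X.shape[0]
    scores = X.dot(W)                                    # (N, C) class scores
    correct = scores[np.arange(num_train), y][:, None]   # (N, 1) correct-class scores
    margins = np.maximum(0, scores - correct + 1)        # hinge with delta = 1
    margins[np.arange(num_train), y] = 0                 # the correct class contributes 0
    return margins.sum() / num_train + reg * np.sum(W * W)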
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss and gradient: computed in %fs' % (toc - tic))
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss and gradient: computed in %fs' % (toc - tic))
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('difference: %f' % difference)
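# (Sketch, for reference only.) The matching vectorized gradient: build an indicator matrix
# of positive margins, put minus the row sum in each row's correct-class column, then a
# single matrix product gives dW. Names and the regularization term (2 * reg * W for
# reg * ||W||^2) are illustrative.
def svm_grad_vectorized_sketch(W, X, y, reg):
    num_train = X.shape[0]
    scores = X.dot(W)
    correct = scores[np.arange(num_train), y][:, None]
    margins = np.maximum(0, scores - correct + 1)
    margins[np.arange(num_train), y] = 0
    binary = (margins > 0).astype(float)                 # 1 where a wrong class has positive margin
    binary[np.arange(num_train), y] = -binary.sum(axis=1)
    return X.T.dot(binary) / num_train + 2 * reg * W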
###Output
Naive loss and gradient: computed in 0.103306s
X
(500, 3073)
y
(500,)
W
(3073, 10)
Scores
(500, 10)
correct_class_score
(500,)
margins
(500, 10)
indicators
(500, 10)
dW
(3073, 10)
Vectorized loss and gradient: computed in 0.006128s
difference: 0.000000
###Markdown
Stochastic Gradient DescentWe now have vectorized and efficient expressions for the loss and the gradient, and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
import time
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
num_iters=1500, verbose=True)
toc = time.time()
print('That took %fs' % (toc - tic))
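# (Sketch, for illustration only.) Inside LinearClassifier.train(), minibatch SGD typically
# looks like the few steps below; W_sgd, the batch size and the step size are illustrative
# and this is not the assignment's exact implementation.
W_sgd = 0.0001 * np.random.randn(3073, 10)
for it in range(5):
    idx = np.random.choice(X_train.shape[0], 200)        # sample a minibatch (with replacement)
    loss_b, grad_b = svm_loss_vectorized(W_sgd, X_train[idx], y_train[idx], 2.5e4)
    W_sgd -= 1e-7 * grad_b                               # vanilla parameter update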
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = svm.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))
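# (Sketch.) LinearSVM.predict is usually just an argmax over class scores; shown here on the
# dev split for illustration, reusing the trained svm.W.
y_dev_pred_sketch = np.argmax(X_dev.dot(svm.W), axis=1)
print('dev accuracy (sketch): %f' % np.mean(y_dev == y_dev_pred_sketch))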
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [2.5e4, 5e4]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
for learning_rate in learning_rates:
for reg in regularization_strengths:
print('lr %e reg %e' % (
learning_rate, reg,))
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=learning_rate, reg=reg,
num_iters=1500, verbose=True)
y_train_pred = svm.predict(X_train)
y_val_pred = svm.predict(X_val)
accuracy_train = np.mean(y_train == y_train_pred)
accuracy_val = np.mean(y_val == y_val_pred)
results[(learning_rate, reg)] = (accuracy_train, accuracy_val)
if best_val < accuracy_val:
best_val = accuracy_val
best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
print(w.shape)
w = w.reshape(28, 28, 1, 181)
print(w.shape)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'), cmap='gray')
plt.axis('off')
plt.title(classes[i])
###Output
(784, 181)
(28, 28, 1, 181)
###Markdown
Multiclass Support Vector Machine exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](https://course.cse.ust.hk/comp4901j/Password_Only/programs/assignment1/index.html) on the course website.*In this exercise you will: - implement a fully-vectorized **loss function** for the SVM- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** using numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
###Output
(49000, 3073) (1000, 3073) (1000, 3073) (500, 3073)
###Markdown
SVM ClassifierYour code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. As you can see, we have prefilled the function `svm_loss_naive`, which uses for loops to evaluate the multiclass SVM loss function.
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.000005)
print('loss: %f' % (loss, ))
###Output
loss: 9.362444
###Markdown
The `grad` returned from the function above is right now all zero. Derive the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function. To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
numerical: 8.477690 analytic: 8.477690, relative error: 2.073100e-11
numerical: -16.684845 analytic: -16.684845, relative error: 4.075598e-12
numerical: -22.221184 analytic: -22.221184, relative error: 7.630413e-12
numerical: 14.304704 analytic: 14.304704, relative error: 4.722686e-12
numerical: -16.160203 analytic: -16.160203, relative error: 6.036240e-12
numerical: -21.835189 analytic: -21.835189, relative error: 7.859345e-12
numerical: -1.591776 analytic: -1.591776, relative error: 3.279437e-10
numerical: 7.525420 analytic: 7.525420, relative error: 4.315197e-11
numerical: 18.567578 analytic: 18.577234, relative error: 2.599538e-04
numerical: 3.759656 analytic: 3.759656, relative error: 1.088465e-10
numerical: -12.110043 analytic: -12.110043, relative error: 1.221252e-11
numerical: 16.576028 analytic: 16.576028, relative error: 2.061068e-11
numerical: -25.648613 analytic: -25.648613, relative error: 1.355527e-11
numerical: 14.088864 analytic: 14.088864, relative error: 1.541836e-11
numerical: 7.090620 analytic: 7.090620, relative error: 1.093529e-12
numerical: -19.555182 analytic: -19.555182, relative error: 7.420479e-12
numerical: 21.246970 analytic: 21.246970, relative error: 1.179519e-11
numerical: 10.892472 analytic: 10.892472, relative error: 2.724026e-12
numerical: 4.700954 analytic: 4.696455, relative error: 4.786661e-04
numerical: -50.074655 analytic: -50.074655, relative error: 5.869201e-12
###Markdown
Inline Question 1:It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable***Your Answer:** The numerical gradient check can fail because the SVM loss function is not differentiable everywhere; specifically, the hinge term is not differentiable where the margin is exactly $0$. For example, consider $L(x) = \max(0, mx)$ for some $m \neq 0$. Under the subgradient convention used in the code, $\frac{\partial L}{\partial x} = m\,\mathbb{1}(mx > 0)$, so $\left.\frac{\partial L}{\partial x}\right|_{x=0} = 0$. The numerical method instead computes $\frac{L(x+h) - L(x-h)}{2h}$, which at $x = 0$ equals $\frac{m}{2} \neq 0$. A dimension that lands on (or numerically straddles) such a kink can therefore produce a mismatch; as long as this happens rarely, it is not a reason for concern.
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# The losses should match but your vectorized implementation should be much faster.
print('difference: %f' % (loss_naive - loss_vectorized))
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss and gradient: computed in %fs' % (toc - tic))
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss and gradient: computed in %fs' % (toc - tic))
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('difference: %f' % difference)
###Output
Naive loss and gradient: computed in 0.136631s
Vectorized loss and gradient: computed in 0.002012s
difference: 0.000000
###Markdown
Stochastic Gradient DescentWe now have vectorized and efficient expressions for the loss and the gradient, and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
num_iters=1500, verbose=True)
toc = time.time()
print('That took %fs' % (toc - tic))
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = svm.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [0.5*1.184483e-07, 1.184483e-07,1.38e-7,2e-7]
regularization_strengths = [1e4 ,1.5e4,2e4,2.5e4, 3e4, 4e4, 5e4]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
for l in learning_rates:
    for r in regularization_strengths:
        svm = LinearSVM()
        svm.train(X_train, y_train, learning_rate=l, reg=r, num_iters=2000, batch_size=300)
        y_train_pred = svm.predict(X_train)
        y_val_pred = svm.predict(X_val)
        training_accuracy = np.mean(y_train == y_train_pred)
        val_accuracy = np.mean(y_val == y_val_pred)
        results[(l, r)] = (training_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____
###Markdown
Multiclass Support Vector Machine exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*In this exercise you will: - implement a fully-vectorized **loss function** for the SVM- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** using numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
###Output
(49000, 3073) (1000, 3073) (1000, 3073) (500, 3073)
###Markdown
SVM ClassifierYour code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. As you can see, we have prefilled the function `svm_loss_naive`, which uses for loops to evaluate the multiclass SVM loss function.
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.000005)
print('loss: %f' % (loss, ))
###Output
loss: 9.218518
###Markdown
The `grad` returned from the function above is right now all zero. Derive the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function. To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
numerical: 32.933542 analytic: 32.933542, relative error: 6.774241e-12
numerical: -13.817871 analytic: -13.807058, relative error: 3.914106e-04
numerical: 19.180142 analytic: 19.206111, relative error: 6.765345e-04
numerical: -18.135090 analytic: -18.135090, relative error: 9.468469e-12
numerical: 20.501814 analytic: 20.573567, relative error: 1.746864e-03
numerical: -24.880827 analytic: -24.880827, relative error: 6.945267e-13
numerical: 7.800443 analytic: 7.732724, relative error: 4.359621e-03
numerical: 40.537779 analytic: 40.480095, relative error: 7.119935e-04
numerical: -0.915586 analytic: -0.915586, relative error: 4.425898e-10
numerical: 41.961222 analytic: 41.994422, relative error: 3.954559e-04
numerical: -10.846583 analytic: -10.846583, relative error: 3.654814e-11
numerical: -17.505549 analytic: -17.505549, relative error: 9.645097e-12
numerical: 12.917935 analytic: 12.988847, relative error: 2.737189e-03
numerical: -1.635314 analytic: -1.635314, relative error: 1.008626e-10
numerical: -1.600106 analytic: -1.567532, relative error: 1.028341e-02
numerical: -8.594107 analytic: -8.594107, relative error: 4.727605e-11
numerical: 2.258499 analytic: 2.317126, relative error: 1.281300e-02
numerical: 4.458605 analytic: 4.429122, relative error: 3.317179e-03
numerical: -17.691759 analytic: -17.691759, relative error: 1.273749e-11
numerical: 13.636523 analytic: 13.716715, relative error: 2.931691e-03
###Markdown
Inline Question 1:It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable***Your Answer:** *fill this in.*
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# The losses should match but your vectorized implementation should be much faster.
print('difference: %f' % (loss_naive - loss_vectorized))
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss and gradient: computed in %fs' % (toc - tic))
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss and gradient: computed in %fs' % (toc - tic))
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('difference: %f' % difference)
###Output
Naive loss and gradient: computed in 0.169785s
Vectorized loss and gradient: computed in 0.015425s
difference: 0.000000
###Markdown
Stochastic Gradient DescentWe now have vectorized and efficient expressions for the loss and the gradient, and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
num_iters=1500, verbose=True)
toc = time.time()
print('That took %fs' % (toc - tic))
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = svm.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [2.5e4, 5e4]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
pass
num_learning_rates = len(learning_rates)
num_regularization = len(regularization_strengths)
for i in range(num_learning_rates):
for j in range(num_regularization):
lr = learning_rates[i]
reg = regularization_strengths[j]
svm = LinearSVM()
svm.train(X_train, y_train, learning_rate=lr, reg=reg, num_iters=1500,verbose=False)
train_accuracy = np.mean(svm.predict(X_train) == y_train)
val_accuracy = np.mean(svm.predict(X_val) == y_val)
results[(lr, reg)] = (train_accuracy, val_accuracy)
if val_accuracy > best_val:
best_val = val_accuracy
best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____
###Markdown
Multiclass Support Vector Machine exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*In this exercise you will: - implement a fully-vectorized **loss function** for the SVM- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** using numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print 'Training data shape: ', X_train.shape
print 'Validation data shape: ', X_val.shape
print 'Test data shape: ', X_test.shape
print 'dev data shape: ', X_dev.shape
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print mean_image[:10] # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print X_train.shape, X_val.shape, X_test.shape, X_dev.shape
###Output
_____no_output_____
###Markdown
SVM ClassifierYour code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. As you can see, we have prefilled the function `svm_loss_naive`, which uses for loops to evaluate the multiclass SVM loss function.
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.00001)
print 'loss: %f' % (loss, )
###Output
_____no_output_____
###Markdown
The `grad` returned from the function above is right now all zero. Derive the gradient for the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function. To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 1e2)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 1e2)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
_____no_output_____
###Markdown
Inline Question 1:It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable***Your Answer:** *fill this in.*
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss: %e computed in %fs' % (loss_naive, toc - tic)
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)
# The losses should match but your vectorized implementation should be much faster.
print 'difference: %f' % (loss_naive - loss_vectorized)
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss and gradient: computed in %fs' % (toc - tic)
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss and gradient: computed in %fs' % (toc - tic)
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print 'difference: %f' % difference
###Output
_____no_output_____
###Markdown
Stochastic Gradient DescentWe now have vectorized and efficient expressions for the loss and the gradient, and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=5e4,
num_iters=1500, verbose=True)
toc = time.time()
print 'That took %fs' % (toc - tic)
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print 'training accuracy: %f' % (np.mean(y_train == y_train_pred), )
y_val_pred = svm.predict(X_val)
print 'validation accuracy: %f' % (np.mean(y_val == y_val_pred), )
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [5e4, 1e5]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
pass
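# (Sketch.) One possible grid search over the two lists above; num_iters and the overall
# approach are illustrative, not the only valid solution.
for lr_candidate in learning_rates:
    for reg_candidate in regularization_strengths:
        model = LinearSVM()
        model.train(X_train, y_train, learning_rate=lr_candidate, reg=reg_candidate,
                    num_iters=500, verbose=False)
        train_acc = np.mean(model.predict(X_train) == y_train)
        val_acc = np.mean(model.predict(X_val) == y_val)
        results[(lr_candidate, reg_candidate)] = (train_acc, val_acc)
        if val_acc > best_val:
            best_val = val_acc
            best_svm = model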
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print 'linear SVM on raw pixels final test set accuracy: %f' % test_accuracy
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in xrange(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____
###Markdown
Multiclass Support Vector Machine exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*In this exercise you will: - implement a fully-vectorized **loss function** for the SVM- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** using numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
_____no_output_____
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print 'Training data shape: ', X_train.shape
print 'Validation data shape: ', X_val.shape
print 'Test data shape: ', X_test.shape
print 'dev data shape: ', X_dev.shape
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
# print mean_image.shape
print mean_image[:10] # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print X_train.shape, X_val.shape, X_test.shape, X_dev.shape
###Output
(49000, 3073) (1000, 3073) (1000, 3073) (500, 3073)
###Markdown
SVM ClassifierYour code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. As you can see, we have prefilled the function `svm_loss_naive` which uses for loops to evaluate the multiclass SVM loss function.
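For orientation, a minimal sketch of a loop-based multiclass SVM loss of this kind (assuming a margin of 1 and L2 regularization of the form reg * sum(W*W); an illustrative sketch, not the code shipped with the assignment):
###Code
import numpy as np

def svm_loss_naive_sketch(W, X, y, reg):
    """W: (D, C) weights, X: (N, D) data, y: (N,) integer labels, reg: L2 strength."""
    num_train = X.shape[0]
    num_classes = W.shape[1]
    loss = 0.0
    for i in range(num_train):
        scores = X[i].dot(W)                    # class scores for one example
        correct_class_score = scores[y[i]]
        for j in range(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1   # delta = 1
            if margin > 0:
                loss += margin
    loss = loss / num_train + reg * np.sum(W * W)   # average data loss + regularization
    return loss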
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.00001)
print 'loss: %f' % (loss, )
###Output
loss: 9.302914
###Markdown
The `grad` returned from the function above is right now all zero. Derive the gradient of the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function.To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
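For the derivation: every class j != y_i with a positive margin contributes +X[i] to dW[:, j] and -X[i] to dW[:, y_i], and those two updates are what gets interleaved into the existing loop. The provided grad_check_sparse helper works roughly as follows: perturb a few randomly chosen entries of W by +-h and compare the centered difference of the loss with the analytic gradient. A self-contained sketch of that idea (not the actual helper in cs231n/gradient_check.py):
###Code
import numpy as np

def grad_check_sparse_sketch(f, W, analytic_grad, num_checks=10, h=1e-5):
    """Compare a few random entries of analytic_grad against centered differences of f(W)."""
    for _ in range(num_checks):
        ix = tuple(np.random.randint(n) for n in W.shape)
        oldval = W[ix]
        W[ix] = oldval + h
        fxph = f(W)                      # loss at W + h in this coordinate
        W[ix] = oldval - h
        fxmh = f(W)                      # loss at W - h
        W[ix] = oldval                   # restore the entry
        grad_numerical = (fxph - fxmh) / (2 * h)
        grad_analytic = analytic_grad[ix]
        rel_error = abs(grad_numerical - grad_analytic) / (
            abs(grad_numerical) + abs(grad_analytic) + 1e-12)
        print('numerical: %f analytic: %f, relative error: %e'
              % (grad_numerical, grad_analytic, rel_error))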
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 1e2)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 1e2)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
numerical: -1.534094 analytic: -1.534094, relative error: 2.423582e-11
numerical: 16.811570 analytic: 16.811570, relative error: 1.340300e-11
numerical: -19.896992 analytic: -19.896992, relative error: 9.103107e-12
numerical: 12.678518 analytic: 12.678518, relative error: 2.388863e-11
numerical: 25.779107 analytic: 25.779107, relative error: 4.251966e-12
numerical: -2.709624 analytic: -2.709624, relative error: 1.005212e-10
numerical: 6.569497 analytic: 6.569497, relative error: 1.484142e-11
numerical: -1.877628 analytic: -1.877628, relative error: 3.654931e-11
numerical: 5.449144 analytic: 5.449144, relative error: 5.959302e-11
numerical: 11.322111 analytic: 11.322111, relative error: 2.291512e-11
numerical: -3.760336 analytic: -3.760336, relative error: 5.716901e-11
numerical: 12.155717 analytic: 12.155717, relative error: 3.986879e-11
numerical: -4.137343 analytic: -4.137343, relative error: 2.889550e-11
numerical: 1.668019 analytic: 1.668019, relative error: 1.774222e-10
numerical: -0.493644 analytic: -0.493644, relative error: 3.578404e-10
numerical: 1.302989 analytic: 1.302989, relative error: 1.532105e-10
numerical: -38.874396 analytic: -38.874396, relative error: 6.222623e-12
numerical: 3.453485 analytic: 3.453485, relative error: 1.269574e-10
numerical: 18.935984 analytic: 18.935984, relative error: 2.235176e-11
numerical: -3.800693 analytic: -3.800693, relative error: 1.335838e-10
###Markdown
Inline Question 1:It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? *Hint: the SVM loss function is not strictly speaking differentiable***Your Answer:** Such a discrepancy can occur because the hinge function max(0, x) is not differentiable at x = 0 (its derivative jumps like a step function there), so a numerical estimate taken across that kink will not match the analytic (sub)gradient. A simple one-dimensional example is f(x) = max(0, x) checked at a point very close to 0. Because data points rarely sit exactly on the margin in practice, this is not a reason for concern.
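A concrete one-dimensional illustration: at the kink of f(x) = max(0, x), a centered difference averages the slopes on the two sides and disagrees with the analytically reported (sub)gradient, for example:
###Code
f = lambda x: max(0.0, x)
x, h = 1e-6, 1e-5                            # a point closer to the kink than the step size
numeric = (f(x + h) - f(x - h)) / (2 * h)    # ~0.55, averaging slopes 0 and 1
analytic = 1.0 if x > 0 else 0.0             # what a hinge-loss backward pass reports
print('numeric %.2f vs analytic %.2f' % (numeric, analytic))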
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss: %e computed in %fs' % (loss_naive, toc - tic)
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic)
# The losses should match but your vectorized implementation should be much faster.
print 'difference: %f' % (loss_naive - loss_vectorized)
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Naive loss and gradient: computed in %fs' % (toc - tic)
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.00001)
toc = time.time()
print 'Vectorized loss and gradient: computed in %fs' % (toc - tic)
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print 'difference: %f' % difference
###Output
Naive loss and gradient: computed in 0.053374s
Vectorized loss and gradient: computed in 0.006626s
difference: 0.000000
###Markdown
Stochastic Gradient DescentWe now have vectorized and efficient expressions for the loss, the gradient and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
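At its core, the training loop to implement is sampled minibatch gradient descent; a bare-bones sketch of the idea (illustrative only, the class in linear_classifier.py also handles weight initialization, loss history and verbosity):
###Code
import numpy as np

def sgd_train_sketch(loss_fn, W, X, y, learning_rate=1e-7, reg=2.5e4,
                     num_iters=1500, batch_size=200):
    """loss_fn(W, X_batch, y_batch, reg) must return (loss, dW)."""
    num_train = X.shape[0]
    loss_history = []
    for it in range(num_iters):
        idx = np.random.choice(num_train, batch_size, replace=True)  # sample a minibatch
        loss, dW = loss_fn(W, X[idx], y[idx], reg)
        loss_history.append(loss)
        W -= learning_rate * dW                                      # vanilla SGD step
    return W, loss_history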
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=5e4,
num_iters=1500, verbose=True)
toc = time.time()
print 'That took %fs' % (toc - tic)
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print 'training accuracy: %f' % (np.mean(y_train == y_train_pred), )
y_val_pred = svm.predict(X_val)
print 'validation accuracy: %f' % (np.mean(y_val == y_val_pred), )
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = 10**(-1*np.random.uniform(5,8,10))
regularization_strengths = 10**np.random.uniform(3,5,10)
print learning_rates
print regularization_strengths
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
for ii in xrange(len(learning_rates)):
for jj in xrange(len(regularization_strengths)):
svm = LinearSVM()
lr = learning_rates[ii]
reg = regularization_strengths[jj]
loss_hist = svm.train(X_train, y_train, learning_rate=lr, reg=reg,
num_iters=1500, verbose=True)
y_train_pred = svm.predict(X_train)
train_accuracy = np.mean(y_train == y_train_pred)
y_val_pred = svm.predict(X_val)
val_accuracy = np.mean(y_val == y_val_pred)
results[(lr,reg)] = (train_accuracy,val_accuracy)
if(val_accuracy > best_val):
best_val = val_accuracy
best_svm = svm
best_ii = ii
best_jj = jj
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print 'lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy)
print 'best validation accuracy achieved during cross-validation: %f' % best_val
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print 'linear SVM on raw pixels final test set accuracy: %f' % test_accuracy
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in xrange(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____
###Markdown
Multiclass Support Vector Machine exercise*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*In this exercise you will: - implement a fully-vectorized **loss function** for the SVM- implement the fully-vectorized expression for its **analytic gradient**- **check your implementation** using numerical gradient- use a validation set to **tune the learning rate and regularization** strength- **optimize** the loss function with **SGD**- **visualize** the final learned weights
###Code
# Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
from __future__ import print_function
# This is a bit of magic to make matplotlib figures appear inline in the
# notebook rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
CIFAR-10 Data Loading and Preprocessing
###Code
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
# Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
try:
del X_train, y_train
del X_test, y_test
print('Clear previously loaded data.')
except:
pass
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Split the data into train, val, and test sets. In addition we will
# create a small development set as a subset of the training data;
# we can use this for development so our code runs faster.
num_training = 49000
num_validation = 1000
num_test = 1000
num_dev = 500
# Our validation set will be num_validation points from the original
# training set.
mask = range(num_training, num_training + num_validation)
X_val = X_train[mask]
y_val = y_train[mask]
# Our training set will be the first num_train points from the original
# training set.
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
# We will also make a development set, which is a small subset of
# the training set.
mask = np.random.choice(num_training, num_dev, replace=False)
X_dev = X_train[mask]
y_dev = y_train[mask]
# We use the first num_test points of the original test set as our
# test set.
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Preprocessing: reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_val = np.reshape(X_val, (X_val.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
X_dev = np.reshape(X_dev, (X_dev.shape[0], -1))
# As a sanity check, print out the shapes of the data
print('Training data shape: ', X_train.shape)
print('Validation data shape: ', X_val.shape)
print('Test data shape: ', X_test.shape)
print('dev data shape: ', X_dev.shape)
# Preprocessing: subtract the mean image
# first: compute the image mean based on the training data
mean_image = np.mean(X_train, axis=0)
print(mean_image[:10]) # print a few of the elements
plt.figure(figsize=(4,4))
plt.imshow(mean_image.reshape((32,32,3)).astype('uint8')) # visualize the mean image
plt.show()
# second: subtract the mean image from train and test data
X_train -= mean_image
X_val -= mean_image
X_test -= mean_image
X_dev -= mean_image
# third: append the bias dimension of ones (i.e. bias trick) so that our SVM
# only has to worry about optimizing a single weight matrix W.
X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])
print(X_train.shape, X_val.shape, X_test.shape, X_dev.shape)
###Output
(49000, 3073) (1000, 3073) (1000, 3073) (500, 3073)
###Markdown
SVM ClassifierYour code for this section will all be written inside **cs231n/classifiers/linear_svm.py**. As you can see, we have prefilled the function `svm_loss_naive` which uses for loops to evaluate the multiclass SVM loss function.
###Code
# Evaluate the naive implementation of the loss we provided for you:
from cs231n.classifiers.linear_svm import svm_loss_naive
import time
# generate a random SVM weight matrix of small numbers
W = np.random.randn(3073, 10) * 0.0001
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.000005)
print('loss: %f' % (loss, ))
###Output
loss: 9.280441
###Markdown
The `grad` returned from the function above is right now all zero. Derive the gradient of the SVM cost function and implement it inline inside the function `svm_loss_naive`. You will find it helpful to interleave your new code inside the existing function.To check that you have implemented the gradient correctly, you can numerically estimate the gradient of the loss function and compare the numeric estimate to the gradient that you computed. We have provided code that does this for you:
###Code
# Once you've implemented the gradient, recompute it with the code below
# and gradient check it with the function we provided for you
# Compute the loss and its gradient at W.
loss, grad = svm_loss_naive(W, X_dev, y_dev, 0.0)
# Numerically compute the gradient along several randomly chosen dimensions, and
# compare them with your analytically computed gradient. The numbers should match
# almost exactly along all dimensions.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad)
# do the gradient check once again with regularization turned on
# you didn't forget the regularization gradient did you?
loss, grad = svm_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: svm_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad)
###Output
numerical: -25.905664 analytic: -25.905664, relative error: 6.745183e-12
numerical: -2.031098 analytic: -2.031098, relative error: 9.475742e-12
numerical: -7.934114 analytic: -7.934114, relative error: 4.117052e-11
numerical: 18.141369 analytic: 18.141369, relative error: 1.025117e-11
numerical: -4.385044 analytic: -4.385044, relative error: 2.410555e-11
numerical: -4.359201 analytic: -4.359201, relative error: 7.233625e-11
numerical: 4.493470 analytic: 4.493470, relative error: 3.347280e-11
numerical: 0.128675 analytic: 0.128675, relative error: 1.037817e-09
numerical: 1.818212 analytic: 1.818212, relative error: 8.329075e-11
numerical: 1.501132 analytic: 1.501132, relative error: 2.012700e-10
numerical: -6.213597 analytic: -6.213597, relative error: 2.539134e-12
numerical: -26.191392 analytic: -26.191392, relative error: 1.296054e-11
numerical: -45.098234 analytic: -45.098234, relative error: 7.112480e-12
numerical: -8.327072 analytic: -8.327072, relative error: 2.307036e-11
numerical: -19.498402 analytic: -19.498402, relative error: 1.013773e-11
numerical: -18.054316 analytic: -18.054316, relative error: 1.404472e-11
numerical: 5.913065 analytic: 5.913065, relative error: 6.658937e-12
numerical: -47.250811 analytic: -47.250811, relative error: 6.080562e-12
numerical: -8.748079 analytic: -8.748079, relative error: 2.805707e-11
numerical: 6.839345 analytic: 6.839345, relative error: 6.474324e-13
###Markdown
Inline Question 1:It is possible that once in a while a dimension in the gradcheck will not match exactly. What could such a discrepancy be caused by? Is it a reason for concern? What is a simple example in one dimension where a gradient check could fail? How would changing the margin affect the frequency of this happening? *Hint: the SVM loss function is not strictly speaking differentiable***Your Answer:** If we are at a point x so close to 0 that x + h > 0 while x - h <= 0, the gradcheck will not match exactly, because the centered difference averages the very different slopes on the two sides of the kink while the analytic code reports the (sub)gradient of only one side. This happens because the hinge loss is not differentiable at 0; since such points are rare, it is not a reason for concern.
###Code
# Next implement the function svm_loss_vectorized; for now only compute the loss;
# we will implement the gradient in a moment.
tic = time.time()
loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.linear_svm import svm_loss_vectorized
tic = time.time()
loss_vectorized, _ = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# The losses should match but your vectorized implementation should be much faster.
print('difference: %f' % (loss_naive - loss_vectorized))
# Complete the implementation of svm_loss_vectorized, and compute the gradient
# of the loss function in a vectorized way.
# The naive implementation and the vectorized implementation should match, but
# the vectorized version should still be much faster.
tic = time.time()
_, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Naive loss and gradient: computed in %fs' % (toc - tic))
tic = time.time()
_, grad_vectorized = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('Vectorized loss and gradient: computed in %fs' % (toc - tic))
# The loss is a single number, so it is easy to compare the values computed
# by the two implementations. The gradient on the other hand is a matrix, so
# we use the Frobenius norm to compare them.
difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('difference: %f' % difference)
###Output
Naive loss and gradient: computed in 0.091289s
Vectorized loss and gradient: computed in 0.002349s
difference: 0.000000
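###Markdown
For reference, the kind of fully vectorized loss and gradient being timed above can be written without any Python loops; a sketch (margin 1, mean data loss, L2 regularization reg * sum(W*W); not the assignment's reference solution):
###Code
import numpy as np

def svm_loss_vectorized_sketch(W, X, y, reg):
    num_train = X.shape[0]
    scores = X.dot(W)                                    # (N, C)
    correct = scores[np.arange(num_train), y][:, None]   # (N, 1) correct-class scores
    margins = np.maximum(0, scores - correct + 1)        # (N, C) hinge margins
    margins[np.arange(num_train), y] = 0                 # do not count the true class
    loss = margins.sum() / num_train + reg * np.sum(W * W)

    # Each positive margin adds +x_i to column j and -x_i to column y_i of the gradient.
    binary = (margins > 0).astype(X.dtype)
    binary[np.arange(num_train), y] = -binary.sum(axis=1)
    dW = X.T.dot(binary) / num_train + 2 * reg * W
    return loss, dW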
###Markdown
Stochastic Gradient DescentWe now have vectorized and efficient expressions for the loss, the gradient and our gradient matches the numerical gradient. We are therefore ready to do SGD to minimize the loss.
###Code
# In the file linear_classifier.py, implement SGD in the function
# LinearClassifier.train() and then run it with the code below.
from cs231n.classifiers import LinearSVM
svm = LinearSVM()
tic = time.time()
loss_hist = svm.train(X_train, y_train, learning_rate=1e-7, reg=2.5e4,
num_iters=1500, verbose=True)
toc = time.time()
print('That took %fs' % (toc - tic))
# A useful debugging strategy is to plot the loss as a function of
# iteration number:
plt.plot(loss_hist)
plt.xlabel('Iteration number')
plt.ylabel('Loss value')
plt.show()
# Write the LinearSVM.predict function and evaluate the performance on both the
# training and validation set
y_train_pred = svm.predict(X_train)
print('training accuracy: %f' % (np.mean(y_train == y_train_pred), ))
y_val_pred = svm.predict(X_val)
print('validation accuracy: %f' % (np.mean(y_val == y_val_pred), ))
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of about 0.4 on the validation set.
learning_rates = [1e-7, 5e-5]
regularization_strengths = [2.5e4, 5e4]
# results is dictionary mapping tuples of the form
# (learning_rate, regularization_strength) to tuples of the form
# (training_accuracy, validation_accuracy). The accuracy is simply the fraction
# of data points that are correctly classified.
results = {}
best_val = -1 # The highest validation accuracy that we have seen so far.
best_svm = None # The LinearSVM object that achieved the highest validation rate.
################################################################################
# TODO: #
# Write code that chooses the best hyperparameters by tuning on the validation #
# set. For each combination of hyperparameters, train a linear SVM on the #
# training set, compute its accuracy on the training and validation sets, and #
# store these numbers in the results dictionary. In addition, store the best #
# validation accuracy in best_val and the LinearSVM object that achieves this #
# accuracy in best_svm. #
# #
# Hint: You should use a small value for num_iters as you develop your #
# validation code so that the SVMs don't take much time to train; once you are #
# confident that your validation code works, you should rerun the validation #
# code with a larger value for num_iters. #
################################################################################
for i in range(10):
lr = 10**np.random.uniform(-8, -7)
reg = 10**np.random.uniform(3, 5)
svm = LinearSVM()
loss_hist = svm.train(X_train, y_train, learning_rate=lr, reg=reg,
num_iters=1500, verbose=True)
y_train_pred = svm.predict(X_train)
y_val_pred = svm.predict(X_val)
train_accuracy = np.mean(y_train == y_train_pred)
val_accuracy = np.mean(y_val == y_val_pred)
results[(lr, reg)] = (train_accuracy, val_accuracy)
if val_accuracy > best_val:
best_val = val_accuracy
best_svm = svm
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# Visualize the cross-validation results
import math
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
# Evaluate the best svm on test set
y_test_pred = best_svm.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('linear SVM on raw pixels final test set accuracy: %f' % test_accuracy)
# Visualize the learned weights for each class.
# Depending on your choice of learning rate and regularization strength, these may
# or may not be nice to look at.
w = best_svm.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
###Output
_____no_output_____ |
final_feature_based_FN.ipynb | ###Markdown
Loading the data
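The import/setup cell of this notebook is not shown as captured; based on the names used below (pd, np, re, path, listdir, plt, nltk, tqdm, xgb, the sklearn helpers, and the tokenizer/nlp/finalstop objects), a plausible minimal setup would look roughly like the following. The tokenizer, spaCy model and stopword list in particular are guesses:
###Code
import re
from os import listdir, path

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import nltk
from nltk.tokenize import RegexpTokenizer
from tqdm import tqdm
import spacy
import xgboost as xgb

from sklearn import feature_extraction, metrics, naive_bayes, svm
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import classification_report

# Assumed objects used later in the notebook (exact choices are not recoverable
# from the captured cells):
tokenizer = RegexpTokenizer(r'\w+')                       # used as tokenizer.tokenize(...)
nlp = spacy.load('en_core_web_sm')                        # used by the lemmatization helper
finalstop = set(nltk.corpus.stopwords.words('english'))   # used by remove_stopwords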
###Code
directory="C:\\Users\\anh21\\Fake_news\\fake news challenge (FNC-1)\\Qatar_data\\"
train_folders=['legit-training\\','fake-training\\']
test_folders=['fake-test\\','legit-test\\']
def load_dataset(folders,directory,mode='train'):
# heading=defaultdict(list)
# body=defaultdict(list)
# toke_sentence=defaultdict(list)
files=['training_task1_b_amir.tsv','testing_task1_b_amir.tsv']
if mode=='train':
file=files[0]
else:
file=files[1]
if path.exists(file):
df= pd.read_csv(file, names=['Credibility', 'Heading', 'Body'], header=None, sep="\t", quoting=3)
else:
heading=[]
body=[]
domain=[]
credibility=[]
ids=0
for folder in folders:
for name in listdir(directory+folder):
names=re.findall("[a-zA-Z]+",name)
myfile = pd.read_csv(directory+folder+name,sep='\n\n', header=None,encoding='utf-8')
#addressing one word title
if len(tokenizer.tokenize(myfile[0][0]))<2:
artic=myfile[0][1].replace(' ','\n').split('\n')
#heading.append((myfile[0][0]+' '+artic[0]).lower())
#body.append((' '.join(artic[1:])).lower())
#addressing no title issue
elif len(myfile)==1:
#artic=re.split('[\n!?."-]+',myfile[0][0])
heading.append(np.nan)
body.append(myfile[0][0].replace('\r',''))#(' '.join(artic[:])).replace('\r','').lower()
# addressing hyperlinks
elif re.sub(r'^https?:\/\/.*[\r\n]*', '', myfile[0][0], flags=re.MULTILINE)=='':
artic=myfile[0][1].split('.')
heading.append((artic[0]))
body.append((' '.join(artic[1:])))
else:
heading.append(myfile[0][0].replace('\r',''))
body.append(myfile[0][1].replace('\r',''))
domain.append(names[0])
credibility.append(names[1])
ids+=1
df=pd.DataFrame(list(zip(body, heading,credibility,domain)),
columns =['Body', 'Heading','Credibility','Domain'])
df.to_csv('%s.tsv'%(file), sep = '\t')
return df
print("Loading training data")
train_data_=load_dataset(train_folders,directory)
print("Loading testing data")
test_data_=load_dataset(test_folders,directory,'test')
#%%
#domain_onehot=np.asarray(pd.get_dummies(domain), dtype = np.float32)
# print("Loading training data")
# train_data_ = pd.read_csv('training_task1_b_amir.tsv', names=['Credibility', 'Heading', 'Body'], header=None, sep="\t", quoting=3)
# print("Loading testing data")
# test_data = pd.read_csv('testing_task1_b_amir.tsv', names=['Credibility', 'Heading', 'Body'], header=None, sep="\t", quoting=3)
Y_train = pd.Categorical(train_data_['Credibility']).codes
Y_test = pd.Categorical(test_data_['Credibility']).codes  # use the loaded test dataframe; test_data only exists after cleaning below
# Helper functions for basic text preprocessing
from nltk.stem.wordnet import WordNetLemmatizer
lemma = WordNetLemmatizer()
def lemmatization(review):
lemma_result = []
for words in review:
doc = nlp(words)
for token in doc:
lemma_result.append(token.lemma_)
return lemma_result
def remove_stopwords(sent):
return [token for token in sent if token not in finalstop]
def remove_punctuations(data):
train = []
for i in range(len(data)):
sentence = ""
for char in data[i]:
if char.isalpha() or char== ' ':
sentence+=char
else:
sentence+=' '
train.append(sentence)
return train
def remove_extra_space(text):
"""
    Remove extra white space from text
    Example: " hey   are you  coming. ? " => "hey are you coming. ?"
Args:
text (str): text
Returns:
clean_text (str): clean text with removed extra white spaces
"""
try:
clean_text = ' '.join(text.strip().split())
except:
print(text)
return clean_text
def replace_digits_with_char(text, replace_char=''):
"""
    Replace digits with `replace_char`
    Example (with replace_char='d'): I will be there on 22 april. => I will be there on dd april.
Args:
text (str): text
replace_char (str): character with which digit has to be replaced
Returns:
clean_text (str): clean text with replaced char for digits
"""
regex_pattern = re.compile(r'[0-9]')
clean_text = regex_pattern.sub(replace_char, text)
return clean_text
def remove_single_char_word(text):
"""
Remove single character word from text
Example: I am in a home for 2 years => am in home for years
Args:
text (str): text
Returns:
(str): text with single char removed
"""
words = text.split()
filter_words = [word for word in words if len(word) > 1]
return " ".join(filter_words)
def data_cleaning(data):
"""
Separate the heading and the body of each article and apply basic text cleaning
Args:
data : dataframe
Returns:
: dataframe with preprocessed text
"""
#train_data_["Heading"]=train_data_["Heading"].str.lower()
data["Heading"]=data["Heading"].apply(remove_extra_space)
#print(train_data["Heading"].head())
data["Heading"]=data["Heading"].apply(replace_digits_with_char)
#print(train_data["Heading"].head())
data["Heading"]=data["Heading"].apply(remove_single_char_word)
#print(train_data["Heading"].head())
data["Heading"]=data["Heading"].apply(remove_punctuations).apply(lambda x:''.join(word for word in x))
print(data["Heading"].head(2))
# print(train_data["Heading"].head())
# train_data_["Heading"]=train_data_["Heading"].apply(lemmatization).apply(lambda x: ' '.join(x))
# print(train_data["Heading"].head(5))
#train_data_["Body"]=train_data_["Body"].str.lower()
data["Body"]=data["Body"].apply(remove_extra_space)
data["Body"]=data["Body"].apply(replace_digits_with_char)
data["Body"]=data["Body"].apply(remove_single_char_word)
data["Body"]=data["Body"].apply(remove_punctuations).apply(lambda x: ''.join(word for word in x))
# print('Lemmatization')
# train_data_["Body"]=train_data_["Body"].apply(lemmatization).apply(lambda x:' '.join(x))
return data
train_data_=data_cleaning(train_data_)
test_data=data_cleaning(test_data_)
#combine train and test data for final evaluation
train_data=pd.concat([train_data_,test_data])
###Output
0 Brazil qualify for World Cup after Coutinho an...
1 Uber Self Driving Car Tests Resume Three Days ...
Name: Heading, dtype: object
0 Stan Wawrinka proves victorious of Roger Federer
1 Trump Calls Democrats Smarter Than His Party
Name: Heading, dtype: object
###Markdown
Extract similarity features
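As a quick illustration of what the hand-crafted features in the next cells capture, here is a toy headline/body pair with a plain Jaccard-style token overlap, mirroring the logic of word_overlap_features defined below:
###Code
toy_headline = "uber resumes self driving car tests"
toy_body = "three days after an accident uber said its self driving car tests resume"
h_tokens = set(toy_headline.lower().split())
b_tokens = set(toy_body.lower().split())
overlap = len(h_tokens & b_tokens) / len(h_tokens | b_tokens)
print(round(overlap, 3))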
###Code
# Helper functions for similarity features between each article body and its heading
_wnl = nltk.WordNetLemmatizer()
def normalize_word(w):
return _wnl.lemmatize(w).lower()
def get_tokenized_lemmas(s):
return [normalize_word(t) for t in nltk.word_tokenize(s)]
def clean(s):
# Cleans a string: Lowercasing, trimming, removing non-alphanumeric
return " ".join(re.findall(r'\w+', s, flags=re.UNICODE)).lower()
def remove_stopwords(l):
# Removes stopwords from a list of tokens
return [w for w in l if w not in feature_extraction.text.ENGLISH_STOP_WORDS]
def gen_or_load_feats(feat_fn, headlines, bodies):
feats = feat_fn(headlines, bodies)
#np.save(feature_file, feats)
return feats
def word_overlap_features(headlines, bodies):
X = []
for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
clean_headline = clean(headline)
clean_body = clean(body)
clean_headline = get_tokenized_lemmas(clean_headline)
clean_body = get_tokenized_lemmas(clean_body)
features = [
len(set(clean_headline).intersection(clean_body)) / float(len(set(clean_headline).union(clean_body)))]
X.append(features)
return X
def refuting_features(headlines, bodies):
_refuting_words = [
'fake',
'fraud',
'hoax',
'false',
'deny', 'denies',
# 'refute',
'not',
'despite',
'nope',
'doubt', 'doubts',
'bogus',
'debunk',
'pranks',
'retract'
]
X = []
for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
clean_headline = clean(headline)
clean_headline = get_tokenized_lemmas(clean_headline)
features = [1 if word in clean_headline else 0 for word in _refuting_words]
X.append(features)
return X
def polarity_features(headlines, bodies):
_refuting_words = [
'fake',
'fraud',
'hoax',
'false',
'deny', 'denies',
'not',
'despite',
'nope',
'doubt', 'doubts',
'bogus',
'debunk',
'pranks',
'retract'
]
def calculate_polarity(text):
tokens = get_tokenized_lemmas(text)
return sum([t in _refuting_words for t in tokens]) % 2
X = []
for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
clean_headline = clean(headline)
clean_body = clean(body)
features = []
features.append(calculate_polarity(clean_headline))
features.append(calculate_polarity(clean_body))
X.append(features)
return np.array(X)
def ngrams(input, n):
input = input.split(' ')
output = []
for i in range(len(input) - n + 1):
output.append(input[i:i + n])
return output
def chargrams(input, n):
output = []
for i in range(len(input) - n + 1):
output.append(input[i:i + n])
return output
def append_chargrams(features, text_headline, text_body, size):
grams = [' '.join(x) for x in chargrams(" ".join(remove_stopwords(text_headline.split())), size)]
grams_hits = 0
grams_early_hits = 0
grams_first_hits = 0
for gram in grams:
if gram in text_body:
grams_hits += 1
if gram in text_body[:255]:
grams_early_hits += 1
if gram in text_body[:100]:
grams_first_hits += 1
features.append(grams_hits)
features.append(grams_early_hits)
features.append(grams_first_hits)
return features
def append_ngrams(features, text_headline, text_body, size):
grams = [' '.join(x) for x in ngrams(text_headline, size)]
grams_hits = 0
grams_early_hits = 0
for gram in grams:
if gram in text_body:
grams_hits += 1
if gram in text_body[:255]:
grams_early_hits += 1
features.append(grams_hits)
features.append(grams_early_hits)
return features
def hand_features(headlines, bodies):
def binary_co_occurence(headline, body):
# Count how many times a token in the title
# appears in the body text.
bin_count = 0
bin_count_early = 0
for headline_token in clean(headline).split(" "):
if headline_token in clean(body):
bin_count += 1
if headline_token in clean(body)[:255]:
bin_count_early += 1
return [bin_count, bin_count_early]
def binary_co_occurence_stops(headline, body):
# Count how many times a token in the title
# appears in the body text. Stopwords in the title
# are ignored.
bin_count = 0
bin_count_early = 0
for headline_token in remove_stopwords(clean(headline).split(" ")):
if headline_token in clean(body):
bin_count += 1
bin_count_early += 1
return [bin_count, bin_count_early]
def count_grams(headline, body):
# Count how many times an n-gram of the title
# appears in the entire body, and intro paragraph
clean_body = clean(body)
clean_headline = clean(headline)
features = []
features = append_chargrams(features, clean_headline, clean_body, 2)
features = append_chargrams(features, clean_headline, clean_body, 8)
features = append_chargrams(features, clean_headline, clean_body, 4)
features = append_chargrams(features, clean_headline, clean_body, 16)
features = append_ngrams(features, clean_headline, clean_body, 2)
features = append_ngrams(features, clean_headline, clean_body, 3)
features = append_ngrams(features, clean_headline, clean_body, 4)
features = append_ngrams(features, clean_headline, clean_body, 5)
features = append_ngrams(features, clean_headline, clean_body, 6)
return features
X = []
for i, (headline, body) in tqdm(enumerate(zip(headlines, bodies))):
X.append(binary_co_occurence(headline, body)
+ binary_co_occurence_stops(headline, body)
+ count_grams(headline, body))
return X
import sys
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
def generate_features(h,b):
X_overlap = gen_or_load_feats(word_overlap_features, h, b)
print(np.array(X_overlap).shape)
X_refuting = gen_or_load_feats(refuting_features, h, b)
print(np.array(X_refuting).shape)
X_polarity = gen_or_load_feats(polarity_features, h, b)
print(np.array(X_polarity).shape)
X_hand = gen_or_load_feats(hand_features, h, b)
print(np.array(X_hand).shape)
X = np.c_[X_hand, X_polarity, X_refuting, X_overlap]
return X
print('Generate training features')
train_feat=generate_features(train_data_["Heading"].values,train_data_["Body"].values)
print('Generate test features')
test_feat=generate_features(test_data["Heading"].values,test_data["Body"].values)
# load embeddings
import gensim
from gensim.models import word2vec
wordmodelfile="C:/Users/anh21/Fake_news/fake news challenge (FNC-1)/fncdata/GoogleNews-vectors-negative300.bin"
wordmodel= gensim.models.KeyedVectors.load_word2vec_format(wordmodelfile, binary=True)
def w2v(s1,s2,wordmodel):
"""
    This function computes a word-embedding based similarity score
    between two sentences (e.g. an article heading and a heading
    returned by the Google search API).
    Args:
        s1 (str): first sentence
        s2 (str): second sentence
        wordmodel: gensim KeyedVectors word embedding model
    Returns:
        (float): similarity score (1.0 if the sentences are identical, 0.0 if no words are in the vocabulary)
"""
#pdb.set_trace()
if s1==s2:
return 1.0
s1words=s1.split()
s2words=s2.split()
s1wordsset=set(s1words)
s2wordsset=set(s2words)
vocab = wordmodel.vocab #the vocabulary considered in the word embeddings
# if len(s1wordsset & s2wordsset)==0:
# return 0.0
s1words_=[word for word in s1words if word in vocab]
s2words_=[word for word in s2words if word in vocab]
s1wordsset=set(s1words_)
s2wordsset=set(s2words_)
# for word1 in s1wordsset.copy():
# if (word1 in s2words_):
# s1words_.remove(word1)
# s2words_.remove(word1)
if len(s1words_)==0 or len (s2words_)==0:
return 0.0
return wordmodel.n_similarity(s1words_, s2words_)
def cosine_sim(search_heading,heading):
"""
This function calculates cosine similarity results
between the article headings
and the headings returned by google search API
Args:
        search_heading (array-like): for each article, the list of headings returned by the Google search API
        heading (np.ndarray): the article headings
    Returns:
        results (np.ndarray): (n, 1) array holding, for each article heading, the maximum similarity to its search-result headings
"""
results=[]
for i in range(heading.shape[0]):
r=[1.0 if (heading[i] in test_search_sen or test_search_sen in heading[i] ) else w2v(test_search_sen,heading[i],wordmodel) for test_search_sen in search_heading[i]]
results.append(max(r))
results=np.array(results).reshape(-1,1)
return results
print('Checking heading similarities:')
s1="EU Blocks Deutsche Boerse's $14 Billion Takeover of London Stock Exchange"
s2="EU Applauds Deutsche Boerse's $14 Billion Takeover of London Stock Exchange"
s3='American Airlines gets old planes from China Southern to fly domestic'
s4='American Airlines ties up partnership with China Southern'
s5="_Impossible Foods to supersize production of lab-grown burger"
s6="lab-grown meatless burger part of a larger global agenda."
s7="Amazon to buy Middle East online retailer Souq"
s8="Amazon to sell Middle East online retailer Souq"
print ("sim(s1,s2) = ", w2v(s1,s2,wordmodel))
print ("sim(s7,s8) = ", w2v(s7,s8,wordmodel))
print ("sim(s7,s8) = ", w2v(s3,s4,wordmodel))
#load google search results
train_search=np.load('train_google_search_results.npy')
test_search=np.load('test_search_results.npy')
sim_feat=cosine_sim(train_search,train_data_['Heading'].values)
test_sim_feat=cosine_sim(test_search,test_data['Heading'].values)
###Output
_____no_output_____
###Markdown
Lexicon features
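To make the lexicon features concrete before the loading code below, here is a toy version of the counting logic used in lex_distribution, with two hypothetical mini-lexicons (the real lists are read from the lexicon files further down):
###Code
toy_lexicons = [['hoax', 'fraud', 'fake'],        # refuting-style words
                ['great', 'good', 'positive']]    # positive-style words
toy_article = "officials called the viral story a hoax and a fraud"
toy_counts = [sum(1 for token in lex if token in toy_article) for lex in toy_lexicons]
print(toy_counts)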
###Code
def lex_distribution(text):
"""
This function calculates lexicon based features
Args:
        text (iterable of str): article headings or bodies
    Returns:
        lexicon_distribution (list of np.ndarray): for each article, the count of matched entries from each lexicon
"""
lexicon_distribution = list()
for article in text:
lexicon_distribution_per_instance = list()
for lexi in lexicon:
temp = list()
for token in lexi:
if token in article:
temp.append(token)
if not temp:
lexicon_distribution_per_instance.append(0)
else:
lexicon_distribution_per_instance.append(len(temp))
lexicon_distribution.append(np.asarray(lexicon_distribution_per_instance))
return lexicon_distribution
def lex_Y (lexicon_distribution_train,y_train):
lex_Y_train = []
for i in range(np.shape(y_train)[0]):
X_Y = [lexicon_distribution_train[i]]
lex_Y_train.append(X_Y)
return lex_Y_train
#lex_path="C:\\Users\\amir\\fake_news\\fake news challenge (FNC-1)\\lexicon\\"
import glob
assertive_hooper = list()
factive_hooper = list()
hedges_hyland = list()
implicatives_karttunen = list()
report_verbs = list()
filenames = glob.glob("lexicon\\public-lexicons\\bias_related_lexicons\\features1\\*.txt")
for index, file in enumerate(filenames):
with open(file, 'r') as reader:
for line in reader:
if 'a+' in line or '' == line.strip() or '' in line:
continue
if index == 0:
assertive_hooper.append(line.strip())
if index == 1:
factive_hooper.append(line.strip())
if index == 2:
hedges_hyland.append(line.strip())
if index == 3:
implicatives_karttunen.append(line.strip())
if index == 4:
report_verbs.append(line.strip())
bias_lexicon = list()
filenames = glob.glob("lexicon\\public-lexicons\\bias-lexicon\\features2\\*.txt")
for file in filenames:
with open(file, 'r') as reader:
for line in reader:
if 'a+' in line or '' == line.strip() or '' in line:
continue
bias_lexicon.append(line.strip())
negative_words = list()
positive_words = list()
subjclues = list()
filenames = glob.glob("lexicon\\public-lexicons\\emo-lexicon\\*.txt")
for index, file in enumerate(filenames):
with open(file, 'r', encoding='Latin-1') as reader:
for line in reader:
if 'a+' in line or '' == line.strip() or '' in line:
continue
if index == 0:
negative_words.append(line.strip())
if index == 1:
positive_words.append(line.strip())
if index == 2:
subjclues.append(line.strip())
lexicon = []
lexicon.append(assertive_hooper)
lexicon.append(factive_hooper)
lexicon.append(hedges_hyland)
lexicon.append(implicatives_karttunen)
lexicon.append(report_verbs)
lexicon.append(bias_lexicon)
lexicon.append(negative_words)
lexicon.append(positive_words)
lexicon.append(subjclues)
lexicon_distribution_train = np.array(lex_distribution(train_data_["Body"].values))
lexicon_distribution_test = np.array(lex_distribution(test_data["Body"].values))
lexicon_distribution_title_train = np.array(lex_distribution(train_data_["Heading"].values))
lexicon_distribution_title_test = np.array(lex_distribution(test_data["Heading"].values))
#visualize lexicon distribution features
y=['Legitimate','Fake']
sample=np.random.randint(len(lexicon_distribution_train))
lexicons=['assert','fact','hedge','implic','rep','bias','neg','pos','subj']
plt.figure(figsize=(8,6))
plt.bar(lexicons,lexicon_distribution_train[sample,:])
title=y[Y_train[sample]]
plt.xlabel('Lexicons',fontsize=16)
plt.ylabel('Counts',fontsize=16)
plt.title('%s article distribution'%(title),fontsize=18,fontweight='bold')
plt.grid(ls='--')
plt.show()
###Output
_____no_output_____
###Markdown
Combine the features
###Code
total_train_feat=np.concatenate((train_feat,sim_feat,lexicon_distribution_train),axis=1)
total_train_feat.shape
###Output
_____no_output_____
###Markdown
Normalize features
###Code
std_scale=MinMaxScaler().fit(total_train_feat)
X_train = std_scale.transform(total_train_feat)
total_test_feat=np.concatenate((test_feat,test_sim_feat,lexicon_distribution_test),axis=1)  # body-based lexicon counts, to match the training feature layout
X_test = std_scale.transform(total_test_feat)
total_test_feat.shape
###Output
_____no_output_____
###Markdown
XGBoost
###Code
import time
from sklearn.model_selection import RandomizedSearchCV
clf=xgb.XGBClassifier()
param_grid = {
'learning_rate': [ 0.01, 0.1, 0.5, 1,5,10],
'gamma': [0.1, 0, 0.25, 0.5, 1.0,5.0],
'reg_lambda': [0.1, 1.0, 5.0, 10.0, 50.0, 100.0],
'n_estimators': [10,20,30,40,50]}
rs_clf = RandomizedSearchCV(clf, param_distributions=param_grid, n_iter=30)
rs_clf.fit(X_train, Y_train)
# Identify optimal hyperparameter values
best_n_estim= rs_clf.best_params_['n_estimators']
print("The best parameters: {}".format(rs_clf.best_params_))
xgb_classifier = xgb.XGBClassifier(reg_lambda= 100, n_estimators= 10, learning_rate=0.5, gamma=0.1)
xgb_classifier.fit(X_train, Y_train)
xgb_predictions = xgb_classifier.predict(X_test)
print (metrics.classification_report(Y_test, xgb_predictions))
print ("Overall Accuracy:", round(metrics.accuracy_score(Y_test, xgb_predictions), 3))
###Output
precision recall f1-score support
0 0.85 0.96 0.90 24
1 0.95 0.83 0.89 24
accuracy 0.90 48
macro avg 0.90 0.90 0.90 48
weighted avg 0.90 0.90 0.90 48
Overall Accuracy: 0.896
###Markdown
NB model
###Code
naive = naive_bayes.MultinomialNB(alpha=10)
naive.fit(X_train, Y_train)
p=naive.predict(X_test)
print(classification_report(Y_test, p, target_names=['f','l']))
###Output
precision recall f1-score support
f 1.00 0.21 0.34 24
l 0.56 1.00 0.72 24
accuracy 0.60 48
macro avg 0.78 0.60 0.53 48
weighted avg 0.78 0.60 0.53 48
###Markdown
SVM model
###Code
from scipy import stats
svclassifier =svm.SVC(kernel='rbf',decision_function_shape ='ovr')
rand_list = {"C": stats.uniform(1, 100)}
rand_search = RandomizedSearchCV(svclassifier, param_distributions = rand_list, n_iter = 30, cv=8,n_jobs = 4,scoring='f1_weighted')
rand_search.fit(X_train, Y_train)
print(rand_search.best_params_)
svm_model=svm.SVC(C=20,kernel='rbf',decision_function_shape ='ovr')
svm_model.fit(X_train, Y_train)
p=svm_model.predict(X_test)
print(classification_report(Y_test, p, target_names=['f','l']))
###Output
C:\Users\anh21\.conda\envs\ML\lib\site-packages\sklearn\svm\base.py:193: FutureWarning: The default value of gamma will change from 'auto' to 'scale' in version 0.22 to account better for unscaled features. Set gamma explicitly to 'auto' or 'scale' to avoid this warning.
"avoid this warning.", FutureWarning)
###Markdown
Random Forest
###Code
from sklearn.ensemble import RandomForestClassifier
n_estimators = np.random.uniform(100, 300, 10).astype(int)
max_features = np.random.normal(20, 10, 30).astype(int)
hyperparameters = {'n_estimators': list(n_estimators),
'max_features': list(max_features)}
print (hyperparameters)
randomCV = RandomizedSearchCV(RandomForestClassifier(), param_distributions=hyperparameters, n_iter=30)
randomCV.fit(X_train, Y_train)
# Identify optimal hyperparameter values
best_n_estim = randomCV.best_params_['n_estimators']
best_max_features = randomCV.best_params_['max_features']
print("The best performing n_estimators value is: {:5d}".format(best_n_estim))
print("The best performing max_features value is: {:5d}".format(best_max_features))
rf = RandomForestClassifier(n_estimators=best_n_estim,
max_features=30)
rf.fit(X_train, Y_train)
rf_predictions = rf.predict(X_test)
print (metrics.classification_report(Y_test, rf_predictions))
print ("Overall Accuracy:", round(metrics.accuracy_score(Y_test, rf_predictions), 3))
feature_importances = pd.DataFrame(rf.feature_importances_,
columns=['importance']).sort_values('importance',ascending=False)
feature_importances = pd.DataFrame(rf.feature_importances_,
columns=['importance']).sort_values('importance',ascending=False)
fig, ax = plt.subplots(figsize=(7, 5))
feature_importances['importance'][0:22].plot(kind='bar')
plt.xlabel('Features',fontweight='bold', fontsize=10)
plt.xticks(np.arange(22),rotation=0)
plt.ylabel('Importance',fontweight='bold', fontsize=10)
plt.grid(ls='--')
ax.set_ylim([0,0.7])
#plt.title('AUC performance CHBMIT Dataset', fontsize=14,fontweight='bold')
ax.legend(loc='best')
plt.savefig('Feature_importance.png', dpi=400,bbox_inches='tight')
###Output
_____no_output_____
###Markdown
Ensemble
###Code
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, VotingClassifier
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
from sklearn.ensemble import AdaBoostClassifier
rnd_forest = RandomForestClassifier(n_estimators=300, criterion='entropy', n_jobs=-1, max_features=None)
rnd_forest_gini = RandomForestClassifier(n_estimators=300, criterion='gini', n_jobs=-1, random_state=20, max_features=None)
adb = AdaBoostClassifier(n_estimators=300)
xgb_classifier = xgb.XGBClassifier(reg_lambda= 100.0, n_estimators= 10, learning_rate=1, gamma=0.5)
eclf = VotingClassifier(estimators=[('rnd_forest', rnd_forest),('rnd_forest_gini',rnd_forest_gini),('adb',adb),('xgb',xgb_classifier)], voting='soft')
eclf.fit(X_train, Y_train)
y_val_pred = eclf.predict(X_test)
print('\nensemble performance:')
print(classification_report(Y_test, y_val_pred, target_names=['Fake', 'Eligible']))
###Output
ensemble performance:
precision recall f1-score support
Fake 0.85 0.96 0.90 24
Eligible 0.95 0.83 0.89 24
accuracy 0.90 48
macro avg 0.90 0.90 0.90 48
weighted avg 0.90 0.90 0.90 48
|
Fase 2 - Manejo de datos y optimizacion/Tema 04 - Colecciones de datos/Apuntes/Leccion 3 plantilla- - Diccionarios.ipynb | ###Markdown
DictionariesAlong with lists, dictionaries are the most widely used collections. They are based on a mapped structure in which every element of the collection is identified by a unique key, so there cannot be two identical keys. In other languages they are known as associative arrays.
###Code
vacio = {}
vacio
###Output
_____no_output_____
###Markdown
Type of a variable
###Code
type(vacio)
###Output
_____no_output_____
###Markdown
Definition For each element the structure is defined as -> key:value
###Code
colores = {'amarillo':'yellow','azul':'blue'}
###Output
_____no_output_____
###Markdown
Elements can also be added on the fly
###Code
colores['verde'] = 'green'
colores
colores['azul']
colores['amarillo']
###Output
_____no_output_____
###Markdown
Keys can also be numbers, although they are a bit confusing
###Code
numeros = {10:'diez',20:'veinte'}
numeros[10]
###Output
_____no_output_____
###Markdown
Modifying a value through its key
###Code
colores['amarillo'] = 'white'
colores
###Output
_____no_output_____
###Markdown
The del() functionIt is used to delete an element from the dictionary.
###Code
del(colores['amarillo'])
colores
###Output
_____no_output_____
###Markdown
Working directly with records
###Code
edades = {'Hector':27,'Juan':45,'Maria':34}
edades
edades['Hector']+=1
edades
edades['Juan'] + edades['Maria']
###Output
_____no_output_____
###Markdown
Sequential reading with for .. in ..It is possible to use a for loop to iterate over the elements of the dictionary:
###Code
for edad in edades:
print(edad)
###Output
Maria
Hector
Juan
###Markdown
The problem is that the keys are returned, not the valuesTo fix this we should index the dictionary with the key of each element.
###Code
for clave in edades:
print(edades[clave])
for clave in edades:
print(clave,edades[clave])
###Output
Maria 34
Hector 28
Juan 45
###Markdown
The .items() methodIt makes it easier to read the elements as key and value because it automatically returns both on every iteration:
###Code
for c,v in edades.items():
print(c,v)
###Output
Maria 34
Hector 28
Juan 45
###Markdown
Example using dictionaries and lists at the same timeWe can build our own advanced structures by mixing both collections. While the dictionaries handle the individual properties of each record, the lists let us manage all of them as a whole.
###Code
personajes = []
p = {'Nombre':'Gandalf','Clase':'Mago','Raza':'Humano'} # Not sure he was really a human, but he looked like one :)
personajes.append(p)
personajes
p = {'Nombre':'Legolas','Clase':'Arquero','Raza':'Elfo'}
personajes.append(p)
p = {'Nombre':'Gimli','Clase':'Guerrero','Raza':'Enano'}
personajes.append(p)
personajes
for p in personajes:
print(p['Nombre'], p['Clase'], p['Raza'])
###Output
Gandalf Mago Humano
Legolas Arquero Elfo
Gimli Guerrero Enano
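###Markdown
Because the records live in an ordinary list, list comprehensions are a convenient way to work with all of them at once, for example to collect a single field or to filter by one:
###Code
nombres = [p['Nombre'] for p in personajes]
elfos = [p for p in personajes if p['Raza'] == 'Elfo']
print(nombres)
print(elfos)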
|
notebooks/AccretionPhysicsCalcs.ipynb | ###Markdown
Okay, now play with critical angular momentum from Beloborodov 2001 https://ui.adsabs.harvard.edu/abs/2001MNRAS.323..167B/abstract
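In symbols, the next cell compares the mean specific angular momentum of the captured material with the critical value used in the code (here eta is an efficiency factor, Omega the orbital angular frequency, R_acc the accretion radius and r_G the gravitational radius defined earlier in the notebook):
$$\langle \ell \rangle = \frac{\eta}{4}\,\Omega\,R_{\rm acc}^{2}, \qquad \ell_{*} = 0.75\, r_{G}\, c$$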
###Code
lstar = 0.75 * rG * c
eta = 1
l_ave = (eta * 0.25 * omega * (r_acc**2)).to(u.m**2 / u.s)
print(l_ave / lstar)
###Output
11.021531505694373
###Markdown
Now look at the mass accretion rate:
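The estimate below takes the wind density at the orbital separation $a$ and a Bondi-Hoyle style capture rate,
$$\rho_{w} \simeq \frac{\dot M_{2}}{4\pi a^{2} v_{\rm wind}}, \qquad \dot M_{1} \simeq \pi R_{\rm acc}^{2}\, \rho_{w}\, v_{\rm rel},$$
matching the expressions coded in the cell.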
###Code
# Mass-loss rate of the companion, Mdot_2 = 4.52e-7 Msun/yr, from Vink et al. 2001:
m2_massloss = 4.52e-7 * u.Msun / u.yr
density = (m2_massloss / (4 * np.pi * a**2 * wind_speed)).to(u.g/u.cm**3)
print(f'Density of wind at BH: {density:8.4}')
# Bondi-Hoyle 1944:
m1_mass_rate = (np.pi * r_acc**2 * density * v_rel).to(u.Msun / u.yr)
print(f'Accretion rate: {m1_mass_rate:9.4}')
# Figure out what the outer diameter of the accretion disk is supposed to be
r_d = 0.25 * (v_rel * a.to(u.km))**2 * (r_acc.to(u.km) / a.to(u.km))**4 / (G * m1.to(u.kg))
my_rd = ( (r_d.to(u.m) / rG))
# Outer diameter at 1700 rG.
# From Shapiro 1976
v_esc = (((2 * G * m1.to(u.kg) / r2.to(u.m))**0.5)).to(u.km / u.s)
ratio_to_isco = 160 * ( (v_esc / wind_speed)**8)
ratio_to_isco *= ( ( 1 + (orb_vel/wind_speed)**2) **(-4))
ratio_to_isco *= (m2 / (10*u.Msun))**2
ratio_to_isco *= (m1 / (30*u.Msun))**-4
ratio_to_isco *= (period.to(u.d) / (5.6*u.d))**(-4/3)
ratio_to_isco *= (m2_massloss / (1e-6 * u.Msun / u.yr))
print(my_rd / ratio_to_isco)
# Free fall time at R_acc (Illarionov 1974)
t = (r_acc.to(u.m)**(3/2)) / (2 * G * m1.to(u.kg))**0.5
# Cooling time due to bremsstrahlung
# Accretion shock temperature:
shock_temp = 4e7 * (wind_speed.to(u.cm / u.s).value * 1e-8)**2 * (r_acc / r_acc) * u.K
t_bremss = 3e11 * (shock_temp**0.5) / density
t_bremss
###Output
_____no_output_____ |
examples/test_gradients.ipynb | ###Markdown
Sig Loss gradients
###Code
A = 1
M = 3
N = 2
D = 2
X = np.random.randn(A,M,D).cumsum(axis=1)
Y = np.random.randn(A,N,D).cumsum(axis=1)
X /= np.max(X)
Y /= np.max(Y)
torch.rand((2,2,2)).shape
X_naive = torch.tensor(X, dtype=torch.float64)
Y_naive = torch.tensor(Y, dtype=torch.float64)
X_cpu = X_naive.clone()
Y_cpu = Y_naive.clone()
X_gpu = X_naive.clone().cuda()
Y_gpu = Y_naive.clone().cuda()
X_naive.requires_grad = True
X_cpu.requires_grad = True
X_gpu.requires_grad = True
t = time.time()
l_naive = sigkernel.SigLoss_naive(static_kernel, dyadic_order, _naive_solver).forward(X_naive,Y_naive)
print('time:', np.round(time.time()-t,3), 's')
print(l_naive)
t = time.time()
l_cpu = signature_kernel.compute_distance(X_cpu,Y_cpu)
print('time:', np.round(time.time()-t,3), 's')
print(l_cpu)
t = time.time()
l_gpu = signature_kernel.compute_distance(X_gpu,Y_gpu)
print('time:', np.round(time.time()-t,3), 's')
print(l_gpu)
t = time.time()
l_naive.backward()
print('time:', np.round(time.time()-t,3), 's')
t = time.time()
l_cpu.backward()
print('time:', np.round(time.time()-t,3), 's')
t = time.time()
l_gpu.backward()
print('time:', np.round(time.time()-t,3), 's')
X_naive.grad
X_cpu.grad
X_gpu.grad.cpu()
###Output
_____no_output_____
###Markdown
Sig MMD gradients
###Code
A = 2
B = 3
M = 4
N = 3
D = 2
X = np.random.randn(A,M,D).cumsum(axis=1)
Y = np.random.randn(B,N,D).cumsum(axis=1)
X /= np.max(X)
Y /= np.max(Y)
X_naive = torch.tensor(X, dtype=torch.float64)
Y_naive = torch.tensor(Y, dtype=torch.float64)
X_cpu = X_naive.clone()
Y_cpu = Y_naive.clone()
X_gpu = X_naive.clone().cuda()
Y_gpu = Y_naive.clone().cuda()
X_naive.requires_grad = True
X_cpu.requires_grad = True
X_gpu.requires_grad = True
t = time.time()
mmd_naive = sigkernel.SigMMD_naive(static_kernel, dyadic_order, _naive_solver).forward(X_naive,Y_naive)
print('time:', np.round(time.time()-t,3), 's')
print(mmd_naive)
t = time.time()
mmd_cpu = signature_kernel.compute_mmd(X_cpu,Y_cpu)
print('time:', np.round(time.time()-t,3), 's')
print(mmd_cpu)
t = time.time()
mmd_gpu = signature_kernel.compute_mmd(X_gpu,Y_gpu)
print('time:', np.round(time.time()-t,3), 's')
print(mmd_gpu)
t = time.time()
mmd_naive.backward()
print('time:', np.round(time.time()-t,3), 's')
t = time.time()
mmd_cpu.backward()
print('time:', np.round(time.time()-t,3), 's')
t = time.time()
mmd_gpu.backward()
print('time:', np.round(time.time()-t,3), 's')
X_naive.grad
X_cpu.grad
X_gpu.grad.cpu()
###Output
_____no_output_____ |
means/statistics_all_datasets_MCI.ipynb | ###Markdown
Imports
###Code
from statistics import mean
import numpy as np
import pandas as pd
import math
import os
from collections import Counter
from functools import reduce
import glob
import copy
###Output
_____no_output_____
###Markdown
Opening the CSV files
###Code
dataframes = [pd.read_csv(file, sep=',', index_col=0) for file in sorted(glob.glob('../preprocessed_datasets' + "/*."+'csv'))]
cohorts = [os.path.splitext(file)[0] for file in sorted(os.listdir('../preprocessed_datasets'))]  # drop the .csv extension safely (str.strip would remove characters, not the suffix)
# reduce to BL visit only
all_cohorts = dict()
for name, df in zip(cohorts, dataframes):
all_cohorts[name] = df.loc[(df["Visit"] == 1) & (df["Diagnosis"].astype(str) == 'MCI')]
###Output
_____no_output_____
###Markdown
Functions to perform essential calculations
###Code
def cat_stat_df(dfs, result):
"""Counting different categories, calculate the % of categorical features, store results in a df"""
categorical = {'APOE4': [2.0, 1.0], 'Sex': ['Female'], 'Diagnosis': ['CU', 'MCI', 'AD']}
column_cat = ['Sex', 'Diagnosis', 'APOE4']
for cohort in dfs:
if dfs[cohort].empty==True:
continue
else:
calc_dict = dict()
df = dfs[cohort]
for col in column_cat:
ca = Counter(df[col].dropna())
calc_dict[col] = ca
cohort_df = pd.DataFrame(calc_dict).transpose()
cohort_df = cohort_df.dropna(how='all')
cohort_df.loc[cohort] = cohort_df.sum()
for i in categorical:
if i == 'Diagnosis':
if i in cohort_df.index:
result.loc[cohort, categorical[i]] = cohort_df.loc[cohort, cohort_df.loc[i].notna()].astype(int)
result.loc[cohort, categorical[i]] = result.loc[cohort, categorical[i]].replace({np.nan: 0})
result.loc[cohort, 'n'] = int(sum(cohort_df.loc[cohort, cohort_df.loc[i].notna()]))
result.loc[cohort, 'Total'] = int(len(dfs[cohort].index))
else:
result.loc[cohort, i] = np.nan
result.loc[cohort, 'n'] = int(len(dfs[cohort].index))
elif i == 'APOE4':
if 'APOE4' in list(cohort_df.index.astype(str)):
if '2.0' not in list(cohort_df.columns.astype(str)) and '2' not in list(cohort_df.columns.astype(str)):
cohort_df[2.0] = np.nan
result.loc[cohort, i] = round(100 * sum([val for val in cohort_df.loc[i, categorical[i]]]) /
sum([val for val in cohort_df.loc[i].dropna()]), 1)
else:
result.loc[cohort, i] = np.nan
elif i == 'Sex':
if (i in cohort_df.index) & ("Female" in cohort_df.columns):
result.loc[cohort, i] = round(100 * sum([val for val in cohort_df.loc[i, categorical[i]]])
/ sum([val for val in cohort_df.loc[i].dropna()]), 1)
else:
result.loc[cohort, i] = 0
result.rename(columns={"Sex": "Female %", "APOE4": "APOE4 %"}, inplace=True)
return result
def num_stat_df(dfs, result_df):
"""Calculating std and mean and storing it in the result dataframe"""
column_names = ['Age', 'CDR', 'Education', 'MMSE', 'CDRSB', 'Hippocampus', 'A-beta', 'Ttau', 'Ptau']
for df in dfs:
dataset = dfs[df]
calc_dict = dict()
for col in column_names:
if (col in dataset.columns) and (dataset[col].notna().any()):
df_std = round(np.nanstd(dataset[col]), 1)
df_mean = round(np.nanmean(dataset[col]), 1)
dict_value = str(df_mean) + ' (' + str(df_std) + ')'
calc_dict[col] = dict_value
else:
calc_dict[col] = np.nan
for key in calc_dict:
result_df.loc[df, key] = calc_dict[key]
return result_df
###Output
_____no_output_____
###Markdown
Make an empty dataframe to fill in with the results
###Code
results = pd.DataFrame(index = all_cohorts.keys(), columns = [col for col in all_cohorts['AIBL'].columns])
results.index.name = 'Name of Dataset'
for i in ['CU', 'MCI', 'AD', 'Total']:
results[i] = np.nan
cat_stat_df(all_cohorts, results)
num_stat_df(all_cohorts, results)
results.drop(columns=['Diagnosis', 'Visit', 'Race', 'Months'], inplace=True)
results
###Output
_____no_output_____
###Markdown
Final table
###Code
results[['n', 'Total', 'CU', 'MCI', 'AD', 'Female %', 'Age', 'Education', 'MMSE', 'CDR', 'CDRSB', 'APOE4 %', 'Hippocampus']]
###Output
_____no_output_____ |
correlation research/extractFiles.ipynb | ###Markdown
Organizing the current data. mapping list: a 2D array (list) where the first index is the position of the gene ID and the stored list holds the peaks, but only for the first chromosome. It turns out a step is missing, so let's add it: identify the subjects for this cancer type and compare only within them.
###Code
atac_sample = atac_sample[4:len(atac_sample)]
atac_sample
rna_atac = pd.read_csv('rna_atac.csv',header = 0)
del rna_atac['Unnamed: 0']
def modifyformat(x):
x = x.replace('-','_')
return x
rna_atac['bam_prefix'] = rna_atac['bam_prefix'].apply(modifyformat)
rna_atac
indexmap = atac_sample.to_frame()
indexmap = indexmap.reset_index()
del indexmap[0]
indexmap.rename(columns={'index': 'bam_prefix'}, inplace=True)
indexmap
indexmap = pd.merge(indexmap, rna_atac, how='left')
indexmap
###Output
_____no_output_____
###Markdown
indexmap holds the relationship between the patients of this cancer type and the data involved. Next, the ATAC data can stay as it is, while the RNA data needs to be pulled out.
###Code
expressionInAll = pd.read_csv('E:/课题组相关/科创II/RNA-seq-calculated_ALL_1.csv',header=0)
expressionInAll.head(10)
rna_names = indexmap['File Name'].values.tolist()
rna_names.insert(0,'loadedanno.gene_id')
expressionInThis = expressionInAll[rna_names]
expressionInThis # this is in the order of the map
# this is
del expressionInAll
###Output
_____no_output_____
###Markdown
Next, drop the bottom 25% of genes and peaks. Before transposing, make sure the RNA-seq samples correspond to the ATAC samples.
###Code
atac_prefix = indexmap['bam_prefix'].values.tolist()
BRCA = pd.read_csv(path,header=0)
atac_prefix.insert(0,'name')
peakInthis = BRCA[atac_prefix]
peakInthis
# Now they correspond
peakInthis = pd.DataFrame(peakInthis.values.T, index=peakInthis.columns, columns=peakInthis.name)
peakInthis.drop(['name'],inplace=True)
peakInthis
filter_ = peakInthis.var()
bond = filter_.sort_values(ascending=False)[int(len(filter_)/4*3)]
bond
filter_ = filter_[filter_>bond]
filter_ = filter_.index.tolist()
peakInthis = peakInthis[filter_]
## Done: the peak filtering is finished
peakInthis
remaining = peakInthis.columns.tolist()
dict_ = {'name':remaining}
BRCA_indexd = pd.DataFrame(dict_)
BRCA_index = pd.merge(BRCA_indexd,BRCA_index,how='left')
BRCA_index
###Output
_____no_output_____
###Markdown
The peaks are done; now process the genes. expressionInThis
###Code
expressionInThis = pd.DataFrame(expressionInThis.values.T, index=expressionInThis.columns, columns=expressionInThis['loadedanno.gene_id'])
expressionInThis.drop(['loadedanno.gene_id'],inplace=True)
expressionInThis
filter_ = expressionInThis.var()
bond = filter_.sort_values(ascending=False)[int(len(filter_)/4*3)]
bond
filter_ = filter_[filter_>bond]
filter_ = filter_.index.tolist()
expressionInThis = expressionInThis[filter_]
expressionInThis
expressionInThis.to_csv('dataSample_rna_BRCA_11.csv',index = 0)
BRCA_index.to_csv('BRCA_index_11.csv',index=0)
peakInthis.to_csv('dataSample_atac_BRCA_11.csv',index = 0)
###Output
_____no_output_____ |
tutorials/notebook/programming_guide/mindspore_dataset_loading.ipynb | ###Markdown
Dataset Loading Overview: MindSpore supports loading datasets commonly used in the image domain, and users can load them directly with the corresponding classes in `mindspore.dataset`. The currently supported common datasets and their dataset classes are listed in the table below.| Image dataset | Dataset class | Description || :---- | :---- | :---- || MNIST | MnistDataset | MNIST is a large handwritten-digit image dataset with 60,000 training images and 10,000 test images, commonly used for training image processing systems. || CIFAR-10 | Cifar10Dataset | CIFAR-10 is a tiny-image dataset containing 60,000 32x32 color images in 10 classes, about 6,000 per class, of which 5,000 are training and 1,000 are test images. || CIFAR-100 | Cifar100Dataset | CIFAR-100 is similar to CIFAR-10 but has 100 classes, about 600 images per class, of which 500 are training and 100 are test images. || CelebA | CelebADataset | CelebA is a large face-image dataset with more than 200,000 celebrity face images, each annotated with 40 attribute labels. || PASCAL-VOC | VOCDataset | PASCAL-VOC is a common image dataset widely used for object detection, image segmentation, and other computer-vision tasks. || COCO | CocoDataset | COCO is a large-scale dataset for object detection, image segmentation, and pose estimation. || CLUE | CLUEDataset | CLUE is a large Chinese language-understanding dataset. |MindSpore also supports loading datasets stored in multiple data formats, and users can load data files from disk directly with the corresponding classes in `mindspore.dataset`. The currently supported formats and their loading methods are listed below.| Data format | Dataset class | Description || :---- | :---- | :---- || MindRecord | MindDataset | MindRecord is MindSpore's own data format, offering efficient reads and writes and easy distributed processing. || Manifest | ManifestDataset | Manifest is a data format supported by Huawei ModelArts that describes the raw files and annotation information; it can be used in annotation, training, and inference scenarios. || TFRecord | TFRecordDataset | TFRecord is a binary data file format defined by TensorFlow. || NumPy | NumpySlicesDataset | The NumPy data source refers to NumPy array data that has already been read into memory. || Text File | TextFileDataset | Text File refers to common plain-text data. || CSV File | CSVDataset | CSV (comma-separated values) files store tabular data in plain text. |MindSpore also supports custom dataset loading through `GeneratorDataset`, and users can implement their own dataset classes as needed.> For more detailed dataset-loading APIs, see the [API documentation](https://www.mindspore.cn/doc/api_python/zh-CN/master/mindspore/mindspore.dataset.html). Loading Common Datasets: The following shows how to load several common datasets. CIFAR-10/100 Dataset: Download the CIFAR-10 dataset and extract it to the specified location:
###Code
!wget -N https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/cifar10.zip
!unzip -o ./cifar10.zip -d ./datasets/
!tree ./datasets/cifar10/
###Output
--2020-12-09 20:21:42-- https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/cifar10.zip
Resolving proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)... 192.168.0.172
Connecting to proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)|192.168.0.172|:8083... connected.
Proxy request sent, awaiting response... 200 OK
Length: 166235630 (159M) [application/zip]
Saving to: ‘cifar10.zip’
cifar10.zip 100%[===================>] 158.53M 70.0MB/s in 2.3s
2020-12-09 20:21:44 (70.0 MB/s) - ‘cifar10.zip’ saved [166235630/166235630]
Archive: ./cifar10.zip
creating: ./datasets/cifar10/
creating: ./datasets/cifar10/test/
inflating: ./datasets/cifar10/test/test_batch.bin
creating: ./datasets/cifar10/train/
inflating: ./datasets/cifar10/train/batches.meta.txt
inflating: ./datasets/cifar10/train/data_batch_1.bin
inflating: ./datasets/cifar10/train/data_batch_2.bin
inflating: ./datasets/cifar10/train/data_batch_3.bin
inflating: ./datasets/cifar10/train/data_batch_4.bin
inflating: ./datasets/cifar10/train/data_batch_5.bin
./datasets/cifar10/
├── test
│ └── test_batch.bin
└── train
├── batches.meta.txt
├── data_batch_1.bin
├── data_batch_2.bin
├── data_batch_3.bin
├── data_batch_4.bin
└── data_batch_5.bin
2 directories, 7 files
###Markdown
The following example loads the CIFAR-10 dataset through the `Cifar10Dataset` interface, uses a sequential sampler to take 5 samples, and then shows the shape and label of the corresponding images. The CIFAR-100 and MNIST datasets are loaded in a similar way (a minimal MNIST sketch is included after this example's output).
###Code
import mindspore.dataset as ds
DATA_DIR = "./datasets/cifar10/train/"
sampler = ds.SequentialSampler(num_samples=5)
dataset = ds.Cifar10Dataset(DATA_DIR, sampler=sampler)
for data in dataset.create_dict_iterator():
print("Image shape:", data['image'].shape, ", Label:", data['label'])
###Output
Image shape: (32, 32, 3) , Label: 6
Image shape: (32, 32, 3) , Label: 9
Image shape: (32, 32, 3) , Label: 9
Image shape: (32, 32, 3) , Label: 4
Image shape: (32, 32, 3) , Label: 1
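As noted above, MNIST can be loaded the same way. A minimal, hedged sketch follows; the local path is an assumption (MNIST is not part of this tutorial's downloaded test data), so adjust it to wherever the MNIST binary files live on your machine.

```python
import mindspore.dataset as ds

MNIST_DIR = "./datasets/mnist/train"  # hypothetical path, not provided by this tutorial
mnist_dataset = ds.MnistDataset(MNIST_DIR, num_samples=3, shuffle=False)
for data in mnist_dataset.create_dict_iterator():
    print("Image shape:", data['image'].shape, ", Label:", data['label'])
```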
###Markdown
VOC Dataset: The VOC dataset has multiple versions; VOC2012 is used here as an example. Download the [VOC2012 dataset](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar) and extract it; the directory structure is as follows.```└─ VOCtrainval_11-May-2012 └── VOCdevkit └── VOC2012 ├── Annotations ├── ImageSets ├── JPEGImages ├── SegmentationClass └── SegmentationObject```The following example loads the VOC2012 dataset through the `VOCDataset` interface and shows the original image shape and target shape when the task is set to Segmentation and to Detection, respectively.```pythonimport mindspore.dataset as dsDATA_DIR = "VOCtrainval_11-May-2012/VOCdevkit/VOC2012/"dataset = ds.VOCDataset(DATA_DIR, task="Segmentation", usage="train", num_samples=2, decode=True, shuffle=False)print("[Segmentation]:")for data in dataset.create_dict_iterator(): print("image shape:", data["image"].shape) print("target shape:", data["target"].shape)dataset = ds.VOCDataset(DATA_DIR, task="Detection", usage="train", num_samples=1, decode=True, shuffle=False)print("[Detection]:")for data in dataset.create_dict_iterator(): print("image shape:", data["image"].shape) print("bbox shape:", data["bbox"].shape)```Output:```text[Segmentation]:image shape: (2268, 4032, 3)target shape: (680, 1209, 3)image shape: (2268, 4032, 3)target shape: (680, 1209, 3)[Detection]:image shape: (2268, 4032, 3)bbox shape: (3, 4)``` COCO Dataset: The COCO dataset has multiple versions; the COCO2017 validation set is used here as an example. Download the COCO2017 [validation set](http://images.cocodataset.org/zips/val2017.zip), [detection task annotations](http://images.cocodataset.org/annotations/annotations_trainval2017.zip), and [panoptic segmentation task annotations](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip), extract them, keep only the validation-set part, and store it with the following directory structure.```└─ COCO ├── val2017 └── annotations ├── instances_val2017.json ├── panoptic_val2017.json └── person_keypoints_val2017.json```The following example loads the COCO2017 dataset through the `CocoDataset` interface and shows the different data obtained when the task is set to Detection, Stuff, Keypoint, and Panoptic.```pythonimport mindspore.dataset as dsDATA_DIR = "COCO/val2017/"ANNOTATION_FILE = "COCO/annotations/instances_val2017.json"KEYPOINT_FILE = "COCO/annotations/person_keypoints_val2017.json"PANOPTIC_FILE = "COCO/annotations/panoptic_val2017.json"dataset = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Detection", num_samples=1)for data in dataset.create_dict_iterator(): print("Detection:", data.keys())dataset = ds.CocoDataset(DATA_DIR, annotation_file=ANNOTATION_FILE, task="Stuff", num_samples=1)for data in dataset.create_dict_iterator(): print("Stuff:", data.keys())dataset = ds.CocoDataset(DATA_DIR, annotation_file=KEYPOINT_FILE, task="Keypoint", num_samples=1)for data in dataset.create_dict_iterator(): print("Keypoint:", data.keys())dataset = ds.CocoDataset(DATA_DIR, annotation_file=PANOPTIC_FILE, task="Panoptic", num_samples=1)for data in dataset.create_dict_iterator(): print("Panoptic:", data.keys())```Output:```textDetection: dict_keys(['image', 'bbox', 'category_id', 'iscrowd'])Stuff: dict_keys(['image', 'segmentation', 'iscrowd'])Keypoint: dict_keys(['image', 'keypoints', 'num_keypoints'])Panoptic: dict_keys(['image', 'bbox', 'category_id', 'iscrowd', 'area'])``` Loading Datasets in Specific Formats: The following sections show how to load dataset files in several specific formats. MindRecord Data Format: MindRecord is a data format defined by MindSpore; using MindRecord can deliver better performance.> Read the [Data Format Conversion](https://www.mindspore.cn/doc/programming_guide/zh-CN/master/dataset_conversion.html) section to learn how to convert datasets into the MindSpore data format. Before running this example, download the corresponding test data `test_mindrecord.zip` and extract it to the specified location with the following commands:
###Code
!wget -N https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/test_mindrecord.zip
!unzip -o ./test_mindrecord.zip -d ./datasets/mindspore_dataset_loading/
!tree ./datasets/mindspore_dataset_loading/
###Output
--2020-12-09 20:21:48-- https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/test_mindrecord.zip
Resolving proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)... 192.168.0.172
Connecting to proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)|192.168.0.172|:8083... connected.
Proxy request sent, awaiting response... 200 OK
Length: 60583 (59K) [application/zip]
Saving to: ‘test_mindrecord.zip’
test_mindrecord.zip 100%[===================>] 59.16K --.-KB/s in 0s
2020-12-09 20:21:48 (331 MB/s) - ‘test_mindrecord.zip’ saved [60583/60583]
Archive: ./test_mindrecord.zip
inflating: ./datasets/mindspore_dataset_loading/test.mindrecord.db
inflating: ./datasets/mindspore_dataset_loading/test.mindrecord
./datasets/mindspore_dataset_loading/
├── test.mindrecord
└── test.mindrecord.db
0 directories, 2 files
###Markdown
The following example loads a MindRecord file through the `MindDataset` interface and displays the labels of the loaded data.
###Code
import mindspore.dataset as ds
DATA_FILE = ["./datasets/mindspore_dataset_loading/test.mindrecord"]
mindrecord_dataset = ds.MindDataset(DATA_FILE)
for data in mindrecord_dataset.create_dict_iterator(output_numpy=True):
print(data.keys())
###Output
dict_keys(['chinese', 'english'])
dict_keys(['chinese', 'english'])
dict_keys(['chinese', 'english'])
###Markdown
Manifest Data Format: Manifest is a data format supported by Huawei ModelArts; see the [Manifest documentation](https://support.huaweicloud.com/engineers-modelarts/modelarts_23_0009.html) for a detailed description. For this example, download the test data `test_manifest.zip` and extract it to the specified location with the following commands:
###Code
!wget -N https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/test_manifest.zip
!unzip -o ./test_manifest.zip -d ./datasets/mindspore_dataset_loading/test_manifest/
!tree ./datasets/mindspore_dataset_loading/test_manifest/
###Output
--2020-12-09 20:21:49-- https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/test_manifest.zip
Resolving proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)... 192.168.0.172
Connecting to proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)|192.168.0.172|:8083... connected.
Proxy request sent, awaiting response... 200 OK
Length: 440877 (431K) [application/zip]
Saving to: ‘test_manifest.zip’
test_manifest.zip 100%[===================>] 430.54K --.-KB/s in 0.005s
2020-12-09 20:21:49 (89.2 MB/s) - ‘test_manifest.zip’ saved [440877/440877]
Archive: ./test_manifest.zip
inflating: ./datasets/mindspore_dataset_loading/test_manifest/eval/1.JPEG
inflating: ./datasets/mindspore_dataset_loading/test_manifest/eval/2.JPEG
inflating: ./datasets/mindspore_dataset_loading/test_manifest/test_manifest.json
creating: ./datasets/mindspore_dataset_loading/test_manifest/train/
inflating: ./datasets/mindspore_dataset_loading/test_manifest/train/1.JPEG
inflating: ./datasets/mindspore_dataset_loading/test_manifest/train/2.JPEG
./datasets/mindspore_dataset_loading/test_manifest/
├── eval
│ ├── 1.JPEG
│ └── 2.JPEG
├── test_manifest.json
└── train
├── 1.JPEG
└── 2.JPEG
2 directories, 5 files
###Markdown
The following example loads the Manifest file `test_manifest.json` through the `ManifestDataset` interface and displays the labels of the loaded data.
###Code
import mindspore.dataset as ds
import os
DATA_FILE = "./datasets/mindspore_dataset_loading/test_manifest/test_manifest.json"
manifest_dataset = ds.ManifestDataset(DATA_FILE)
for data in manifest_dataset.create_dict_iterator():
print(data["label"])
###Output
1
0
###Markdown
TFRecord Data Format: TFRecord is a binary data file format defined by TensorFlow. The following example loads TFRecord files through the `TFRecordDataset` interface and introduces two different ways of specifying the dataset schema. Download the `tfrecord` test data `test_tftext.zip` and extract it to the specified location with the following commands:
###Code
!wget -N https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/test_tftext.zip
!unzip -o ./test_tftext.zip -d ./datasets/mindspore_dataset_loading/test_tfrecord/
!tree ./datasets/mindspore_dataset_loading/test_tfrecord/
###Output
--2020-12-09 20:21:50-- https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/test_tftext.zip
Resolving proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)... 192.168.0.172
Connecting to proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)|192.168.0.172|:8083... connected.
Proxy request sent, awaiting response... 200 OK
Length: 522 [application/zip]
Saving to: ‘test_tftext.zip’
test_tftext.zip 100%[===================>] 522 --.-KB/s in 0s
2020-12-09 20:21:50 (31.9 MB/s) - ‘test_tftext.zip’ saved [522/522]
Archive: ./test_tftext.zip
inflating: ./datasets/mindspore_dataset_loading/test_tfrecord/test_tftext.tfrecord
./datasets/mindspore_dataset_loading/test_tfrecord/
└── test_tftext.tfrecord
0 directories, 1 file
###Markdown
1. Pass in the dataset path or a list of TFRecord files, here `test_tftext.tfrecord`, to create a `TFRecordDataset` object.
###Code
import mindspore.dataset as ds
DATA_FILE = "./datasets/mindspore_dataset_loading/test_tfrecord/test_tftext.tfrecord"
tfrecord_dataset = ds.TFRecordDataset(DATA_FILE)
for data in tfrecord_dataset.create_dict_iterator():
print(data.keys())
###Output
dict_keys(['chinese', 'line', 'words'])
dict_keys(['chinese', 'line', 'words'])
dict_keys(['chinese', 'line', 'words'])
###Markdown
2. Users can define the dataset schema and features either by writing a schema file or by creating a Schema object. - Writing a schema file: write the dataset format and features into a schema file in JSON, for example: ```json { "columns": { "image": { "type": "uint8", "rank": 1 }, "label" : { "type": "string", "rank": 1 }, "id" : { "type": "int64", "rank": 0 } } } ``` - `columns`: the column-information field, which must be defined according to the actual column names of the dataset. In the example above, the dataset columns are `image`, `label`, and `id`. Then pass the schema file path when creating the `TFRecordDataset`. ```python SCHEMA_DIR = "dataset_schema_path/schema.json" tfrecord_dataset = ds.TFRecordDataset(DATA_FILE, schema=SCHEMA_DIR) ``` - Creating a Schema object: create a Schema object, add custom fields to it, and then pass it in when creating the dataset object.
###Code
from mindspore import dtype as mstype
schema = ds.Schema()
schema.add_column('image', de_type=mstype.uint8)
schema.add_column('label', de_type=mstype.int32)
tfrecord_dataset = ds.TFRecordDataset(DATA_FILE, schema=schema)
###Output
_____no_output_____
###Markdown
NumPy Data Format: If all the data has already been read into memory, it can be loaded directly with the `NumpySlicesDataset` class. The following examples show how to load NumPy arrays, Python lists, and Python dicts with `NumpySlicesDataset`.- Loading NumPy array data
###Code
import numpy as np
import mindspore.dataset as ds
np.random.seed(6)
features, labels = np.random.sample((4, 2)), np.random.sample((4, 1))
data = (features, labels)
dataset = ds.NumpySlicesDataset(data, column_names=["col1", "col2"], shuffle=False)
for data in dataset:
print(data[0], data[1])
###Output
[0.89286015 0.33197981] [0.33540785]
[0.82122912 0.04169663] [0.62251943]
[0.10765668 0.59505206] [0.43814143]
[0.52981736 0.41880743] [0.73588211]
###Markdown
- Loading Python list data
###Code
import mindspore.dataset as ds
data1 = [[1, 2], [3, 4]]
dataset = ds.NumpySlicesDataset(data1, column_names=["col1"], shuffle=False)
for data in dataset:
print(data[0])
###Output
[1 2]
[3 4]
###Markdown
- Loading Python dict data
###Code
import mindspore.dataset as ds
data1 = {"a": [1, 2], "b": [3, 4]}
dataset = ds.NumpySlicesDataset(data1, column_names=["col1", "col2"], shuffle=False)
for data in dataset.create_dict_iterator():
print(data)
###Output
{'col1': Tensor(shape=[], dtype=Int64, value= 1), 'col2': Tensor(shape=[], dtype=Int64, value= 3)}
{'col1': Tensor(shape=[], dtype=Int64, value= 2), 'col2': Tensor(shape=[], dtype=Int64, value= 4)}
###Markdown
CSV Data Format: The following example loads CSV dataset files through `CSVDataset` and displays the `keys` of the loaded data. Download the test data `test_csv.zip` and extract it to the specified location with the following commands:
###Code
!wget -N https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/test_csv.zip
!unzip -o ./test_csv.zip -d ./datasets/mindspore_dataset_loading/test_csv/
!tree ./datasets/mindspore_dataset_loading/test_csv/
###Output
--2020-12-09 20:21:50-- https://obs.dualstack.cn-north-4.myhuaweicloud.com/mindspore-website/notebook/datasets/test_csv.zip
Resolving proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)... 192.168.0.172
Connecting to proxy-notebook.modelarts-dev-proxy.com (proxy-notebook.modelarts-dev-proxy.com)|192.168.0.172|:8083... connected.
Proxy request sent, awaiting response... 200 OK
Length: 344 [application/zip]
Saving to: ‘test_csv.zip’
test_csv.zip 100%[===================>] 344 --.-KB/s in 0s
2020-12-09 20:21:50 (19.3 MB/s) - ‘test_csv.zip’ saved [344/344]
Archive: ./test_csv.zip
extracting: ./datasets/mindspore_dataset_loading/test_csv/test2.csv
extracting: ./datasets/mindspore_dataset_loading/test_csv/test1.csv
./datasets/mindspore_dataset_loading/test_csv/
├── test1.csv
└── test2.csv
0 directories, 2 files
###Markdown
Pass in the dataset path or a list of CSV files. Text-format dataset files are loaded in a similar way to CSV files.
###Code
import mindspore.dataset as ds
DATA_FILE = ["./datasets/mindspore_dataset_loading/test_csv/test1.csv","./datasets/mindspore_dataset_loading/test_csv/test2.csv"]
csv_dataset = ds.CSVDataset(DATA_FILE)
for data in csv_dataset.create_dict_iterator(output_numpy=True):
print(data.keys())
###Output
dict_keys(['a', 'b', 'c', 'd'])
dict_keys(['a', 'b', 'c', 'd'])
dict_keys(['a', 'b', 'c', 'd'])
dict_keys(['a', 'b', 'c', 'd'])
###Markdown
Loading Custom Datasets: For datasets that MindSpore does not yet support loading directly, you can construct a `GeneratorDataset` object to load them in a custom way, or convert them into the MindRecord data format. Several different custom loading methods are shown below; for easier comparison, the generated random data is kept the same. Constructing a dataset generator function: construct a generator function that defines how the data is returned, and then use this function to build the custom dataset object. This method is suitable for simple scenarios.
###Code
import numpy as np
import mindspore.dataset as ds
np.random.seed(58)
data = np.random.sample((5, 2))
label = np.random.sample((5, 1))
def GeneratorFunc():
for i in range(5):
yield (data[i], label[i])
dataset = ds.GeneratorDataset(GeneratorFunc, ["data", "label"])
for data in dataset.create_dict_iterator():
print(data["data"], data["label"])
###Output
[0.36510558 0.45120592] [0.78888122]
[0.49606035 0.07562207] [0.38068183]
[0.57176158 0.28963401] [0.16271622]
[0.30880446 0.37487617] [0.54738768]
[0.81585667 0.96883469] [0.77994068]
###Markdown
Constructing an iterable dataset class: construct a dataset class that implements the `__iter__` and `__next__` methods, and then use an object of this class to build the custom dataset object. Compared with defining a generator function directly, using a dataset class allows more custom functionality.
###Code
import numpy as np
import mindspore.dataset as ds
class IterDatasetGenerator:
def __init__(self):
np.random.seed(58)
self.__index = 0
self.__data = np.random.sample((5, 2))
self.__label = np.random.sample((5, 1))
def __next__(self):
if self.__index >= len(self.__data):
raise StopIteration
else:
item = (self.__data[self.__index], self.__label[self.__index])
self.__index += 1
return item
def __iter__(self):
return self
def __len__(self):
return len(self.__data)
dataset_generator = IterDatasetGenerator()
dataset = ds.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=False)
for data in dataset.create_dict_iterator():
print(data["data"], data["label"])
###Output
[0.36510558 0.45120592] [0.78888122]
[0.49606035 0.07562207] [0.38068183]
[0.57176158 0.28963401] [0.16271622]
[0.30880446 0.37487617] [0.54738768]
[0.81585667 0.96883469] [0.77994068]
###Markdown
Constructing a random-access dataset class: construct a dataset class that implements the `__getitem__` method, and then use an object of this class to build the custom dataset object. This method can be used to implement distributed training.
###Code
import numpy as np
import mindspore.dataset as ds
class GetDatasetGenerator:
def __init__(self):
np.random.seed(58)
self.__data = np.random.sample((5, 2))
self.__label = np.random.sample((5, 1))
def __getitem__(self, index):
return (self.__data[index], self.__label[index])
def __len__(self):
return len(self.__data)
dataset_generator = GetDatasetGenerator()
dataset = ds.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=False)
for data in dataset.create_dict_iterator():
print(data["data"], data["label"])
###Output
[0.36510558 0.45120592] [0.78888122]
[0.49606035 0.07562207] [0.38068183]
[0.57176158 0.28963401] [0.16271622]
[0.30880446 0.37487617] [0.54738768]
[0.81585667 0.96883469] [0.77994068]
###Markdown
If you want to implement distributed training, you additionally need to implement the `__iter__` method in a sampler class on top of this approach, returning the index of the sampled data each time. The code that needs to be added is as follows:
###Code
import math
class MySampler():
def __init__(self, dataset, local_rank, world_size):
self.__num_data = len(dataset)
self.__local_rank = local_rank
self.__world_size = world_size
self.samples_per_rank = int(math.ceil(self.__num_data / float(self.__world_size)))
self.total_num_samples = self.samples_per_rank * self.__world_size
def __iter__(self):
indices = list(range(self.__num_data))
indices.extend(indices[:self.total_num_samples-len(indices)])
indices = indices[self.__local_rank:self.total_num_samples:self.__world_size]
return iter(indices)
def __len__(self):
return self.samples_per_rank
dataset_generator = GetDatasetGenerator()
sampler = MySampler(dataset_generator, local_rank=0, world_size=2)
dataset = ds.GeneratorDataset(dataset_generator, ["data", "label"], shuffle=False, sampler=sampler)
for data in dataset.create_dict_iterator():
print(data["data"], data["label"])
###Output
[0.36510558 0.45120592] [0.78888122]
[0.57176158 0.28963401] [0.16271622]
[0.81585667 0.96883469] [0.77994068]
|
CNN1.ipynb | ###Markdown
CNN1 Because there is a lot of duplicated code across my various modelling notebooks for CNN1, VGG16, and ResNet50 to set up the environment, the data, and the models, many of the explanations are duplicated as well. I did this because even though the notebooks would ideally be read in order, I wanted to make each notebook a standalone document. 1.1 Google Colab Setup - When running locally, skip ahead to Local Setup I ran this notebook in Google Colab Pro in the Python3 environment using a GPU runtime.At the time of writing, it was necessary to update the `tensorflow_datasets` package to import the dataset correctly. After running this cell, restart the runtime so that the new version of this package is loaded.
###Code
!pip install tensorflow_datasets==4.2.0
###Output
_____no_output_____
###Markdown
The following cells have been adapted from code located [here](https://towardsdatascience.com/google-drive-google-colab-github-dont-just-read-do-it-5554d5824228) Ensure we're connected to a GPU
###Code
from tensorflow.test import gpu_device_name
device_name = gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
###Output
_____no_output_____
###Markdown
Mount Google Drive and authenticate by following the instructions in the cell output
###Code
from google.colab import drive
# Default location for the drive
ROOT = "/content/drive"
# Mount Google Drive at /content/drive
drive.mount(ROOT)
###Output
_____no_output_____
###Markdown
Change to the directory containing project files
###Code
PROJECT_PATH = '/content/drive/MyDrive/BrainStation Capstone Project/capstone'
%cd '{PROJECT_PATH}'
###Output
_____no_output_____
###Markdown
This cell imports `pcamlib.py` into Google Colab
###Code
import imp
pcamlib = imp.new_module('pcamlib')
exec(open("./pcamlib.py").read(), pcamlib.__dict__)
###Output
_____no_output_____
###Markdown
For best performance, upload the `tensorflow_datasets` folder created by running the CLI command `tfds build patch_camelyon` locally to the folder specified in `pcam_path` within your Google Drive. If you have not done this step, running this cell will instead download the dataset automatically and save it to the virtual disk in Google Colab. When not using Colab Pro, I have found that I quickly ran out of disk space with this method. Note that this step can take 10-20 minutes while the files are downloaded and processed. If you have uploaded the dataset already, this cell should run almost instantly, and print the information about the dataset in the `tfds.core.DatasetInfo()` object.
###Code
pcam_path = '/content/drive/MyDrive/BrainStation Capstone Project/tensorflow_datasets'
pcam, pcam_info = pcamlib.load_pcam(data_dir=pcam_path)
###Output
_____no_output_____
###Markdown
--- 1.2 Local Setup - Skip these cells when running in Google Colab
###Code
# pcamlib.py is my library of helper functions
import pcamlib
###Output
_____no_output_____
###Markdown
This cell should automatically download the `patch_camelyon` dataset into `~/tensorflow_datasets/patch_camelyon` if it's not already installed (7.48 GB). If there are any issues, run the CLI command `tfds build patch_camelyon` first to install. If the dataset already exists in this location, this cell should print the dataset info almost instantly. If you selected a different location to install the dataset, update the `data_dir` parameter with the location of the dataset you selected.
###Code
# Load dataset and dataset info
pcam, pcam_info = pcamlib.load_pcam()
###Output
tfds.core.DatasetInfo(
name='patch_camelyon',
full_name='patch_camelyon/2.0.0',
description="""
The PatchCamelyon benchmark is a new and challenging image classification
dataset. It consists of 327.680 color images (96 x 96px) extracted from
histopathologic scans of lymph node sections. Each image is annoted with a
binary label indicating presence of metastatic tissue. PCam provides a new
benchmark for machine learning models: bigger than CIFAR10, smaller than
Imagenet, trainable on a single GPU.
""",
homepage='https://patchcamelyon.grand-challenge.org/',
data_path='/Users/jaredlauer/tensorflow_datasets/patch_camelyon/2.0.0',
download_size=7.48 GiB,
dataset_size=7.06 GiB,
features=FeaturesDict({
'id': Text(shape=(), dtype=tf.string),
'image': Image(shape=(96, 96, 3), dtype=tf.uint8),
'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=2),
}),
supervised_keys=('image', 'label'),
splits={
'test': <SplitInfo num_examples=32768, num_shards=8>,
'train': <SplitInfo num_examples=262144, num_shards=64>,
'validation': <SplitInfo num_examples=32768, num_shards=8>,
},
citation="""@misc{b_s_veeling_j_linmans_j_winkens_t_cohen_2018_2546921,
author = {B. S. Veeling, J. Linmans, J. Winkens, T. Cohen, M. Welling},
title = {Rotation Equivariant CNNs for Digital Pathology},
month = sep,
year = 2018,
doi = {10.1007/978-3-030-00934-2_24},
url = {https://doi.org/10.1007/978-3-030-00934-2_24}
}""",
)
###Markdown
---- 1.3 Finish Setup - Always run these cells
###Code
# Import datascience packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Import tensorflow packages
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPool2D, Flatten, Dropout
from tensorflow.keras.models import load_model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
###Output
_____no_output_____
###Markdown
To get started with this dataset, I adapted the code from this [article](https://geertlitjens.nl/post/getting-started-with-camelyon/) written by Geert Litjens, one of the authors of the dataset.I used his code for the `train_pipeline`, `valid_pipeline`, and `test_pipeline`, which load the train, validation, and test sets and prepare them for modelling. I also make use of his function `convert_sample`. This function extracts each image and its corresponding label from the dataset, converts each image to a TensorFlow `tf.float32` datatype, then performs one-hot encoding on the labels and converts them to `tf.float32` as well.By default, the batch size for the training set is 64, and 128 for the test and validation sets.
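For reference, here is a hedged sketch of what `convert_sample` and the pipeline construction inside `pcamlib.build_pipelines` might look like, based on the description above and Litjens' article. The exact implementation lives in `pcamlib.py`, so the buffer size and `num_parallel_calls` values below are assumptions, and `pcam` is assumed to be the dict of splits returned by `tfds.load`.

```python
import tensorflow as tf

def convert_sample(sample):
    # Cast the image to float32 and one-hot encode the binary label, also as float32
    image, label = sample['image'], sample['label']
    image = tf.image.convert_image_dtype(image, tf.float32)
    label = tf.one_hot(label, 2, dtype=tf.float32)
    return image, label

# Hypothetical equivalent of pcamlib.build_pipelines: map, shuffle, batch
train_pipeline = pcam['train'].map(convert_sample, num_parallel_calls=8).shuffle(1024).repeat().batch(64)
valid_pipeline = pcam['validation'].map(convert_sample, num_parallel_calls=8).repeat().batch(128)
test_pipeline  = pcam['test'].map(convert_sample, num_parallel_calls=8).batch(128)
```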
###Code
# Create generator "pipelines" for train, validation and test sets.
# Default batch sizes of 64 for the train set and 128 for validation and test sets to speed up calculations
train_pipeline, valid_pipeline, test_pipeline = pcamlib.build_pipelines(pcam)
###Output
_____no_output_____
###Markdown
--- 2.0 Training the Model> If you are not training this model and are loading it from a file, skip ahead to 2.2 Loading the Model I used Geert Litjens' CNN layer architecture as a starting point. It resembles a VGG16 architecture because it has three sets of two Convolutional layers followed by a single Max Pooling layer, followed by a Flattening layer and two Dense layers before the final Dense layer which outputs the class predictions. I kept the `relu` activation function, `valid` padding and 3x3 `kernel_size`, because in my research these all seemed like fairly standard hyperparameters in CNNs, and my general philosophy was not to change anything unless I had a particularly good reason to. I changed the optimizer to `Adam` from `SGD` because I found that `SGD` did not perform very well, and `Adam` is an extremely popular choice in CNNs because of its effectiveness. The other change I made to this architecture was adding 25% Dropout layers after each Convolutional and Dense layer, because the first iteration of the model started overfitting extremely quickly after the first epoch. Increasing the level of dropout didn't make a major difference in my experience. I also experimented with adding additional Convolutional and Dense layers near the end of the network, but this didn't improve performance significantly. I suspect this is because the dimensionality of the features gets very low at the end of the network (8x8), and adding more layers doesn't make much impact because they don't have enough information to work with. I tried different learning rates as well, but this didn't make a significant impact either. Overall, I found that optimizing this network myself was very difficult. Due to the random nature of weight initialization and training image shuffling, each training run gave different results, and it was challenging to find ways to improve the performance of the network and be confident that changes weren't simply due to random chance. I attempted to train each iteration 3 or more times and average the results, but this quickly became very time-consuming and didn't appear to be worth the effort. I think a better way to improve the performance on this dataset is to make more macro scale changes such as experimenting with different transfer learning models, grayscaled versions of the images, data augmentation, and the like.
###Code
# Instantiate model object
cnn = Sequential()
# Images are 96x96 px, in RGB so there are 3 channels
image_shape = (96, 96, 3)
# Adding convolutional layers to the model
# It was important to add dropout layers after each convolutional layer to reduce overfitting
cnn.add(Conv2D(16, kernel_size=(3, 3), activation='relu', padding='valid', input_shape=image_shape))
cnn.add(Dropout(0.25))
cnn.add(Conv2D(16, kernel_size=(3, 3), activation='relu', padding='valid'))
cnn.add(Dropout(0.25))
# Add a max pool layer to reduce the dimensions of the feature maps
cnn.add(MaxPool2D(pool_size=(2, 2), strides=(2,2)))
# Repeating this architecture two more times
cnn.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding='valid'))
cnn.add(Dropout(0.25))
cnn.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding='valid'))
cnn.add(Dropout(0.25))
cnn.add(MaxPool2D(pool_size=(2, 2), strides=(2,2)))
cnn.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='valid'))
cnn.add(Dropout(0.25))
cnn.add(Conv2D(64, kernel_size=(3, 3), activation='relu', padding='valid'))
cnn.add(Dropout(0.25))
cnn.add(MaxPool2D(pool_size=(2, 2), strides=(2,2)))
# Flatten the data to prepare for dense layers
cnn.add(Flatten())
cnn.add(Dense(256, activation='relu'))
cnn.add(Dropout(0.25))
cnn.add(Dense(128, activation='relu'))
cnn.add(Dropout(0.25))
# Final Dense layer to make class predictions
cnn.add(Dense(2, activation='softmax'))
cnn.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 94, 94, 16) 448
_________________________________________________________________
dropout (Dropout) (None, 94, 94, 16) 0
_________________________________________________________________
conv2d_1 (Conv2D) (None, 92, 92, 16) 2320
_________________________________________________________________
dropout_1 (Dropout) (None, 92, 92, 16) 0
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 46, 46, 16) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 44, 44, 32) 4640
_________________________________________________________________
dropout_2 (Dropout) (None, 44, 44, 32) 0
_________________________________________________________________
conv2d_3 (Conv2D) (None, 42, 42, 32) 9248
_________________________________________________________________
dropout_3 (Dropout) (None, 42, 42, 32) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 21, 21, 32) 0
_________________________________________________________________
conv2d_4 (Conv2D) (None, 19, 19, 64) 18496
_________________________________________________________________
dropout_4 (Dropout) (None, 19, 19, 64) 0
_________________________________________________________________
conv2d_5 (Conv2D) (None, 17, 17, 64) 36928
_________________________________________________________________
dropout_5 (Dropout) (None, 17, 17, 64) 0
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 8, 8, 64) 0
_________________________________________________________________
flatten (Flatten) (None, 4096) 0
_________________________________________________________________
dense (Dense) (None, 256) 1048832
_________________________________________________________________
dropout_6 (Dropout) (None, 256) 0
_________________________________________________________________
dense_1 (Dense) (None, 128) 32896
_________________________________________________________________
dropout_7 (Dropout) (None, 128) 0
_________________________________________________________________
dense_2 (Dense) (None, 2) 258
=================================================================
Total params: 1,154,066
Trainable params: 1,154,066
Non-trainable params: 0
_________________________________________________________________
###Markdown
CNN1 only has around 1.15 million parameters, all of them trainable. While this sounds like a lot, as we will see when beginning transfer learning with VGG16 and ResNet50, this is actually relatively small for a CNN architecture. We compile the model with the `Adam` optimizer as it is one of the most commonly used and most effective optimizers when using CNNs for image classification. We use a `categorical_crossentropy` loss function and track the accuracy of the train and validation set throughout the training process.
###Code
cnn.compile(optimizer='Adam',
loss='categorical_crossentropy',
metrics=['accuracy'])
###Output
_____no_output_____
###Markdown
Finally, we create an early stopping callback function which will stop the training process early if the validation loss fails to decrease over the specified number of epochs. This allows us to set the number of training epochs to a large number without fear of wasting time going through all the epochs if they aren't improving the results.I experimented with different values for the patience, and 8 may be slightly too long for optimal results, but I wanted to see if the networks would improve at all if left to train for a longer period of time.
###Code
# Add early stop callback to prevent the model from overfitting, or running too long
# This will stop the training early if the validation loss doesn't decrease within 8 consecutive epochs
early_stop = EarlyStopping(monitor='val_loss', patience=8, mode='min', verbose=1)
###Output
_____no_output_____
###Markdown
This cell trains the model, and it's important to save the output into a variable called `history` so the training accuracy and loss history can be plotted later. I selected 30 epochs as an arbitrarily large number, and rely on the early stopping threshold to determine how long the model is trained. `steps_per_epoch` is simply the size of the training set divided by the batch size (262144 / 64) which ensures the entire training set is covered in each epoch.Similarly, `validation_steps` is the size of the validation set divided by the batch size of the validation set (32768 / 128) to ensure the entire validation set is tested.If changing the batch size, it's important to update these variables accordingly.
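For reference, the step counts passed to `fit` below come straight from this arithmetic (a small illustrative calculation, not part of the original notebook):

```python
train_size, valid_size = 262144, 32768   # split sizes reported by pcam_info
train_batch, valid_batch = 64, 128       # batch sizes used by the pipelines
steps_per_epoch = train_size // train_batch     # 4096
validation_steps = valid_size // valid_batch    # 256
```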
###Code
%%time
history = cnn.fit(train_pipeline,
validation_data=valid_pipeline,
verbose=1, epochs=30, steps_per_epoch=4096, validation_steps=256,
callbacks=[early_stop])
###Output
_____no_output_____
###Markdown
Here, I save the history as a pandas dataframe for easier exporting to .csv and plotting.
###Code
# Save the history of the model to a pandas dataframe
hist_df = pd.DataFrame(history.history)
###Output
_____no_output_____
###Markdown
--- 2.1 Saving the Model Because the training process can take a long time, it's a good idea to save the trained model, the training history, and the model predictions to external files.
###Code
# Save the fitted model to a file
cnn.save('data/models/cnn1')
# Save the history of the model to a csv
pcamlib.save_history(hist_df, 'data/models/history/cnn1_history.csv')
###Output
_____no_output_____
###Markdown
In general, I leave `save=False` when running these cells to avoid accidentally overwriting files. Simply change this to `save=True`, and consider changing the filepath, when you want to save these results. On average, calculating `y_proba` takes around 40 seconds in Colab.
###Code
%%time
y_proba = pcamlib.generate_y_proba(cnn, test_pipeline, class_1=False, save=False, filepath='data/models/y_proba/cnn1_y_proba.csv')
###Output
_____no_output_____
###Markdown
--- 2.2 Loading the Model Here, you can load the model, its history, and predictions from files by running these cells.
###Code
# Load the model from a file
cnn = load_model("data/models/cnn1/")
# Load the model training history from a file
hist_df = pcamlib.load_history('data/models/history/cnn1_history.csv')
# Load y_proba from file if the model is not saved. For some larger models, I only save y_proba because the files are too large to track using git
y_proba = pcamlib.load_y_proba('data/models/y_proba/cnn1_y_proba.csv')
###Output
_____no_output_____
###Markdown
This cell extracts the ground truth labels from the test set and saves them as `y_true`
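A hedged sketch of what `pcamlib.generate_y_true` might do internally; the real helper lives in `pcamlib.py`, so treat this as an assumption for illustration only.

```python
import numpy as np
import tensorflow_datasets as tfds

# Collect the label of every example in the test split, in order
y_true = np.array([int(example['label']) for example in tfds.as_numpy(pcam['test'])])
```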
###Code
%%time
# Create a list of the true labels for the test set
y_true = pcamlib.generate_y_true(pcam)
###Output
CPU times: user 23.4 s, sys: 1.97 s, total: 25.4 s
Wall time: 5.08 s
###Markdown
--- 3.0 Evaluating the Model I am evaluating the outputs from a trained model I've loaded from section 2.2 above. If you trained a model in this notebook, the results will likely be very different from what I'm describing in the markdown cells.
###Code
# Plot the training and validation Accuracy and Loss
pcamlib.plot_history(hist_df, save=True, filepath='data/plots/cnn1_acc_loss.png')
###Output
_____no_output_____
###Markdown
This model does show some overfitting, but the training accuracy seems to be levelling out. The validation accuracy and loss are fairly erratic, and to optimize the performance of this model in the future it may make sense to use Model Checkpointing to save the best version of the model, which in this case looks to be around epoch 13. This cell makes predictions based on `y_proba` with the decision threshold set to 50% by default, but this can be adjusted by specifying a different value between 0 and 1 in the `threshold` parameter.
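A hedged sketch of the thresholding step that `pcamlib.generate_y_pred` performs, assuming `y_proba` holds the predicted probability of class 1; the actual helper is defined in `pcamlib.py`.

```python
import numpy as np

def generate_y_pred(y_proba, threshold=0.5):
    # Predict class 1 whenever its probability meets or exceeds the threshold
    return (np.asarray(y_proba).ravel() >= threshold).astype(int)
```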
###Code
%%time
# Get predictions from y_proba. Default threshold of 0.5, meaning predicts positive class if >= 50% certainty of class 1
y_pred = pcamlib.generate_y_pred(y_proba)
###Output
CPU times: user 11.7 ms, sys: 6.52 ms, total: 18.2 ms
Wall time: 18.9 ms
###Markdown
Now that we've calculated the predictions of this model, we can evaluate it on its test accuracy, positive class recall, and AUC.
###Code
%%time
# Calculate accuracy of the predictions on the test set
pcamlib.print_test_accuracy(y_true, y_pred)
# Plot the confusion matrix
pcamlib.plot_cf_matrix(y_true, y_pred, normalize=True, save=True, filepath='data/plots/cnn1_cf_matrix.png')
pcamlib.print_classification_report(y_true, y_pred)
# Plot the receiver operating characteristic curve
pcamlib.plot_roc_curve(y_true, y_proba, save=True, filepath='data/plots/cnn1_roc.png')
###Output
_____no_output_____
###Markdown
This model achieved a test accuracy of 78.7%, a positive class recall of 93.45%, and an AUC of 0.906. Even though the test accuracy is slightly low, this model outperformed the pathologist benchmark on both recall and AUC, which is very good to see. Another way to evaluate this model is to look at the number of images which were predicted to contain healthy tissue, because these would be discarded during the pre-screening process, and this is a measure of how much we've reduced the workload of the pathologists. An easy way to do this is to subtract the number of nonzero predictions from the size of the test set, which tells us exactly how many images were predicted to contain healthy tissue, and convert this into a percentage of the test set.
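A hedged sketch of the calculation behind `pcamlib.print_workload_reduction` (an assumption for illustration, not the helper's actual source):

```python
import numpy as np

# Images predicted healthy (class 0) as a percentage of the whole test set
n_total = len(y_pred)
n_healthy = n_total - np.count_nonzero(y_pred)
print(f'{100 * n_healthy / n_total}%')
```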
###Code
pcamlib.print_workload_reduction(y_pred)
###Output
35.2783203125%
###Markdown
We've reduced the pathologist's workload by 35.3% while only missing around 6.5% of the images containing cancer. In summary:| | Pathologist | CNN1 ||---------------------|--------------|--------|| AUC | 0.810 (mean) | 0.906 || Recall | 62.8% | 93.5% || Test Accuracy | -- | 78.7% || Workload Reduced By | -- | 35.3% | Finally, let's look at some of the images which are misclassified by this model. Note that to see a different selection of images, you can set `image_index` to start the search from your desired place in the test set (e.g. `image_index = 29` to start from image 29).
###Code
# Show a sample of images that were misclassified
pcamlib.plot_misclassified_images(pcam, y_true, y_pred)
###Output
_____no_output_____ |
geom_abs/notebooks/.ipynb_checkpoints/GeomAbstr_DNN_Shapes-checkpoint.ipynb | ###Markdown
2) Studying the effect of supervision format for representation in hidden layers 2.2) Product supervision
###Code
shapes_prod = [set(s1).intersection(set(s2)) for s2 in shapes_smallness for s1 in shapes_parity]
shapes_18.build_dichLabels(shapes_prod, 'parity_prod_smaller_than_9')
dnn_prod = Sequential([
Dense(w_1, input_shape=(w_in,)),
Activation('tanh'),
Dense(w_2),
Activation('tanh'),
Dense(4),
Activation('tanh')
])
model = Model(dnn_prod)
# Train the model, iterating on the data in batches of 32 samples
model.fit(shapes_18, dich_name='parity_prod_smaller_than_9', epochs=10, batch_size=32)
model.evaluate(shapes_18, dich_name='parity_prod_smaller_than_9', batch_size=128)
%matplotlib widget
model.sample_eval(shapes_18, 2)
%matplotlib widget
pca = PCA(n_components=2)
reduced_prod_2d, fig3 = model.get_repr(shapes_18, pca, dim=2, plotting=True)
fig3.canvas.layout.width = '80%'
fig3.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_prod_2d):
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]), 100*red['expl_var'][0], 100*red['expl_var'][1]))
pca3 = PCA(n_components=3)
reduced_prod_3d, fig4 = model.get_repr(shapes_18, pca3, dim=3, plotting=True)
fig4.canvas.layout.width = '80%'
fig4.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_prod_3d):
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}% + {4:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]+red['expl_var'][2]), 100*red['expl_var'][0], 100*red['expl_var'][1], 100*red['expl_var'][2]))
###Output
_____no_output_____
###Markdown
2.3) Compact hstacking supervision
###Code
shapes_18.compstack_dichs('parity', 'smaller_than_9')
dnn_compstack = Sequential([
Dense(w_1, input_shape=(w_in,)),
Activation('tanh'),
Dense(w_2),
Activation('tanh'),
Dense(2),
Activation('tanh')
])
compstack = Model(dnn_compstack)
# Train the model, iterating on the data in batches of 32 samples
compstack.fit(shapes_18, dich_name='parity_compstack_smaller_than_9', epochs=10, batch_size=32)
compstack.evaluate(shapes_18, dich_name='parity_compstack_smaller_than_9', batch_size=128)
%matplotlib widget
pca = PCA(n_components=2)
reduced_compstack_2d, fig5 = compstack.get_repr(shapes_18, pca, dim=2, plotting=True)
fig5.canvas.layout.width = '80%'
fig5.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_compstack_2d):
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]), 100*red['expl_var'][0], 100*red['expl_var'][1]))
pca3 = PCA(n_components=3)
reduced_compstack_3d, fig6 = compstack.get_repr(shapes_18, pca3, dim=3, plotting=True)
fig6.canvas.layout.width = '80%'
fig6.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_compstack_3d):
if red['avg_lbl_repr'].shape[1] > 2:
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}% + {4:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]+red['expl_var'][2]), 100*red['expl_var'][0], 100*red['expl_var'][1], 100*red['expl_var'][2]))
else:
print('Layer {0:d} could not be projected in 3d'.format(lay_id))
###Output
_____no_output_____
###Markdown
3) Testing representation for categorical features 3.1) hstack
###Code
# Currently dichotomies will only be binary
shapes_range = [range(0,6), range(6,12), range(12,18)]
shapes_18.build_dichLabels(shapes_range, 'range_bins')
shapes_18.hstack_dichs('parity', 'range_bins')
shapes_18.compstack_dichs('parity', 'range_bins')
shapes_18.build_catLabels(shapes_range, 'cat_range_bins')
shapes_18.compstack_dichs('parity', 'cat_range_bins')
dnn_ctg_hstack = Sequential([
Dense(w_1, input_shape=(w_in,)),
Activation('tanh'),
Dense(w_2),
Activation('tanh'),
Dense(5),
Activation('tanh')
])
ctg_hstack = Model(dnn_ctg_hstack)
# Train the model, iterating on the data in batches of 32 samples
ctg_hstack.fit(shapes_18, dich_name='parity_hstack_range_bins', epochs=20, batch_size=32)
ctg_hstack.evaluate(shapes_18, dich_name='parity_hstack_range_bins', batch_size=128)
pca = PCA(n_components=2)
reduced_ctg_hstack_2d, fig7 = ctg_hstack.get_repr(shapes_18, pca, plotting=True)
fig7.canvas.layout.width = '80%'
fig7.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_ctg_hstack_2d):
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]), 100*red['expl_var'][0], 100*red['expl_var'][1]))
pca3 = PCA(n_components=3)
reduced_ctg_hstack_3d, fig8 = ctg_hstack.get_repr(shapes_18, pca3, dim=3, plotting=True)
fig8.canvas.layout.width = '80%'
fig8.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_ctg_hstack_3d):
if red['avg_lbl_repr'].shape[1] > 2:
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}% + {4:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]+red['expl_var'][2]), 100*red['expl_var'][0], 100*red['expl_var'][1], 100*red['expl_var'][2]))
else:
print('Layer {0:d} could not be projected in 3d'.format(lay_id))
###Output
_____no_output_____
###Markdown
3.2) Product
###Code
shapes_ctg_prod = [set(s1).intersection(set(s2)) for s2 in shapes_range for s1 in shapes_parity]
shapes_18.build_dichLabels(shapes_ctg_prod, 'parity_prod_range_bins')
w_in =shapes_18.tot_dim
w_1 = 100
w_2 = 100
max_epochs = 400
dnn_ctg_prod = Sequential([
Dense(w_1, input_shape=(w_in,)),
Activation('tanh'),
Dense(w_2),
Activation('tanh'),
Dense(6),
Activation('tanh')
])
ctg_prod = Model(dnn_ctg_prod)
# Train the model, iterating on the data in batches of 32 samples
ctg_prod.fit(shapes_18, dich_name='parity_prod_range_bins', epochs=10, batch_size=32)
ctg_prod.evaluate(shapes_18, dich_name='parity_prod_range_bins', batch_size=128)
pca = PCA(n_components=2)
reduced_ctg_prod_2d, fig9 = ctg_prod.get_repr(shapes_18, pca, plotting=True)
fig9.canvas.layout.width = '80%'
fig9.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_ctg_prod_2d):
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]), 100*red['expl_var'][0], 100*red['expl_var'][1]))
pca3 = PCA(n_components=3)
reduced_ctg_prod_3d, fig10 = ctg_prod.get_repr(shapes_18, pca3, dim=3, plotting=True)
fig10.canvas.layout.width = '80%'
fig10.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_ctg_prod_3d):
if red['avg_lbl_repr'].shape[1] > 2:
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}% + {4:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]+red['expl_var'][2]), 100*red['expl_var'][0], 100*red['expl_var'][1], 100*red['expl_var'][2]))
else:
print('Layer {0:d} could not be projected in 3d'.format(lay_id))
###Output
_____no_output_____
###Markdown
3.3) compstack
###Code
w_in = shapes_18.tot_dim
w_1 = 100
w_2 = 100
max_epochs = 400
dnn_ctg_compstack = Sequential([
Dense(w_1, input_shape=(w_in,)),
Activation('tanh'),
Dense(w_2),
Activation('tanh'),
Dense(2),
Activation('tanh')
])
ctg_compstack = Model(dnn_ctg_compstack)
# Train the model, iterating on the data in batches of 32 samples
ctg_compstack.fit(shapes_18, dich_name='parity_compstack_cat_range_bins', epochs=10, batch_size=32)
ctg_compstack.evaluate(shapes_18, dich_name='parity_compstack_cat_range_bins', batch_size=128)
%matplotlib widget
pca = PCA(n_components=2)
reduced_ctg_compstack_2d, fig11 = ctg_compstack.get_repr(shapes_18, pca, plotting=True)
fig11.canvas.layout.width = '80%'
fig11.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_ctg_compstack_2d):
print('Layer {0:d} - {1:.1f}% 2d var - {2:.1f}% + {3:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]), 100*red['expl_var'][0], 100*red['expl_var'][1]))
pca3 = PCA(n_components=3)
reduced_ctg_compstack_3d, fig12 = ctg_compstack.get_repr(shapes_18, pca3, dim=3, plotting=True)
fig12.canvas.layout.width = '80%'
fig12.canvas.layout.height = '5000px'
for lay_id, red in enumerate(reduced_ctg_compstack_3d):
if red['avg_lbl_repr'].shape[1] > 2:
        print('Layer {0:d} - {1:.1f}% 3d var - {2:.1f}% + {3:.1f}% + {4:.1f}%'.format(lay_id, 100*(red['expl_var'][0]+red['expl_var'][1]+red['expl_var'][2]), 100*red['expl_var'][0], 100*red['expl_var'][1], 100*red['expl_var'][2]))
else:
print('Layer {0:d} could not be projected in 3d'.format(lay_id))
###Output
_____no_output_____ |
Chapter04/gradflow/notebooks/mlflow_run_keras.ipynb | ###Markdown
Import dependencies
###Code
import pandas
import numpy as np
import mlflow
import tensorflow
from tensorflow import keras
import mlflow.keras
from sklearn.metrics import f1_score,confusion_matrix
from sklearn.model_selection import train_test_split
###Output
_____no_output_____
###Markdown
Retrieve Data
###Code
pandas_df = pandas.read_csv("training_data.csv")
X=pandas_df.iloc[:,:-1]
Y=pandas_df.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.33, random_state=4284, stratify=Y)
###Output
_____no_output_____
###Markdown
Set Experiment
###Code
mlflow.set_experiment("Baseline_Predictions")
mlflow.tensorflow.autolog()
###Output
INFO: 'Baseline_Predictions' does not exist. Creating a new experiment
###Markdown
Create Model
###Code
model = keras.Sequential([
keras.layers.Dense(
units=36,
activation='relu',
input_shape=(X_train.shape[-1],)
),
keras.layers.BatchNormalization(),
keras.layers.Dense(units=1, activation='sigmoid'),
])
model.compile(
optimizer=keras.optimizers.Adam(lr=0.001),
loss="binary_crossentropy",
metrics="Accuracy"
)
###Output
/opt/conda/lib/python3.7/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py:375: UserWarning: The `lr` argument is deprecated, use `learning_rate` instead.
"The `lr` argument is deprecated, use `learning_rate` instead.")
###Markdown
Run the Model
###Code
with mlflow.start_run(run_name='keras_model_baseline') as run:
model.fit(
X_train,
y_train,
epochs=20,
validation_split=0.05,
shuffle=True,
verbose=0
)
preds = model.predict(X_test)
y_pred = np.where(preds>0.5,1,0)
f1 = f1_score(y_test, y_pred)
mlflow.log_metric(key="f1_experiment_score", value=f1)
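    # Possible extra step (a sketch, not part of the original run): confusion_matrix
    # is imported above but unused; it could be printed or logged at this point, e.g.
    # print(confusion_matrix(y_test, y_pred))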
###Output
2021/07/02 19:27:52 INFO mlflow.utils.autologging_utils: tensorflow autologging will track hyperparameters, performance metrics, model artifacts, and lineage information for the current tensorflow workflow to the MLflow run with ID '0fb5bdf74e564135bdd037b4c341aabd'
|
Homework/Homework_2.ipynb | ###Markdown
Homework 2: Climate Change TOC There have been many studies documenting that the average global temperature has been increasing over the last century. The consequences of a continued rise in global temperature will be dire. Rising sea levels and an increased frequency of extreme weather events will affect billions of people. In this problem, you will attempt to study the relationship between average global temperature and several other factors. The file climate_change_1.csv contains climate data from May 1983 to December 2008. The available variables include: `Year`: the observation year. `Month`: the observation month. `Temp`: the difference in degrees Celsius between the average global temperature in that period and a reference value. This data comes from the [Climatic Research Unit at the University of East Anglia](https://crudata.uea.ac.uk/cru/data/temperature/) . `CO2`, `N2O`, `CH4`, `CFC.11`, `CFC.12`: atmospheric concentrations of carbon dioxide ($CO_2$), nitrous oxide ($N_2O$), methane ($CH_4$), trichlorofluoromethane ($CCl_3F$commonly referred to as $CFC-11$) and dichlorodifluoromethane ($CCl_2F_2$; commonly referred to as $CFC-12$), respectively. This data comes from the [ESRL/NOAA Global Monitoring Division](http://www.esrl.noaa.gov/gmd/ccgg/data-products.html). `CO2`, `N2O` and `CH4` are expressed in ppmv (parts per million by volume -- i.e., *397 ppmv of CO2 means that CO2 constitutes 397 millionths of the total volume of the atmosphere*) `CFC.11` and `CFC.12` are expressed in ppbv (parts per billion by volume). `Aerosols`: the mean stratospheric aerosol optical depth at 550 nm. This variable is linked to volcanoes, as volcanic eruptions result in new particles being added to the atmosphere, which affect how much of the sun's energy is reflected back into space. This data is from the [Godard Institute for Space Studies at NASA](https://data.giss.nasa.gov/modelforce/strataer/). `TSI`: the total solar irradiance (TSI) in W/m2 (the rate at which the sun's energy is deposited per unit area). Due to sunspots and other solar phenomena, the amount of energy that is given off by the sun varies substantially with time. This data is from the [SOLARIS-HEPPA project website](https://solarisheppa.geomar.de/solarisheppa/cmip5). `MEI`: multivariate El Nino Southern Oscillation index (MEI), a measure of the strength of the [El Nino/La Nina-Southern Oscillation](http://en.wikipedia.org/wiki/El_nino) (a weather effect in the Pacific Ocean that affects global temperatures). This data comes from the [ESRL/NOAA Physical Sciences Division](http://www.esrl.noaa.gov/psd/enso/mei/table.html). ___ Preparation Import data
###Code
import pandas as pd
df1 = pd.read_csv('./data/climate_change_1.csv')
df2 = pd.read_csv('./data/climate_change_2.csv')
###Output
_____no_output_____
###Markdown
Exploration and cleaning Data structure
###Code
df1.head().round(2)
df2.head().round(2)
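# Added aside (a small sketch): confirm which column df2 has that df1 lacks
# (the `NO` variable discussed in the next cell)
print(set(df2.columns) - set(df1.columns))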
###Output
_____no_output_____
###Markdown
Statistics The most significant difference is the variable `NO`. Then explore **basic statistics**, rounded to three decimals:
###Code
df1.describe().round(3)
df2.describe().round(3)
###Output
_____no_output_____
###Markdown
Missing data
###Code
df1.info()
df2.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 308 entries, 0 to 307
Data columns (total 12 columns):
Year 308 non-null int64
Month 308 non-null int64
MEI 308 non-null float64
CO2 308 non-null float64
CH4 308 non-null float64
N2O 308 non-null float64
CFC-11 308 non-null float64
CFC-12 308 non-null float64
TSI 308 non-null float64
Aerosols 308 non-null float64
NO 308 non-null float64
Temp 308 non-null float64
dtypes: float64(10), int64(2)
memory usage: 29.0 KB
###Markdown
No missing data were found, then continue. Duplication
###Code
print('Duplicated rows:', len(df1[df1.duplicated()]), ', then continue.')
###Output
Duplicated rows: 0 , then continue.
###Markdown
Outliers Conduct box plotting to find outliers in DF1 and DF2:
###Code
import sys
import seaborn as sns
import matplotlib.pyplot as plt
fig1 = plt.figure(figsize=(12,8), dpi=96)
for i in range(1, len(df1.columns) + 1):
fig1.add_subplot(3, 4, i)
df1.iloc[:, [i-1]].boxplot()
range(1, len(df2.columns) + 1)
fig2 = plt.figure(figsize=(12,8), dpi=96)
for i in range(1, len(df2.columns) + 1):
fig2.add_subplot(3, 4, i)
df2.iloc[:, [i-1]].boxplot()
###Output
_____no_output_____
###Markdown
Check outliers:
###Code
import ipywidgets as widgets
z_slider = widgets.FloatSlider(
value=2.9,
min=2,
max=3.5,
step=0.1,
description='Threshold:',
disabled=False,
continuous_update=True,
orientation='horizontal',
readout=True,
readout_format='.1f',
)
z_slider
from scipy import stats
import numpy as np
z = np.abs(stats.zscore(df1['MEI']))
outlier_index = np.where(z > z_slider.value)[0]
print('Threshhold:', z_slider.value)
print('Index:', outlier_index)
print('Outlier:', [df1['MEI'][i] for i in outlier_index])
###Output
Threshhold: 2.9
Index: [171 172]
Outlier: [3.0010000000000003, 3.0]
###Markdown
Since outliers are rare, we ignore them at the preparation step and continue. Correlation Find and plot highly correlated variables (r>0.6 in df1, plotting r>0.5):
###Code
corr = df1.corr()
high_corr = corr[np.abs(corr) > 0.5].fillna(0)
corr[np.abs(corr) > 0.6].fillna('')
plt.figure(dpi=128)
ax = sns.heatmap(
high_corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
);
###Output
_____no_output_____
###Markdown
Similarly, correlation in df2:
###Code
corr = df2.corr()
high_corr = corr[np.abs(corr) > 0.5].fillna(0)
corr[np.abs(corr) > 0.6].fillna('')
plt.figure(dpi=128)
ax = sns.heatmap(
high_corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
);
###Output
_____no_output_____
###Markdown
**Potential redundant variables found**, however, now the data is prepared for analyzing. ____ Problem 1 — First Model*We are interested in how changes in these variables affect future temperatures, as well as how well these variables explain temperature changes so far. To do this, first read the dataset climate_change_1.csv into Python or Matlab*. *Then, split the data into a training set, consisting of all the observations up to and including 2006, and a testing set consisting of the remaining years. A training set refers to the data that will be used to build the model, and a testing set refers to the data we will use to test our predictive ability*. *After seeing the problem, your classmate Alice immediately argues that we can apply a linear regression model. Though being a little doubtful, you decide to have a try. To solve the linear regression problem, you recall the linear regression has a closed form solution*: $$\theta = (X^TX)^{-1}X^TY$$ Read and split Though data have been prepared in section *Data Preparation*, dataset df1 has been imported again here following problem description. **Read the dataset**:
###Code
# loaded in exploration
# import pandas as pd
# df1 = pd.read_csv('../data/climate_change_1.csv').iloc[:,2:]
###Output
_____no_output_____
###Markdown
**Split into training set and testing set**:
###Code
# Df1 trainset
df1_train = df1[df1['Year']<=2006].iloc[:,2:]
# Check the result
df1_train.iloc[[0, 1,-2, -1],:]
# Df1 testet
df1_test = df1[df1['Year']>2006].iloc[:,2:]
# Check the result
df1_test.iloc[[0, 1,-2, -1],:]
###Output
_____no_output_____
###Markdown
1. Closed form function*Implement a function `closed_form_1` that computes this closed form solution given the features X, labels y (using Python or Matlab).* Given a pandas `DataFrame`, the features X are the dataframe excluding the target y, then:
###Code
import numpy as np # matrix, vector, and linear algebra support
from numpy.linalg import inv # matrix inversion
def closed_form_1(X: np.ndarray, y: np.ndarray) -> np.matrix:
"""
To calculate OLS theta(s) given X, y in ndarrays.
Parameters:
----------
X: features, IV.
    y: target variable, DV.
Return:
----------
theta: coefficients
"""
X = np.column_stack((np.ones(len(X)), X)) # add x0 = 1 to matrix X
theta = inv(X.T @ X) @ X.T @ y
#theta = theta[1:].reshape((1,10))
return theta
def closed_form_df(df: pd.core.frame.DataFrame, column: int = 8) -> np.matrix:
"""
To calculate OLS theta(s) given data in a DataFrame.
Parameters:
----------
df: a DataFrame of data including both IV X and DV y.
    column = 8: index number of the column where DV y lies. The default value is 8.
Return:
----------
theta: coefficients
"""
X = df.drop(df.columns[column], axis=1).to_numpy() # X: the features
X = np.column_stack((np.ones(len(X)), X)) # add x0 = 1 to matrix X
y = df.iloc[:, [column]].to_numpy(
) # y: the results, lower case to emphasize the difference
theta = inv(X.T @ X) @ X.T @ y
#theta = theta[1:].reshape((1,10))
return theta
###Output
_____no_output_____
###Markdown
Test `closed_form_1` and `closed_form_df` on df1:
###Code
df1_train.drop(df1_train.columns[8], axis=1).head(3)
# Given X, and y in numpy arrays
X = df1_train.drop(df1_train.columns[8], axis=1).to_numpy() # X: the features
y = df1_train.iloc[:, [8]].to_numpy() # y: the results, lower case to emphasize the difference
X_test = df1_test.drop(df1_train.columns[8], axis=1).to_numpy()
y_test = df1_test.iloc[:, [8]].to_numpy()
theta = closed_form_1(X, y)
theta.flatten()
# Given a DataFrame
theta = closed_form_df(df1_train).reshape((1,9))
theta.flatten()
###Output
_____no_output_____
###Markdown
Using *sklearn* to check the result:
###Code
from sklearn.linear_model import LinearRegression as lm
l=lm().fit(X, y)
l.coef_.flatten()
###Output
_____no_output_____
###Markdown
Works fine (some differences due to SVD used in *sklearn.LinearRegression*). 2. Formula and R square*Write down the mathematical formula for the linear model and evaluate the model R square on the training set and the testing set.*
###Code
df1_train.columns
###Output
_____no_output_____
###Markdown
**Formula of this model**(`round(5)`)$$\hat{Temp}=-124.594+0.06421*MEI +0.00646*CO_2+0.00012*CH_4-0.01653*N_2O-0.00663*CFC11+0.00381*CFC12+0.09314*TSI-1.53761*Aerosols$$ **Formula of R-squared** R-squared measures model fitting and can be calculated as:$$R^2 = \frac{var(X\hat{\beta})}{var(y)} = \frac{\sum_{i=1}^{n}(\hat{y}_i-\bar{y})^2}{\sum_{i=1}^{n}(y_i-\bar{y})^2}$$
###Code
def predict(X: np.ndarray, theta: np.ndarray) -> np.ndarray:
"""
To predict y given X and theta.
Parameters:
----------
X: features, IV.
theta: coefficients.
Return:
----------
y_hat: predicted value.
"""
X = np.column_stack((np.ones(len(X)), X)) # add x0 = 1 to matrix X
# theta = theta.reshape((1, len(theta)))
y_hat = np.sum(X @ theta, axis=1)
return (y_hat)
###Output
_____no_output_____
###Markdown
Define a `score` function to calculate $R^2$:
###Code
def score(y: np.ndarray, y_hat: np.ndarray) -> float:
"""
To calculate OLS R^2 given data in ndarrays.
Parameters:
----------
y: actual labels.
y_hat: predicted values.
Return:
----------
    r_squared: R^2 calculated based on y and y_hat.
    """
    mean = y.mean()
    ESS = np.sum(np.square(y_hat - mean))  # explained sum of squares
    TSS = np.sum(np.square(y - mean))      # total sum of squares
    r_squared = ESS / TSS
    return r_squared
###Output
_____no_output_____
###Markdown
On training set:
###Code
X = df1_train.drop(df1_train.columns[8], axis=1).to_numpy()
y = df1_train.iloc[:, [8]].to_numpy()
rsquare_train = score(y, predict(X, closed_form_1(X, y)))
print("R2:", rsquare_train)
# Use *sklearn* to check the result:
l=lm().fit(X, y)
print("R2 by scipy:", l.score(X, y))
###Output
R2: 0.7508932770388383
R2 by scipy: 0.7508932770523428
###Markdown
On testing set:
###Code
rsquare_test = score(y_test, predict(X_test, closed_form_1(X, y)))
print("R2:", rsquare_test)
###Output
R2: 0.22517701916249677
###Markdown
Works fine. **Evaluation** Based on the formula above, R-squared can be applied in Python to evaluate the previous model. On the training set, $R^2$ is 0.75089, while on the testing set, $R^2$ is 0.22518. \*\*\* *However, for a multi-variable linear model, $R^{2}_{adjusted}$ may be a better indicator because the original $R^{2}$ is sensitive to the number of features.* 3. Significant variables*Which variables are significant in the model?*
###Code
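# Hedged aside (added): the note above suggests adjusted R^2 as a fairer measure for
# multi-variable models. A minimal sketch, assuming X, y_test, rsquare_train and
# rsquare_test from the cells above are still in scope (n_obs, n_feat are new names):
n_obs, n_feat = X.shape
adj_r2_train = 1 - (1 - rsquare_train) * (n_obs - 1) / (n_obs - n_feat - 1)
adj_r2_test = 1 - (1 - rsquare_test) * (len(y_test) - 1) / (len(y_test) - n_feat - 1)
print("Adjusted R2 (train, test):", round(adj_r2_train, 5), round(adj_r2_test, 5))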
import statsmodels.api as sm
# set an alpha
alpha = 0.05
X2 = sm.add_constant(X)
l = sm.OLS(y, X2).fit()
pvalues = l.summary2().tables[1]['P>|t|']
labels = ['x0: constant'] + ["x" + str(i+1) + ": " + df1_train.columns[i] for i in range(len(df1_train.columns)-1)]
variables = pd.DataFrame(np.concatenate([pd.DataFrame(labels), pd.DataFrame(pvalues)], axis=1))
variables.columns = ['Variable', 'pvalues']
# print significant variables
variables[variables.pvalues < alpha]
###Output
_____no_output_____
###Markdown
That is to say, the significant (alpha=0.05) variables are:
###Code
[i for i in variables[variables.pvalues < alpha].Variable.to_numpy()]
###Output
_____no_output_____
###Markdown
4. Necessary conditions and application*Write down the necessary conditions for using the closed form solution. And you can apply it to the dataset climate_change_2.csv, explain the solution is unreasonable.* **Necessary conditions** $X^TX$ must be invertible.
###Code
df2.head(3)
# Df2 trainset
df2_train = df2[df2['Year']<=2006].iloc[:,2:]
# Check the result
df2_train.iloc[[0, 1,-2, -1],:]
# Df2 testet
df2_test = df2[df2['Year']>2006].iloc[:,2:]
# Check the result
df2_test.iloc[[0, 1,-2, -1],:]
# Given X, and y in numpy arrays
X_2 = df2_train.drop(df2_train.columns[9], axis=1).to_numpy() # X: the features
y_2 = df2_train.iloc[:, [9]].to_numpy() # y: the results, lower case to emphasize the difference
X_2_test = df2_test.drop(df2_test.columns[9], axis=1).to_numpy()
y_2_test = df2_test.iloc[:, [9]].to_numpy()
theta = closed_form_1(X_2, y_2)
theta.flatten()
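# Hedged diagnostic (added): the markdown below attributes the unreasonable solution
# to X^T X being (near-)non-invertible; a quick look at the Gram matrix built from
# X_2 makes this visible without asserting a particular value:
gram_2 = X_2.T @ X_2
print("rank:", np.linalg.matrix_rank(gram_2), "of", gram_2.shape[0])
print("condition number:", np.linalg.cond(gram_2))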
###Output
_____no_output_____
###Markdown
**Why unreasonable:** Because $X^TX$ is non-invertible. According to [Andrew NG](https://www.coursera.org/learn/machine-learning/supplement/66bi5/normal-equation-noninvertibility), >When implementing the normal equation in octave we want to use the `pinv` function rather than `inv`. The 'pinv' function will give you a value of \thetaθ even if $X^TX$ is not invertible. >If $X^TX$ is noninvertible, the common causes might be having :> - **Redundant features**, where two features are very closely related (i.e. they are linearly dependent)> - **Too many features** (e.g. m ≤ n). In this case, delete some features or use "regularization" (to be explained in a later lesson). >>Solutions to the above problems include deleting a feature that is linearly dependent with another or deleting one or more features when there are too many features. In this case, many variables (as mentioned in the first section exploration) are highly correlated. ___ Problem 2 — Regularization*Regularization is a method to boost robustness of model, including L1 regularization and L_2 regularization.* 1. Loss function*Please write down the loss function for linear model with L1 regularization, L2regularization, respectively.* **L1, Lasso Regression:**$$J\left( \theta \right)=\frac{1}{2m}[\sum\limits_{i=1}^{m}{{{({h_\theta}({{x}^{(i)}})-{{y}^{(i)}})}^{2}}+\lambda \sum\limits_{j=1}^{n}{|\theta _{j}|}]}$$ **L2, Ridge Regression:**$$J\left( \theta \right)=\frac{1}{2m}[\sum\limits_{i=1}^{m}{{{({h_\theta}({{x}^{(i)}})-{{y}^{(i)}})}^{2}}+\lambda \sum\limits_{j=1}^{n}{\theta _{j}^{2}}]}$$ 2. Compute and regularization*The closed form solution for linear model with L2 regularization:*$$𝛉 = (𝐗^𝐓𝐗 + 𝛌𝐈)^{−𝟏}𝐗^𝐓𝐘$$*where I is the identity matrix. Write a function closed_form_2 that computes thisclosed form solution given the features X, labels Y and the regularizationparameter λ (using Python or Matlab).*
###Code
def closed_form_2(X: np.ndarray, y: np.ndarray, lambd: float) -> np.ndarray:
"""
To calculate OLS theta(s) given X, y in ndarrays.
Parameters:
----------
X: features, IV.
    y: target variable, DV.
lambd: regularization parameter
Return:
----------
theta: coefficients
"""
X = np.concatenate([np.ones((len(X), 1)), X], axis=1) # add x0 = 1 to matrix X
I = np.identity(len(X[0]))
theta = inv(X.T @ X + lambd * I) @ (X.T @ y)
return theta
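# Hedged aside (added sketch): the L1 (lasso) cost written above has no closed-form
# solution like this ridge one; in practice an iterative solver would be used, e.g.
# from sklearn.linear_model import Lasso
# Lasso(alpha=0.5).fit(X, y.ravel())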
###Output
_____no_output_____
###Markdown
3. Comparison*Compare the two solutions in problem 1 and problem 2 and explain the reason why linear model with L2 regularization is robust. (using climate_change_1.csv)*
###Code
X_train = df1_train.drop(df1_train.columns[8], axis=1).to_numpy()
y_train = df1_train.iloc[:, [8]].to_numpy()
theta_0 = closed_form_1(X_train, y_train)
theta_0.flatten()
theta_2 = closed_form_2(X_train, y_train, 0.5)
theta_2.flatten()
rsquare_test_theta_0 = score(y_test, predict(X_test, theta_0))
rsquare_test_theta_2 = score(y_test, predict(X_test, theta_2))
print("R2:", rsquare_test_theta_0, rsquare_test_theta_2)
###Output
R2: 0.22517701916249677 0.8022366128860432
###Markdown
Obviously, theta_2, which is the result of ridge regression, is much better due to the lower effect of redundant variables. 4. Change λ*You can change the regularization parameter λ to get different solutions for this problem. Suppose we set λ = 10, 1, 0.1, 0.01, 0.001, and please evaluate the model* $R^2$ *on the training set and the testing set.*
###Code
from sklearn.metrics import mean_squared_error as mse
# Define constants
X_train = df1_train.drop(df1_train.columns[8], axis=1).to_numpy()
y_train = df1_train.iloc[:, [8]].to_numpy()
X_test = df1_test.drop(df1_test.columns[8], axis=1).to_numpy()
y_test = df1_test.iloc[:, [8]].to_numpy()
lambds = [10.00, 1.000, 0.10, 0.01, 0.001]
print("R scores comparison")
# print("λ Training R2 Testing R2 Testing MSE")
print("λ Training R2 Testing R2")
for lambd in lambds:
theta = closed_form_2(X_train, y_train, lambd)
rsquare_train = score(y_train, predict(X_train, theta))
rsquare_test = score(y_test, predict(X_test, theta))
# meanse = mse(y_test, predict(X_test, theta))
# print(lambd, " ", rsquare_train.round(5), " ", rsquare_test.round(5), " ", meanse.round(5))
print(lambd, " ", rsquare_train.round(5), " ", rsquare_test.round(5))
###Output
R scores comparison
λ Training R2 Testing R2
10.0 0.67461 0.94087
1.0 0.67947 0.84675
0.1 0.69447 0.67329
0.01 0.71165 0.58528
0.001 0.71483 0.56252
###Markdown
*Finally, please decide the best regularization parameter λ. (Note that: As a qualified data analyst, you must know how to choose model parameters, please learn about cross validation methods.)*
###Code
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import train_test_split
def cross_validation(X, y, alpha=[1e1, 1, 1e-1, 1e-2, 1e-3]):
"""
Using k-fold to get optimal value of lambda based on R-squared.
Parameters:
----------
X: features, IV.
    y: target variable, DV.
Return:
----------
alpha: best lambda(alpha in sklearn)
"""
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
random_state=0)
regressor = RidgeCV(alphas=alpha, store_cv_values=True)
regressor.fit(X_train, y_train)
cv_mse = np.mean(regressor.cv_values_, axis=0)
print(alpha)
print(cv_mse)
return regressor.alpha_
print('Optimal lambda should be ', cross_validation(X, y))
###Output
[10.0, 1, 0.1, 0.01, 0.001]
[[0.01058427 0.01013997 0.00905723 0.00881546 0.00881876]]
Optimal lambda should be 0.01
###Markdown
___ Problem 3 — Feature Selection 1. Lesser variables*From Problem 1, you can know which variables are significant, therefore you can use less variables to train model. For example, remove highly correlated and redundant features. You can propose a workflow to select feature.* As mentioned in the first section, given the known significant variables (MEI, CO2, CFC-11, CFC-12, TSI, Aerosols), a new correlation matrix can be introduced:
###Code
corr = df1[['MEI', 'CO2', 'CFC-11', 'CFC-12', 'TSI', 'Aerosols' ]].corr()
high_corr = corr[np.abs(corr) > 0.5].fillna(0)
corr[np.abs(corr) > 0.6].fillna('')
plt.figure(dpi=96)
ax = sns.heatmap(
high_corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
);
###Output
_____no_output_____
###Markdown
Thus, CFC-12 should also be removed (r>0.6); then we have:
###Code
corr = df1[['MEI', 'CO2', 'CFC-11', 'TSI', 'Aerosols' ]].corr()
high_corr = corr[np.abs(corr) > 0.5].fillna(0)
corr[np.abs(corr) > 0.6].fillna('')
plt.figure(dpi=96)
ax = sns.heatmap(
high_corr,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
);
###Output
_____no_output_____
###Markdown
Now no redundant variables are left. 2. A better model*Train a better model than the model in Problem 2.*
###Code
X_lesser = df1_train[['MEI', 'CO2', 'CFC-11', 'TSI', 'Aerosols' ]].to_numpy() # X: the features
y_lesser = df1_train.iloc[:, [8]].to_numpy() # y: the results, lower case to emphasize the difference
X_test = df1_test[['MEI', 'CO2', 'CFC-11', 'TSI', 'Aerosols' ]].to_numpy()
y_test = df1_test.iloc[:, [8]].to_numpy()
#theta_lesser = closed_form_1(X_lesser, y_lesser)
theta_lesser = closed_form_2(X_lesser, y_train, cross_validation(X_lesser,y_lesser))
theta_lesser = np.array(theta_lesser)
formula = [str(theta_lesser.round(5).tolist()[i][0]) + ' * x' + str(i) + ' + ' for i in range(0, len(theta_lesser.round(5).tolist()))]
print('Thus our better model is: \ny = '+ ' '.join(formula).replace(' * x0', '')[:-3])
###Output
[10.0, 1, 0.1, 0.01, 0.001]
[[0.01116522 0.01070773 0.00956511 0.0093032 0.00930385]]
Thus our better model is:
y = -0.25903 + 0.05314 * x1 + 0.01136 * x2 + 0.0001 * x3 + -0.00265 * x4 + -1.28616 * x5
###Markdown
Significance:
###Code
l = sm.OLS(y_lesser, X_lesser).fit()
pvalues = l.summary2().tables[1]['P>|t|']
pvalues < 0.05
###Output
_____no_output_____
###Markdown
Then remove x3 based on the new result:
###Code
X_lesser = df1_train[['MEI', 'CO2', 'TSI', 'Aerosols' ]].to_numpy() # X: the features
y_lesser = df1_train.iloc[:, [8]].to_numpy() # y: the results, lower case to emphasize the difference
X_test = df1_test[['MEI', 'CO2', 'TSI', 'Aerosols' ]].to_numpy()
y_test = df1_test.iloc[:, [8]].to_numpy()
theta_lesser = closed_form_1(X_lesser, y_train)
theta_lesser = np.array(theta_lesser)
formula = [str(theta_lesser.round(5).tolist()[i][0]) + ' * x' + str(i) + ' + ' for i in range(0, len(theta_lesser.round(5).tolist()))]
print('Thus our better model is: \n\ny = '+ ' '.join(formula).replace(' * x0', '')[:-3])
l = sm.OLS(y_lesser, X_lesser).fit()
pvalues = l.summary2().tables[1]['P>|t|']
pvalues < 0.05
###Output
_____no_output_____
###Markdown
R2:
###Code
rsquare_train = score(y_lesser, predict(X_lesser, theta_lesser))
rsquare_test = score(y_test, predict(X_test, theta_lesser))
print(('R2\nTraining: {}\nTesting: {}').format(rsquare_train, rsquare_test))
###Output
R2
Training: 0.7336403428986276
Testing: 0.6328867941215394
###Markdown
___ Problem 4 — Gradient Descent*Gradient descent algorithm is an iterative process that takes us to the minimum of a function. Please write down the iterative expression for updating the solution of linear model and implement it using Python or Matlab in gradientDescent function.* Cost and gradient functions
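As a reference for the implementation below, the batch update it iterates is essentially $$\theta := \theta - \frac{\alpha}{m}X^T(X\theta - y)$$ with an extra $\frac{\lambda}{m}\theta$ term when regularization is used.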
###Code
def normalize(mtx: np.matrix, method="std"):
"""
To normalize a matrix
Parameters:
----------
mtx: matrix
Return:
----------
normalized matrix
"""
return (mtx - np.mean(mtx)) / np.std(mtx) # Normalization for faster convergence
def costFunction(X: np.matrix, y: np.matrix, theta: np.ndarray) -> float:
"""
To calculate cost given X, y, and theta in ndarrays.
Parameters:
----------
X: features, IV.
    y: target variable, DV.
theta: coefficients
Return:
----------
cost: calculated cost
"""
# print(X.shape, np.array(theta).shape, y.shape) # for debugging
    m = len(y)  # no. of training samples
temp = X @ theta - y
return np.sum(np.power(temp, 2)) / (2 * m)
def gradientDescent(X: np.matrix,
y: np.matrix,
theta: np.ndarray,
alpha: float = 0.001,
iterations: int = 10000,
norm: bool = True) -> np.ndarray:
"""
To find optimal theta given X, y, theta in ndarrays and alpha, iters in float.
Parameters:
----------
X: features, IV.
    y: target variable, DV.
theta: initial coefficients
    alpha: learning rate, defaults to 0.001
    iterations: an assigned number of iterations
    norm: whether to normalize the features, defaults to True
Return:
----------
theta: np.matrix, final theta
J_history: np.ndarray, cost history
"""
X = (X, normalize(X))[norm] # normalization
# print(X.shape, np.array(theta).shape, y.shape)
m = len(y)
J_history = []
_theta = theta.copy()
for i in range(iterations):
error = X.T @ (X @ _theta - y)
_theta -= alpha * 1 / m * error
J_history.append(costFunction(X, y, _theta))
# print(_theta, J_history)
return _theta, J_history
###Output
_____no_output_____
###Markdown
Datasets
###Code
features = ["MEI", "CO2", "CH4", "N2O", "CFC-11", "CFC-12", "TSI", "Aerosols"] # Features
target = ["Temp"] # taget
# Splitting into training and testing
year = 2006 # Specific year for splitting
train, test= df1[df1['Year'] <= year], df1[df1['Year'] > year]
X_train, X_test = train.get(features), test.get(features)
X_train, X_test = np.column_stack((np.ones(len(X_train)), X_train)), np.column_stack((np.ones(len(X_test)), X_test))
y_train, y_test = train.get(target), test.get(target)
X_train, X_test, y_train, y_test = np.mat(X_train), np.mat(X_test), np.mat(y_train), np.mat(y_test)
###Output
_____no_output_____
###Markdown
Parameters
###Code
# Define parameters
alpha = 0.01 # Learning rate
iterations = 300000 # The number of iterations
###Output
_____no_output_____
###Markdown
Run
###Code
# Initial
theta_init = np.zeros((X_train.shape[1], 1))
J_init = costFunction(X_train, y_train, theta_init)
print("Initial cost:", J_init.round(3))
## Gradient descent method (for normalized features)
result = gradientDescent(X_train, y_train, theta_init, alpha, iterations)
theta, J_history = result[0], pd.DataFrame(result[1])
J_history.plot.line()
J_final = float(J_history.round(3)[-1:][0])
print("Final cost:", J_final, "\nFinal theta(for normalized features):",
[i[0] for i in theta.round(3)])
###Output
Initial cost: 0.047
Final cost: 0.006
Final theta(for normalized features): [0.205, 0.367, 1.474, 0.588, 0.584, -1.649, 1.033, -0.32, 0.201]
###Markdown
Compare with theta(s)=0
###Code
comparison = {'Init': [J_init.round(2)],
'Final': [float(J_history[-1:][0])]}
pd.DataFrame(comparison).plot.bar()
###Output
_____no_output_____ |
_20210514_datablocks_exploration.ipynb | ###Markdown
Purpose Understand the basics of the DataBlocks API. RETROSPECTIVE: I learned that db.dataloaders(x) wants x to just be the unlabeled data (such as a list of filenames), NOT tuples of (x,y)!
###Code
from fastai.vision.all import *
nums = L(range(10))
chars = L('a b c d e f g h i j'.split())
dss = DataBlock().datasets(nums)
dss.train
dss.valid
dss = DataBlock().datasets((nums,chars))
dss.train
dss.valid
###Output
_____no_output_____
###Markdown
The above code shows that a correct dss was created when we passed in the list but not the tuple of lists.
###Code
dss = DataBlock().datasets(L(zip(nums,chars)))
dss.train
dss.valid
dss.train[0]
###Output
_____no_output_____
###Markdown
Now, my goal is to pass in get_x and get_y so that dss.train returns (4, 'e').
###Code
doc(Datasets)
L(zip(nums,chars))
def fx(x):
return x
def fxp(x):
print(x)
return x
dss = DataBlock(
get_x = fxp,
get_y = fx
).datasets(L(zip(nums,chars)))
dss.train
dss.valid
###Output
8
i
1
b
###Markdown
OK, I think I get it: when I call db.dataloaders(x), x must be the x data – NOT the data + labels!!! Let's test that.
###Code
num2char = {n:c for n,c in zip(nums,chars)}
def get_y(x): return num2char[x]
dss = DataBlock(get_y = get_y).datasets(nums)
dss.train
x,y = dss.train[0]
x,y
###Output
_____no_output_____ |
hoja_de_trabajo.ipynb | ###Markdown
We import data from the CSV
###Code
# Imports used throughout this notebook
import pandas as pd, numpy as np, seaborn as sns, matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
dataset = pd.read_csv('prediccion_aceptacion.csv', usecols=[1,2,3,4,5,6,7,8])
# By intuition we drop the serial-number column because it does not contribute to the model
dataset.columns
dataset
###Output
_____no_output_____
###Markdown
We make individual plots of the variables
###Code
def plotear_variables(x_label, y_label, datos, tipo):
plt.plot(datos,tipo)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.show()
# Plots of all the variables
plotear_variables('GRE SCORE', '', dataset['GRE Score'],'ro')
plotear_variables('TOEFL Score', '', dataset['TOEFL Score'],'ro')
plotear_variables('University Rating', '', dataset['University Rating'],'g^')
plotear_variables('SOP', '', dataset['SOP'],'ro')
plotear_variables('LOR', '', dataset['LOR '],'ro')
plotear_variables('CGPA', '', dataset['CGPA'],'g^')
plotear_variables('Research', '', dataset['Research'],'ro')
plotear_variables('Chance of Admit', '', dataset['Chance of Admit '],'ro')
###Output
_____no_output_____
###Markdown
We make a heatmap of the correlations between the variables
###Code
# Function taken from: https://towardsdatascience.com/better-heatmaps-and-correlation-matrix-plots-in-python-41445d0f2bec
corr = dataset.corr()
plt.figure(figsize = (16,5))
ax = sns.heatmap(
corr,
vmin=0, vmax=1, center=0,
square=True,
annot=True,
linewidths = 1,
)
###Output
_____no_output_____
###Markdown
We decide to multiply the variables that have more than 0.7 correlation, in order to use them as a single variable
###Code
TOELF_GRE_CGPA_SCORE = dataset['GRE Score']*dataset['TOEFL Score']*dataset['CGPA']
LOR_SOP = dataset['LOR ']*dataset['SOP']
###Output
_____no_output_____
###Markdown
We make a scatter plot to see the relationship with the response variable
###Code
area = np.pi*3
plt.scatter(TOELF_GRE_CGPA_SCORE, dataset['Chance of Admit '], s=area, c="green", alpha=0.5)
plt.title('Gráfico de dispersión')
plt.xlabel('TOELF_GRE_CGPA_SCORE')
plt.ylabel('Chance of Admit')
plt.show()
plt.scatter(LOR_SOP, dataset['Chance of Admit '], s=area, c="green", alpha=0.5)
plt.title('Gráfico de dispersión')
plt.xlabel('LOR_SOP')
plt.ylabel('Chance of Admit')
plt.show()
###Output
_____no_output_____
###Markdown
Functions provided by the professor / worked on in class
###Code
def linear_cost_derivate(X,
                         y,
                         theta,
                         llambda):
    # Gradient of the regularized least-squares cost; returns a vector shaped like theta
    return np.matmul(X.T, (np.matmul(X, theta) - y)) / X.shape[0] + llambda * theta / X.shape[0]
def linear_cost(X,
                y,
                theta,
                llambda):
    # Regularized least-squares cost, averaged over the m samples
    return ((np.matmul(X, theta) - y) ** 2).sum() / (2 * X.shape[0]) + llambda * (theta ** 2).sum() / (2 * X.shape[0])
def gradient_descent(
X,
y,
theta_0,
cost,
cost_derivate,
alpha=0.00001,
llambda=0.000001,
treshold=0.0001,
max_iter=10000):
costs = []
gradient_norms = []
theta, i = theta_0, 0
    while np.linalg.norm(cost_derivate(X, y, theta, llambda)) > treshold and i < max_iter:
        theta = theta - (alpha * cost_derivate(X, y, theta, llambda))
        i += 1
        costs.append(cost(X, y, theta, llambda))
        gradient_norms.append(cost_derivate(X, y, theta, llambda))
return theta, costs, gradient_norms
###Output
_____no_output_____
###Markdown
We build our new dataframe
###Code
new_dataset = pd.DataFrame({'TOELF_GRE_CGPA_SCORE':TOELF_GRE_CGPA_SCORE,'LOR_SOP': LOR_SOP})
new_dataset
xTrain, xTest, yTrain, yTest = train_test_split(new_dataset.iloc[:,:-1], new_dataset.iloc[:,-1], test_size = 0.4, random_state = 0)
xTest, crossx, yTest, crossy = train_test_split(xTest,yTest, test_size = 0.5, random_state = 0)
xTrain = xTrain.values
crossx = crossx.values
yTest = yTest.values
yTrain = yTrain.values.reshape(240,1)
crossy = crossy.values.reshape(80,1)
yTest = yTest.reshape(80,1)
print( xTrain.shape, yTrain.shape )
print( crossx.shape, crossy.shape )
print( xTest.shape, yTest.shape )
"""
Data split with numpy
msk = np.random.rand(len(new_dataset)) < 0.6
train_x = new_dataset[msk]
train_y = dataset['Chance of Admit '][msk]
cont,pond = train_y.shape
np.reshape(train_y,(cont,1))
test_x = new_dataset[~msk]
test_y = dataset['Chance of Admit '][~msk]
#test_y.reshape(len(test_y),1)
msk_2 = np.random.rand(len(test_x)) <= 0.5
cross_validationx = test_x[msk_2]
cross_validationy = test_y[~msk_2]
print( train_x.shape, train_y.shape )
print( test_x.shape, test_y.shape )
print( cross_validationx.shape, cross_validationy.shape )
"""
m, n = xTrain.shape
theta_0 = np.random.rand(n, 1)
theta, costs, gradient_norms = gradient_descent(
xTrain,
yTrain,
theta_0,
linear_cost,
linear_cost_derivate,
alpha=0.000000000001,
llambda=0.00001,
treshold=0.00001,
max_iter=10000000
)
plt.scatter(list(range(0,len(costs))),costs)
linear_cost(xTest,yTest,theta,0.001)
linear_cost(crossx,crossy, theta, 0.000001)
###Output
_____no_output_____ |
docs/_sources/dl/9444-project.ipynb | ###Markdown
Image classification  Implementation
###Code
"""
student.py
UNSW COMP9444 Neural Networks and Deep Learning
You may modify this file however you wish, including creating additional
variables, functions, classes, etc., so long as your code runs with the
hw2main.py file unmodified, and you are only using the approved packages.
You have been given some default values for the variables train_val_split,
batch_size as well as the transform function.
You are encouraged to modify these to improve the performance of your model.
The variable device may be used to refer to the CPU/GPU being used by PyTorch.
You may change this variable in the config.py file.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from math import ceil
import math
"""
Answer to Question:
Briefly describe how your program works, and explain any design and training
decisions you made along the way.
The task is to classify 14 different Simpsons characters.
Firstly, EDA:
images are grayscale and 64*64,
and the distribution of classes, plotted via a bar plot, shows an unbalanced dataset (523~2146 images per class).
"""
############################################################################
###### Specify transform(s) to be applied to the input images ######
############################################################################
def transform(mode):
"""
Called when loading the data. Visit this URL for more information:
https://pytorch.org/vision/stable/transforms.html
You may specify different transforms for training and testing
    visualisation of transformation - https://pytorch.org/vision/stable/auto_examples/plot_transforms.html#sphx-glr-auto-examples-plot-transforms-py
since the dataset is relatively small, more transforms are used to avoid overfitting
"""
if mode == 'train':
return transforms.Compose(
[
transforms.Grayscale(num_output_channels=1),
transforms.Resize(32),
transforms.RandomPerspective(distortion_scale=0.3),
transforms.RandomCrop(32, padding=4),# Since cropping is done after padding, https://pytorch.org/vision/stable/transforms.html
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])
]) # #1 channel so len = 1, keep range to [-1,1] more explanation - https://discuss.pytorch.org/t/understanding-transform-normalize/21730
elif mode == 'test':
return transforms.Compose(
[
transforms.Grayscale(num_output_channels=1),
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5], std=[0.5])
])
import torch.nn.init as init
import torch.nn.functional as F
import sys
import numpy as np
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class wide_basic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(wide_basic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class Network(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate):
super(Network, self).__init__()
num_classes=14
self.in_planes = 16
assert ((depth-4)%6 ==0), 'Wide-resnet depth should be 6n+4'
n = (depth-4)/6
k = widen_factor
nStages = [16, 16*k, 32*k, 64*k]
in_channel = 1
self.conv1 = conv3x3(in_channel,nStages[0])
self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1]*(int(num_blocks)-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def get_class_weight():
import os, os.path
# path joining version for other paths
import os
data_dir = data_path
dir_list = [os.path.join(data_dir, o) for o in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir,o))]
n_files = []
for d in dir_list:
nf = len(os.listdir(d))
n_files.append(nf)
max_class = max(n_files)
weights = [round(max_class/i, 2) for i in n_files]
class_weights = torch.FloatTensor(weights).cuda()
    return class_weights # weighted: max(class count) / each class count, so rarer classes get larger weights
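# Worked example (added sketch): with the class sizes noted in the module docstring
# (roughly 523 to 2146 images), the largest class gets weight 2146/2146 = 1.0 and the
# smallest gets 2146/523 ≈ 4.10, so rarer characters contribute more to the loss.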
data_path = "./data"
net = Network(16, 4, 0.5)
#_net.load_state_dict(torch.load("./checkModel10.pth"))
class_weights = get_class_weight()
lossFunc = nn.CrossEntropyLoss(weight=class_weights) # loss()
############################################################################
####### Metaparameters and training options ######
############################################################################
dataset = data_path
train_val_split = 0.8
batch_size = 96 # https://github.com/facebookresearch/LaMCTS/blob/master/LaNAS/LaNet/CIFAR10/train.py
epochs = 200 # since resnet50 140 ep takes 90mb, 50 also 90mb
optimiser = optim.SGD(net.parameters(), lr=1e-1, momentum=0.9, weight_decay=5e-4)
###Output
_____no_output_____ |
10-Quantopian-Platform/02-Basic-Algorithm-Methods.ipynb | ###Markdown
Basic Algorithm MethodsLet's algorithmically test our earlier optimized tech portfolio strategy with Quantopian! THIS CODE ONLY WORKS ON QUANTOPIAN. EACH CELL CORRESPONDS WITH A PART OF THE VIDEO LECTURE. MAKE SURE TO WATCH THE VIDEOS FOR CLARITY ON THIS! **initialize()**initialize() is called exactly once when our algorithm starts and requires context as input.context is an augmented Python dictionary used for maintaining state during our backtest or live trading, and can be referenced in different parts of our algorithm. context should be used instead of global variables in the algorithm. Properties can be accessed using dot notation (context.some_property). ** handle_data() **handle_data() is called once at the end of each minute and requires context and data as input. context is a reference to the same dictionary in initialize() and data is an object that stores several API functions. Our Tech Stock Optimized PortfolioLet's use the tech stock portfolio we calculated earlier. Keep in mind that handle_data() is readjusting our portfolio every minute! That may be unreasonable for certain algorithms, but for this example, we will just continue with these basics functions.
###Code
def initialize(context):
# Reference to Tech Stocks
context.aapl = sid(24)
context.csco = sid(1900)
context.amzn = sid(16841)
def handle_data(context, data):
# Position our portfolio optimization!
order_target_percent(context.aapl, .27)
order_target_percent(context.csco, .20)
order_target_percent(context.amzn, .53)
###Output
_____no_output_____
###Markdown
Grabbing Current Data data.current()data.current() can be used to retrieve the most recent value of a given field(s) for a given asset(s). data.current() requires two arguments: the asset or list of assets, and the field or list of fields being queried. Possible fields include 'price', 'open', 'high', 'low', 'close', and 'volume'. The output type will depend on the input types
###Code
def initialize(context):
# Reference to Tech Stocks
context.techies = [sid(16841),
sid(24),
sid(1900)]
def handle_data(context, data):
# Position our portfolio optimization!
tech_close = data.current(context.techies, 'close')
print(type(tech_close)) # Pandas Series
print(tech_close) # Closing Prices
###Output
_____no_output_____
###Markdown
Note! You can use data.is_stale(sid()) to check if the results of data.current() where generated at the current bar (the timeframe) or were forward filled from a previous time. Checking for trading data.can_trade()data.can_trade() is used to determine if an asset(s) is currently listed on a supported exchange and can be ordered. If data.can_trade() returns True for a particular asset in a given minute bar, we are able to place an order for that asset in that minute. This is an important guard to have in our algorithm if we hand-pick the securities that we want to trade. It requires a single argument: an asset or a list of assets.
###Code
def initialize(context):
# Reference to amazn
context.amzn = sid(16841)
def handle_data(context, data):
# This insures we don't hit an exception!
if data.can_trade(sid(16841)):
order_target_percent(context.amzn, 1.0)
###Output
_____no_output_____
###Markdown
Checking Historical DataWhen your algorithm calls data.history on equities, the returned data is adjusted for splits, mergers, and dividends as of the current simulation date. In other words, when your algorithm asks for a historical window of prices, and there is a split in the middle of that window, the first part of that window will be adjusted for the split. This adustment is done so that your algorithm can do meaningful calculations using the values in the window.This code queries the last 20 days of price history for a static set of securities. Specifically, this returns the closing daily price for the last 20 days, including the current price for the current day. Equity prices are split- and dividend-adjusted as of the current date in the simulation:
###Code
def initialize(context):
# AAPL, MSFT, and SPY
context.assets = [sid(24), sid(1900), sid(16841)]
def before_trading_start(context,data):
price_history = data.history(context.assets,
fields = "price",
bar_count = 5,
frequency = "1d")
print(price_history)
###Output
_____no_output_____
###Markdown
The bar_count field specifies the number of days or minutes to include in the pandas DataFrame returned by the history function. This parameter accepts only integer values.The frequency field specifies how often the data is sampled: daily or minutely. Acceptable inputs are ‘1d’ or ‘1m’. For other frequencies, use the pandas resample function. ExamplesBelow are examples of code along with explanations of the data returned. Daily HistoryUse "1d" for the frequency. The dataframe returned is always in daily bars. The bars never span more than one trading day. For US equities, a daily bar captures the trade activity during market hours (usually 9:30am-4:00pm ET). For US futures, a daily bar captures the trade activity from 6pm-6pm ET (24 hours). For example, the Monday daily bar captures trade activity from 6pm the day before (Sunday) to 6pm on the Monday. Tuesday's daily bar will run from 6pm Monday to 6pm Tuesday, etc. For either asset class, the last bar, if partial, is built using the minutes of the current day. Examples (assuming context.assets exists):* data.history(context.assets, "price", 1, "1d") returns the current price.* data.history(context.assets, "volume", 1, "1d") returns the volume since the current day's open, even if it is partial.* data.history(context.assets, "price", 2, "1d") returns yesterday's close price and the current price.* data.history(context.assets, "price", 6, "1d") returns the prices for the previous 5 days and the current price. Minute HistoryUse "1m" for the frequency.Examples (assuming context.assets exists):* data.history(context.assets, "price", 1, "1m") returns the current price.* data.history(context.assets, "price", 2, "1m") returns the previous minute's close price and the current price.* data.history(context.assets, "volume", 60, "1m") returns the volume for the previous 60 minutes. SchedulingUse schedule_function to indicate when you want other functions to occur. The functions passed in must take context and data as parameters.
###Code
def initialize(context):
context.appl = sid(49051)
# At ebginning of trading week
# At Market Open, set 10% of portfolio to be apple
schedule_function(open_positions,
date_rules.week_start(),
time_rules.market_open())
# At end of trading week
# 30 min before market close, dump all apple stock.
schedule_function(close_positions,
date_rules.week_end(),
time_rules.market_close(minutes = 30))
def open_positions(context, data):
order_target_percent(context.appl, 0.10)
def close_positions(context, data):
order_target_percent(context.appl, 0)
###Output
_____no_output_____
###Markdown
Portfolio InformationYou can get portfolio information and record it!
###Code
def initialize(context):
context.amzn = sid(16841)
context.ibm = sid(3766)
schedule_function(rebalance,
date_rules.every_day(),
time_rules.market_open())
schedule_function(record_vars,
date_rules.every_day(),
time_rules.market_close())
def rebalance(context, data):
# Half of our portfolio long on amazn
order_target_percent(context.amzn, 0.50)
# Half is shorting IBM
order_target_percent(context.ibm, -0.50)
def record_vars(context, data):
# Plot the counts
record(amzn_close=data.current(context.amzn, 'close'))
record(ibm_close=data.current(context.ibm, 'close'))
###Output
_____no_output_____
###Markdown
Slippage and Commision SlippageSlippage is where a simulation estimates the impact of orders on the fill rate and execution price they receive. When an order is placed for a trade, the market is affected. Buy orders drive prices up, and sell orders drive prices down; this is generally referred to as the price_impact of a trade. Additionally, trade orders do not necessarily fill instantaneously. Fill rates are dependent on the order size and current trading volume of the ordered security. The volume_limit determines the fraction of a security's trading volume that can be used by your algorithm.In backtesting and non-brokerage paper trading (Quantopian paper trading), a slippage model can be specified in initialize() using set_slippage(). There are different builtin slippage models that can be used, as well as the option to set a custom model. By default (if a slippage model is not specified), the following volume share slippage model is used:
###Code
set_slippage(slippage.VolumeShareSlippage(volume_limit = 0.025,
price_impact = 0.1))
###Output
_____no_output_____
###Markdown
Using the default model, if an order of 60 shares is placed for a given stock, then 1000 shares of that stock trade in each of the next several minutes and the volume_limit is 0.025, then our trade order will be split into three orders (25 shares, 25 shares, and 10 shares) that execute over the next 3 minutes.At the end of each day, all open orders are canceled, so trading liquid stocks is generally a good idea. Additionally, orders placed exactly at market close will not have time to fill, and will be canceled. CommisionTo set the cost of trades, we can specify a commission model in initialize() using set_commission(). By default (if a commission model is not specified), the following commission model is used:
###Code
set_commission(commission.PerShare(cost = 0.0075,
min_trade_cost = 1))
###Output
_____no_output_____
###Markdown
Basic Algorithm MethodsLet's algorithmically test our earlier optimized tech portfolio strategy with Quantopian! THIS CODE ONLY WORKS ON QUANTOPIAN. EACH CELL CORRESPONDS WITH A PART OF THE VIDEO LECTURE. MAKE SURE TO WATCH THE VIDEOS FOR CLARITY ON THIS! **initialize()**initialize() is called exactly once when our algorithm starts and requires context as input.context is an augmented Python dictionary used for maintaining state during our backtest or live trading, and can be referenced in different parts of our algorithm. context should be used instead of global variables in the algorithm. Properties can be accessed using dot notation (context.some_property). ** handle_data() **handle_data() is called once at the end of each minute and requires context and data as input. context is a reference to the same dictionary in initialize() and data is an object that stores several API functions. Our Tech Stock Optimized PortfolioLet's use the tech stock portfolio we calculated earlier. Keep in mind that handle_data() is readjusting our portfolio every minute! That may be unreasonable for certain algorithms, but for this example, we will just continue with these basics functions.
###Code
def initialize(context):
# Reference to Tech Stocks
context.aapl = sid(24)
context.csco = sid(1900)
context.amzn = sid(16841)
# This function is called at the end of every minute that passes;
# that is not very realistic, later we will see how to do it better
def handle_data(context, data):
"""
    # Every minute the algorithm tries to keep these percentages; if they drift,
    # buy/sell orders are placed
"""
# Position our portfolio optimization!
order_target_percent(context.aapl, .27)
order_target_percent(context.csco, .20)
order_target_percent(context.amzn, .53)
###Output
_____no_output_____
###Markdown
Grabbing Current Data data.current()data.current() can be used to retrieve the most recent value of a given field(s) for a given asset(s). data.current() requires two arguments: the asset or list of assets, and the field or list of fields being queried. Possible fields include 'price', 'open', 'high', 'low', 'close', and 'volume'. The output type will depend on the input types
###Code
def initialize(context):
# Reference to Tech Stocks
context.techies = [sid(16841),sid(24),sid(1900)]
def handle_data(context, data):
# Position our portfolio optimization!
tech_close = data.current(context.techies,'close')
print(type(tech_close)) # Pandas Series
print(tech_close) # Closing Prices
###Output
_____no_output_____
###Markdown
Note! You can use data.is_stale(sid()) to check if the results of data.current() where generated at the current bar (the timeframe) or were forward filled from a previous time. Checking for trading data.can_trade()data.can_trade() is used to determine if an asset(s) is currently listed on a supported exchange and can be ordered. If data.can_trade() returns True for a particular asset in a given minute bar, we are able to place an order for that asset in that minute. This is an important guard to have in our algorithm if we hand-pick the securities that we want to trade. It requires a single argument: an asset or a list of assets.
###Code
def initialize(context):
# Reference to amazn
context.amzn = sid(16841)
def handle_data(context, data):
# This insures we don't hit an exception!
if data.can_trade(sid(16841)):
order_target_percent(context.amzn, 1.0)
###Output
_____no_output_____
###Markdown
Checking Historical DataWhen your algorithm calls data.history on equities, the returned data is adjusted for splits, mergers, and dividends as of the current simulation date. In other words, when your algorithm asks for a historical window of prices, and there is a split in the middle of that window, the first part of that window will be adjusted for the split. This adustment is done so that your algorithm can do meaningful calculations using the values in the window.This code queries the last 20 days of price history for a static set of securities. Specifically, this returns the closing daily price for the last 20 days, including the current price for the current day. Equity prices are split- and dividend-adjusted as of the current date in the simulation:
###Code
def initialize(context):
# AAPL, MSFT, and SPY
context.assets = [sid(24), sid(1900), sid(16841)]
def before_trading_start(context,data):
price_history = data.history(context.assets,fields="price", bar_count=5, frequency="1d")
print(price_history)
###Output
_____no_output_____
###Markdown
The bar_count field specifies the number of days or minutes to include in the pandas DataFrame returned by the history function. This parameter accepts only integer values.The frequency field specifies how often the data is sampled: **daily** or **minutely**. Acceptable inputs are ‘1d’ or ‘1m’. For other frequencies, use the pandas resample function. ExamplesBelow are examples of code along with explanations of the data returned. Daily HistoryUse "1d" for the frequency. The dataframe returned is always in daily bars. The bars never span more than one trading day. For US equities, a daily bar captures the trade activity during market hours (usually 9:30am-4:00pm ET). For US futures, a daily bar captures the trade activity from 6pm-6pm ET (24 hours). For example, the Monday daily bar captures trade activity from 6pm the day before (Sunday) to 6pm on the Monday. Tuesday's daily bar will run from 6pm Monday to 6pm Tuesday, etc. For either asset class, the last bar, if partial, is built using the minutes of the current day. Examples (assuming context.assets exists):* data.history(context.assets, "price", 1, "1d") returns the current price.* data.history(context.assets, "volume", 1, "1d") returns the volume since the current day's open, even if it is partial.* data.history(context.assets, "price", 2, "1d") returns yesterday's close price and the current price.* data.history(context.assets, "price", 6, "1d") returns the prices for the previous 5 days and the current price. Minute HistoryUse "1m" for the frequency.Examples (assuming context.assets exists):* data.history(context.assets, "price", 1, "1m") returns the current price.* data.history(context.assets, "price", 2, "1m") returns the previous minute's close price and the current price.* data.history(context.assets, "volume", 60, "1m") returns the volume for the previous 60 minutes. SchedulingUse schedule_function to indicate when you want other functions to occur. The functions passed in must take context and data as parameters.`time_rules.market_open()` i parametri servono per indicare dopo quanto dall'apertura invocare la funzione`time_rules.market_close(minutes=30)` i parametri servono per indicare quanto prima della chiusura invocare la funzione
###Code
def initialize(context):
context.appl = sid(24)
# At beginning of trading week
# At Market Open, set 10% of portfolio to be apple
schedule_function(open_positions, date_rules.week_start(), time_rules.market_open())
# At end of trading week
# 30 min before market close, dump all apple stock.
schedule_function(close_positions, date_rules.week_end(), time_rules.market_close(minutes=30))
def open_positions(context, data):
order_target_percent(context.appl, 0.10) # buy AAPL with 10% of the portfolio value
def close_positions(context, data):
order_target_percent(context.appl, 0) # sell so that AAPL is 0% of the portfolio
###Output
_____no_output_____
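###Markdown
A few more scheduling combinations, sketched with the standard date_rules and time_rules helpers. The exact keyword arguments used here (hours=1, minutes=15) are assumptions based on the documented API rather than code from the lecture:
###Code
def initialize(context):
    context.appl = sid(24)
    # One hour after the open, on the first trading day of each month
    schedule_function(monthly_rebalance, date_rules.month_start(), time_rules.market_open(hours=1))
    # Every day, 15 minutes before the close
    schedule_function(record_price, date_rules.every_day(), time_rules.market_close(minutes=15))
def monthly_rebalance(context, data):
    order_target_percent(context.appl, 0.10)
def record_price(context, data):
    record(appl_price=data.current(context.appl, 'price'))
###Output
_____no_output_____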
###Markdown
Portfolio InformationYou can get portfolio information and record it! `record` saves selected values and plots them in a chart that appears during the backtest.
###Code
def initialize(context):
context.amzn = sid(16841)
context.ibm = sid(3766)
schedule_function(rebalance, date_rules.every_day(), time_rules.market_open())
schedule_function(record_vars, date_rules.every_day(), time_rules.market_close())
def rebalance(context, data):
# Half of our portfolio long on AMZN
order_target_percent(context.amzn, 0.50)
# Half is shorting IBM
order_target_percent(context.ibm, -0.50)
def record_vars(context, data):
# Plot the counts
# 'amzn_close' is an arbitrary name chosen at our discretion
record(amzn_close=data.current(context.amzn,'close'))
record(ibm_close=data.current(context.ibm,'close'))
###Output
_____no_output_____
###Markdown
Slippage and Commission SlippageSlippage accounts for the fact that if you move a very large amount of money when you decide to buy something, the price automatically rises (I will rarely need this in practice). Slippage is where a simulation estimates the impact of orders on the fill rate and execution price they receive. When an order is placed for a trade, the market is affected. Buy orders drive prices up, and sell orders drive prices down; this is generally referred to as the price_impact of a trade. Additionally, trade orders do not necessarily fill instantaneously. Fill rates are dependent on the order size and current trading volume of the ordered security. The volume_limit determines the fraction of a security's trading volume that can be used by your algorithm. In backtesting and non-brokerage paper trading (Quantopian paper trading), a slippage model can be specified in initialize() using set_slippage(). There are different built-in slippage models that can be used, as well as the option to set a custom model. By default (if a slippage model is not specified), the following volume share slippage model is used:
###Code
set_slippage(slippage.VolumeShareSlippage(volume_limit=0.025, price_impact=0.1))
###Output
_____no_output_____
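###Markdown
To make volume_limit concrete, here is a rough plain-Python sketch (not the Quantopian API) of how an order is sliced when only a fraction of each minute's traded volume is available to us; the next cell's text walks through the same 60-share example.
###Code
# Sketch only: slicing an order under a per-minute volume limit.
# Assumes a constant 1000 shares of market volume trade each minute.
def simulate_fills(order_size, minute_volume=1000, volume_limit=0.025):
    per_minute_cap = int(minute_volume * volume_limit)  # max shares we may take each minute
    fills = []
    remaining = order_size
    while remaining > 0:
        fill = min(remaining, per_minute_cap)
        fills.append(fill)
        remaining -= fill
    return fills
print(simulate_fills(60))  # [25, 25, 10] -> the order fills over 3 minutes
###Output
_____no_output_____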
###Markdown
Using the default model, if an order of 60 shares is placed for a given stock, 1000 shares of that stock trade in each of the next several minutes, and the volume_limit is 0.025, then our trade order will be split into three orders (25 shares, 25 shares, and 10 shares) that execute over the next 3 minutes. At the end of each day, all open orders are canceled, so trading liquid stocks is generally a good idea. Additionally, orders placed exactly at market close will not have time to fill, and will be canceled. CommissionTo set the cost of trades, we can specify a commission model in initialize() using set_commission(). By default (if a commission model is not specified), the following commission model is used:
###Code
set_commission(commission.PerShare(cost=0.0075, min_trade_cost=1))
###Output
_____no_output_____
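###Markdown
The PerShare model above is simply a per-share fee with a per-trade floor. A quick sketch of the arithmetic (plain Python, not the Quantopian API):
###Code
# Sketch only: per-share commission with a minimum cost per trade.
def per_share_commission(shares, cost=0.0075, min_trade_cost=1.0):
    # $0.0075 per share, but never less than $1 per trade
    return max(shares * cost, min_trade_cost)
print(per_share_commission(100))   # 1.0  (100 * 0.0075 = 0.75, so the $1 floor applies)
print(per_share_commission(1000))  # 7.5
###Output
_____no_output_____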
###Markdown
Basic Algorithm MethodsLet's algorithmically test our earlier optimized tech portfolio strategy with Quantopian! THIS CODE ONLY WORKS ON QUANTOPIAN. EACH CELL CORRESPONDS WITH A PART OF THE VIDEO LECTURE. MAKE SURE TO WATCH THE VIDEOS FOR CLARITY ON THIS! **initialize()**initialize() is called exactly once when our algorithm starts and requires context as input. context is an augmented Python dictionary used for maintaining state during our backtest or live trading, and can be referenced in different parts of our algorithm. context should be used instead of global variables in the algorithm. Properties can be accessed using dot notation (context.some_property). ** handle_data() **handle_data() is called once at the end of each minute and requires context and data as input. context is a reference to the same dictionary in initialize() and data is an object that stores several API functions. Our Tech Stock Optimized PortfolioLet's use the tech stock portfolio we calculated earlier. Keep in mind that handle_data() is readjusting our portfolio every minute! That may be unreasonable for certain algorithms, but for this example, we will just continue with these basic functions.
###Code
def initialize(context):
# Reference to Tech Stocks
context.aapl = sid(24)
context.csco = sid(1900)
context.amzn = sid(16841)
def handle_data(context, data):
# Position our portfolio optimization!
order_target_percent(context.aapl, .27)
order_target_percent(context.csco, .20)
order_target_percent(context.amzn, .53)
###Output
_____no_output_____
###Markdown
Grabbing Current Data data.current()data.current() can be used to retrieve the most recent value of a given field(s) for a given asset(s). data.current() requires two arguments: the asset or list of assets, and the field or list of fields being queried. Possible fields include 'price', 'open', 'high', 'low', 'close', and 'volume'. The output type depends on the input types; for example, a list of assets with a single field returns a pandas Series, as the next cell shows.
###Code
def initialize(context):
# Reference to Tech Stocks
context.techies = [sid(16841),sid(24),sid(1900)]
def handle_data(context, data):
# Position our portfolio optimization!
tech_close = data.current(context.techies,'close')
print(type(tech_close)) # Pandas Series
print(tech_close) # Closing Prices
###Output
_____no_output_____
###Markdown
Note! You can use data.is_stale(sid()) to check if the results of data.current() were generated at the current bar (the timeframe) or were forward-filled from a previous time. Checking for trading data.can_trade()data.can_trade() is used to determine if an asset(s) is currently listed on a supported exchange and can be ordered. If data.can_trade() returns True for a particular asset in a given minute bar, we are able to place an order for that asset in that minute. This is an important guard to have in our algorithm if we hand-pick the securities that we want to trade. It requires a single argument: an asset or a list of assets.
###Code
def initialize(context):
# Reference to Amazon (AMZN)
context.amzn = sid(16841)
def handle_data(context, data):
# This ensures we don't hit an exception!
if data.can_trade(sid(16841)):
order_target_percent(context.amzn, 1.0)
###Output
_____no_output_____
###Markdown
Checking Historical DataWhen your algorithm calls data.history on equities, the returned data is adjusted for splits, mergers, and dividends as of the current simulation date. In other words, when your algorithm asks for a historical window of prices, and there is a split in the middle of that window, the first part of that window will be adjusted for the split. This adjustment is done so that your algorithm can do meaningful calculations using the values in the window. This code queries the last 5 days of daily price history (bar_count=5) for a static set of securities. Specifically, this returns the closing daily price for the last 5 days, including the current price for the current day. Equity prices are split- and dividend-adjusted as of the current date in the simulation:
###Code
def initialize(context):
# AAPL, CSCO, and AMZN
context.assets = [sid(24), sid(1900), sid(16841)]
def before_trading_start(context,data):
price_history = data.history(context.assets,fields="price", bar_count=5, frequency="1d")
print(price_history)
###Output
_____no_output_____
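###Markdown
The history call returns an ordinary pandas object, so bar sizes other than '1d' and '1m' can be built with the pandas resample function. A small sketch, assuming a minute-frequency history (frequency="1m") with a DatetimeIndex and one column per asset:
###Code
# Sketch only: converting minute-level history to 5-minute bars with pandas resample.
# Assumes price_history came from data.history(..., frequency="1m").
def five_minute_closes(price_history):
    return price_history.resample("5min").last()   # last traded price in each 5-minute bucket
def five_minute_means(price_history):
    return price_history.resample("5min").mean()   # average price in each 5-minute bucket
###Output
_____no_output_____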
###Markdown
The bar_count field specifies the number of days or minutes to include in the pandas DataFrame returned by the history function. This parameter accepts only integer values.The frequency field specifies how often the data is sampled: daily or minutely. Acceptable inputs are ‘1d’ or ‘1m’. For other frequencies, use the pandas resample function. ExamplesBelow are examples of code along with explanations of the data returned. Daily HistoryUse "1d" for the frequency. The dataframe returned is always in daily bars. The bars never span more than one trading day. For US equities, a daily bar captures the trade activity during market hours (usually 9:30am-4:00pm ET). For US futures, a daily bar captures the trade activity from 6pm-6pm ET (24 hours). For example, the Monday daily bar captures trade activity from 6pm the day before (Sunday) to 6pm on the Monday. Tuesday's daily bar will run from 6pm Monday to 6pm Tuesday, etc. For either asset class, the last bar, if partial, is built using the minutes of the current day. Examples (assuming context.assets exists):* data.history(context.assets, "price", 1, "1d") returns the current price.* data.history(context.assets, "volume", 1, "1d") returns the volume since the current day's open, even if it is partial.* data.history(context.assets, "price", 2, "1d") returns yesterday's close price and the current price.* data.history(context.assets, "price", 6, "1d") returns the prices for the previous 5 days and the current price. Minute HistoryUse "1m" for the frequency.Examples (assuming context.assets exists):* data.history(context.assets, "price", 1, "1m") returns the current price.* data.history(context.assets, "price", 2, "1m") returns the previous minute's close price and the current price.* data.history(context.assets, "volume", 60, "1m") returns the volume for the previous 60 minutes. SchedulingUse schedule_function to indicate when you want other functions to occur. The functions passed in must take context and data as parameters.
###Code
def initialize(context):
context.appl = sid(24)
# At beginning of trading week
# At Market Open, set 10% of portfolio to be apple
schedule_function(open_positions, date_rules.week_start(), time_rules.market_open())
# At end of trading week
# 30 min before market close, dump all apple stock.
schedule_function(close_positions, date_rules.week_end(), time_rules.market_close(minutes=30))
def open_positions(context, data):
order_target_percent(context.appl, 0.10)
def close_positions(context, data):
order_target_percent(context.appl, 0)
###Output
_____no_output_____
###Markdown
Portfolio InformationYou can get portfolio information and record it!
###Code
def initialize(context):
context.amzn = sid(16841)
context.ibm = sid(3766)
schedule_function(rebalance, date_rules.every_day(), time_rules.market_open())
schedule_function(record_vars, date_rules.every_day(), time_rules.market_close())
def rebalance(context, data):
# Half of our portfolio long on AMZN
order_target_percent(context.amzn, 0.50)
# Half is shorting IBM
order_target_percent(context.ibm, -0.50)
def record_vars(context, data):
# Plot the counts
record(amzn_close=data.current(context.amzn,'close'))
record(ibm_close=data.current(context.ibm,'close'))
###Output
_____no_output_____
###Markdown
Slippage and Commission SlippageSlippage is where a simulation estimates the impact of orders on the fill rate and execution price they receive. When an order is placed for a trade, the market is affected. Buy orders drive prices up, and sell orders drive prices down; this is generally referred to as the price_impact of a trade. Additionally, trade orders do not necessarily fill instantaneously. Fill rates are dependent on the order size and current trading volume of the ordered security. The volume_limit determines the fraction of a security's trading volume that can be used by your algorithm. In backtesting and non-brokerage paper trading (Quantopian paper trading), a slippage model can be specified in initialize() using set_slippage(). There are different built-in slippage models that can be used, as well as the option to set a custom model. By default (if a slippage model is not specified), the following volume share slippage model is used:
###Code
set_slippage(slippage.VolumeShareSlippage(volume_limit=0.025, price_impact=0.1))
###Output
_____no_output_____
###Markdown
Using the default model, if an order of 60 shares is placed for a given stock, 1000 shares of that stock trade in each of the next several minutes, and the volume_limit is 0.025, then our trade order will be split into three orders (25 shares, 25 shares, and 10 shares) that execute over the next 3 minutes. At the end of each day, all open orders are canceled, so trading liquid stocks is generally a good idea. Additionally, orders placed exactly at market close will not have time to fill, and will be canceled. CommissionTo set the cost of trades, we can specify a commission model in initialize() using set_commission(). By default (if a commission model is not specified), the following commission model is used:
###Code
set_commission(commission.PerShare(cost=0.0075, min_trade_cost=1))
###Output
_____no_output_____ |
Data_process.ipynb | ###Markdown
4D interpolation (3D in space plus time)
###Code
import cartopy.crs
import matplotlib
import matplotlib.pyplot
import numpy
import pyinterp
import pyinterp.backends.xarray
import xarray
import warnings
warnings.filterwarnings('ignore')
ds = xarray.open_dataset("pres_temp_4D.nc")
interpolator = pyinterp.backends.xarray.Grid4D(ds.pressure)
mx, my, mz, mu = numpy.meshgrid(numpy.arange(-125, -70, 0.5),
numpy.arange(25, 50, 0.5),
numpy.datetime64("2000-01-01T12:00"),
0.5,
indexing="ij")
quadrivariate = interpolator.quadrivariate(
dict(longitude=mx.flatten(),
latitude=my.flatten(),
time=mz.flatten(),
level=mu.flatten())).reshape(mx.shape)
interpolator = pyinterp.backends.xarray.Grid4D(ds.pressure, increasing_axes=True)
bicubic = interpolator.bicubic(dict(longitude=mx.flatten(),
latitude=my.flatten(),
time=mz.flatten(),
level=mu.flatten()),
nx=2,
ny=2).reshape(mx.shape)
quadrivariate = quadrivariate.squeeze(axis=(2, 3))
bicubic = bicubic.squeeze(axis=(2, 3))
lons = mx[:, 0].squeeze()
lats = my[0, :].squeeze()
fig = matplotlib.pyplot.figure(figsize=(5, 4))
ax1 = fig.add_subplot(211, projection=cartopy.crs.PlateCarree(central_longitude=180))
pcm = ax1.pcolormesh(lons,
lats,
quadrivariate.T,
cmap='jet',
transform=cartopy.crs.PlateCarree())
ax1.coastlines()
ax1.set_title("Trilinear")
ax2 = fig.add_subplot(212, projection=cartopy.crs.PlateCarree(central_longitude=180))
pcm = ax2.pcolormesh(lons,
lats,
bicubic.T,
cmap='jet',
transform=cartopy.crs.PlateCarree())
ax2.coastlines()
ax2.set_title("Spline & Linear in time")
fig.colorbar(pcm, ax=[ax1, ax2], shrink=0.8)
fig.show()
###Output
_____no_output_____
###Markdown
Empirical Orthogonal Function (EOF) analysis
###Code
from pyEOF import *
import xarray as xr
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
###Output
_____no_output_____ |
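###Markdown
pyEOF provides the actual decomposition used in this analysis. As a conceptual reference, here is a minimal from-scratch sketch (plain NumPy, not the pyEOF API) of what an EOF analysis computes: remove the time mean at each grid point, factor the time-by-space anomaly matrix with an SVD, and read off the spatial patterns (EOFs), their principal-component time series, and the fraction of variance each mode explains.
###Code
import numpy as np
def eof_analysis(data, n_modes=3):
    # data: 2-D array shaped (time, space), e.g. each row is a flattened lat/lon field
    anomalies = data - data.mean(axis=0)               # remove the time mean at each point
    u, s, vt = np.linalg.svd(anomalies, full_matrices=False)
    eofs = vt[:n_modes]                                # spatial patterns, shape (n_modes, space)
    pcs = u[:, :n_modes] * s[:n_modes]                 # time series of each mode, shape (time, n_modes)
    explained = (s**2 / np.sum(s**2))[:n_modes]        # fraction of total variance per mode
    return eofs, pcs, explained
rng = np.random.default_rng(0)
eofs, pcs, frac = eof_analysis(rng.standard_normal((120, 50)))
print(eofs.shape, pcs.shape, frac)
###Output
_____no_output_____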
convolutional-neural-networks/mnist-mlp/mnist_mlp_solution.ipynb | ###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits.The process will be broken down into the following steps:>1. Load and visualize the data2. Define a neural network3. Train the model4. Evaluate the performance of our trained model on a test dataset!Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to data\MNIST\raw\train-images-idx3-ubyte.gz
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)The architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2, inplace=False)
)
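###Markdown
Dropout only randomly zeroes activations in training mode, which is why we call model.train() before the training loop and model.eval() before testing. A tiny sketch (separate from the model above) of the difference:
###Code
import torch
import torch.nn as nn
drop = nn.Dropout(p=0.2)
x = torch.ones(1, 10)
drop.train()
print(drop(x))   # roughly 20% of the entries are zeroed; the rest are scaled by 1/(1-p) = 1.25
drop.eval()
print(drop(x))   # identity: all ones pass through unchanged
###Output
_____no_output_____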
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
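###Markdown
As a quick sanity check (a sketch, separate from the training code), we can verify numerically that CrossEntropyLoss matches log-softmax followed by the negative log-likelihood loss:
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F
logits = torch.randn(4, 10)                   # fake class scores for a batch of 4 images
targets = torch.randint(0, 10, (4,))          # fake digit labels
ce = nn.CrossEntropyLoss()(logits, targets)
manual = F.nll_loss(F.log_softmax(logits, dim=1), targets)
print(torch.allclose(ce, manual))             # True
###Output
_____no_output_____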
###Markdown
--- Train the NetworkThe steps for training/learning from a batch of data are described in the comments below:1. Clear the gradients of all optimized variables2. Forward pass: compute predicted outputs by passing inputs to the model3. Calculate the loss4. Backward pass: compute gradient of the loss with respect to model parameters5. Perform a single optimization step (parameter update)6. Update average training lossThe following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.813331
Epoch: 2 Training Loss: 0.321577
Epoch: 3 Training Loss: 0.248456
Epoch: 4 Training Loss: 0.202627
Epoch: 5 Training Loss: 0.171499
Epoch: 6 Training Loss: 0.148158
Epoch: 7 Training Loss: 0.128868
Epoch: 8 Training Loss: 0.115791
Epoch: 9 Training Loss: 0.103729
Epoch: 10 Training Loss: 0.094208
Epoch: 11 Training Loss: 0.086321
Epoch: 12 Training Loss: 0.078787
Epoch: 13 Training Loss: 0.073826
Epoch: 14 Training Loss: 0.068861
Epoch: 15 Training Loss: 0.063639
Epoch: 16 Training Loss: 0.059656
Epoch: 17 Training Loss: 0.055495
Epoch: 18 Training Loss: 0.051543
Epoch: 19 Training Loss: 0.049148
Epoch: 20 Training Loss: 0.045936
Epoch: 21 Training Loss: 0.044068
Epoch: 22 Training Loss: 0.041843
Epoch: 23 Training Loss: 0.038746
Epoch: 24 Training Loss: 0.037098
Epoch: 25 Training Loss: 0.034884
Epoch: 26 Training Loss: 0.032909
Epoch: 27 Training Loss: 0.031460
Epoch: 28 Training Loss: 0.030074
Epoch: 29 Training Loss: 0.027882
Epoch: 30 Training Loss: 0.026905
Epoch: 31 Training Loss: 0.025682
Epoch: 32 Training Loss: 0.024847
Epoch: 33 Training Loss: 0.023258
Epoch: 34 Training Loss: 0.022201
Epoch: 35 Training Loss: 0.021116
Epoch: 36 Training Loss: 0.021035
Epoch: 37 Training Loss: 0.020442
Epoch: 38 Training Loss: 0.018677
Epoch: 39 Training Loss: 0.017699
Epoch: 40 Training Loss: 0.016380
Epoch: 41 Training Loss: 0.016670
Epoch: 42 Training Loss: 0.016217
Epoch: 43 Training Loss: 0.016248
Epoch: 44 Training Loss: 0.014866
Epoch: 45 Training Loss: 0.014198
Epoch: 46 Training Loss: 0.013858
Epoch: 47 Training Loss: 0.013008
Epoch: 48 Training Loss: 0.013388
Epoch: 49 Training Loss: 0.012320
Epoch: 50 Training Loss: 0.012091
###Markdown
--- Test the Trained NetworkFinally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class as well as looking at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.056756
Test Accuracy of 0: 99% (971/980)
Test Accuracy of 1: 99% (1128/1135)
Test Accuracy of 2: 98% (1016/1032)
Test Accuracy of 3: 97% (986/1010)
Test Accuracy of 4: 98% (970/982)
Test Accuracy of 5: 98% (877/892)
Test Accuracy of 6: 98% (943/958)
Test Accuracy of 7: 98% (1008/1028)
Test Accuracy of 8: 97% (948/974)
Test Accuracy of 9: 97% (985/1009)
Test Accuracy (Overall): 98% (9832/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
c:\programdata\anaconda3\envs\deep-learning\lib\site-packages\ipykernel_launcher.py:15: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.
from ipykernel import kernelapp as app
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits.The process will be broken down into the following steps:>1. Load and visualize the data2. Define a neural network3. Train the model4. Evaluate the performance of our trained model on a test dataset!Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
# The MNIST datasets are hosted on yann.lecun.com that has moved under CloudFlare protection
# Run this script to enable the datasets download
# Reference: https://github.com/pytorch/vision/issues/1938
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Processing...
Done!
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)The architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
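###Markdown
A small shape check (a sketch) of the flatten step in forward(): a batch of 28x28 images becomes a batch of 784-dimensional vectors before the first linear layer.
###Code
import torch
batch = torch.randn(20, 1, 28, 28)   # batch_size x channels x height x width
flat = batch.view(-1, 28 * 28)
print(flat.shape)                    # torch.Size([20, 784])
###Output
_____no_output_____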
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the NetworkThe steps for training/learning from a batch of data are described in the comments below:1. Clear the gradients of all optimized variables2. Forward pass: compute predicted outputs by passing inputs to the model3. Calculate the loss4. Backward pass: compute gradient of the loss with respect to model parameters5. Perform a single optimization step (parameter update)6. Update average training lossThe following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833544
Epoch: 2 Training Loss: 0.321996
Epoch: 3 Training Loss: 0.247905
Epoch: 4 Training Loss: 0.201408
Epoch: 5 Training Loss: 0.169627
Epoch: 6 Training Loss: 0.147488
Epoch: 7 Training Loss: 0.129424
Epoch: 8 Training Loss: 0.116433
Epoch: 9 Training Loss: 0.104333
Epoch: 10 Training Loss: 0.094504
Epoch: 11 Training Loss: 0.085769
Epoch: 12 Training Loss: 0.080728
Epoch: 13 Training Loss: 0.073689
Epoch: 14 Training Loss: 0.067905
Epoch: 15 Training Loss: 0.063251
Epoch: 16 Training Loss: 0.058666
Epoch: 17 Training Loss: 0.055106
Epoch: 18 Training Loss: 0.050979
Epoch: 19 Training Loss: 0.048491
Epoch: 20 Training Loss: 0.046173
Epoch: 21 Training Loss: 0.044311
Epoch: 22 Training Loss: 0.041405
Epoch: 23 Training Loss: 0.038702
Epoch: 24 Training Loss: 0.036634
Epoch: 25 Training Loss: 0.035159
Epoch: 26 Training Loss: 0.033605
Epoch: 27 Training Loss: 0.030255
Epoch: 28 Training Loss: 0.029026
Epoch: 29 Training Loss: 0.028722
Epoch: 30 Training Loss: 0.027026
Epoch: 31 Training Loss: 0.026134
Epoch: 32 Training Loss: 0.022992
Epoch: 33 Training Loss: 0.023809
Epoch: 34 Training Loss: 0.022347
Epoch: 35 Training Loss: 0.021212
Epoch: 36 Training Loss: 0.020292
Epoch: 37 Training Loss: 0.019413
Epoch: 38 Training Loss: 0.019758
Epoch: 39 Training Loss: 0.017851
Epoch: 40 Training Loss: 0.017023
Epoch: 41 Training Loss: 0.016846
Epoch: 42 Training Loss: 0.016187
Epoch: 43 Training Loss: 0.015530
Epoch: 44 Training Loss: 0.014553
Epoch: 45 Training Loss: 0.014781
Epoch: 46 Training Loss: 0.013546
Epoch: 47 Training Loss: 0.013328
Epoch: 48 Training Loss: 0.012698
Epoch: 49 Training Loss: 0.012012
Epoch: 50 Training Loss: 0.012588
###Markdown
--- Test the Trained NetworkFinally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class as well as looking at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(batch_size):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.052876
Test Accuracy of 0: 99% (972/980)
Test Accuracy of 1: 99% (1127/1135)
Test Accuracy of 2: 98% (1012/1032)
Test Accuracy of 3: 98% (992/1010)
Test Accuracy of 4: 98% (968/982)
Test Accuracy of 5: 98% (875/892)
Test Accuracy of 6: 98% (946/958)
Test Accuracy of 7: 98% (1010/1028)
Test Accuracy of 8: 97% (949/974)
Test Accuracy of 9: 98% (990/1009)
Test Accuracy (Overall): 98% (9841/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits.The process will be broken down into the following steps:>1. Load and visualize the data2. Define a neural network3. Train the model4. Evaluate the performance of our trained model on a test dataset!Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)The architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2, inplace=False)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.htmlloss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the NetworkThe steps for training/learning from a batch of data are described in the comments below:1. Clear the gradients of all optimized variables2. Forward pass: compute predicted outputs by passing inputs to the model3. Calculate the loss4. Backward pass: compute gradient of the loss with respect to model parameters5. Perform a single optimization step (parameter update)6. Update average training lossThe following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.819566
Epoch: 2 Training Loss: 0.322754
Epoch: 3 Training Loss: 0.249875
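###Markdown
As a minimal illustration of the six steps listed above, the short sketch below runs a single optimization step on one batch. It assumes `model`, `criterion`, `optimizer`, and `train_loader` are defined as in the cells above; the full loop simply repeats this for every batch in every epoch.
###Code
# minimal sketch of one training step on a single batch,
# assuming model, criterion, optimizer and train_loader from the cells above
data, target = next(iter(train_loader))     # grab one batch of images and labels
optimizer.zero_grad()                       # 1. clear accumulated gradients
output = model(data)                        # 2. forward pass
loss = criterion(output, target)            # 3. compute the loss
loss.backward()                             # 4. backward pass
optimizer.step()                            # 5. update the parameters
print('loss on this batch:', loss.item())   # 6. track the loss value
###Output
_____no_output_____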
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.190544
Test Accuracy of 0: 98% (965/980)
Test Accuracy of 1: 98% (1115/1135)
Test Accuracy of 2: 94% (975/1032)
Test Accuracy of 3: 91% (929/1010)
Test Accuracy of 4: 93% (915/982)
Test Accuracy of 5: 91% (815/892)
Test Accuracy of 6: 96% (921/958)
Test Accuracy of 7: 93% (964/1028)
Test Accuracy of 8: 92% (900/974)
Test Accuracy of 9: 92% (935/1009)
Test Accuracy (Overall): 94% (9434/10000)
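###Markdown
As a quick cross-check of the overall figure above, the same accuracy can be recomputed with a few vectorized tensor operations. This is a sketch that assumes `model` and `test_loader` are defined as above; it should agree with the per-class tally.
###Code
# sketch: recompute overall test accuracy with vectorized tensor operations,
# assuming model and test_loader from the cells above
correct = 0
total = 0
model.eval()
with torch.no_grad():                       # no gradients needed for evaluation
    for data, target in test_loader:
        preds = model(data).argmax(dim=1)   # predicted class for each image
        correct += (preds == target).sum().item()
        total += target.size(0)
print('Overall accuracy: {:.2f}%'.format(100.0 * correct / total))
###Output
_____no_output_____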
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data; 2. Define a neural network; 3. Train the model; 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
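###Markdown
Before plotting anything, it can help to confirm what one batch actually looks like. The sketch below uses the `train_loader` defined above and prints the tensor shapes; the 28 x 28 image size is also where the 784-dim flattening used later comes from.
###Code
# sketch: inspect the shape of one training batch from the loader defined above
images, labels = next(iter(train_loader))
print(images.shape)                             # [20, 1, 28, 28]: 20 single-channel 28x28 images
print(labels.shape)                             # [20]: one integer label per image
print(images.view(images.size(0), -1).shape)    # [20, 784]: the flattened view the MLP will use
###Output
_____no_output_____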
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
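###Markdown
Since `transforms.ToTensor()` scales pixel values into the [0, 1] range, the annotation threshold above typically lands around 0.4. A quick check, reusing `img` and `thresh` from the cell above:
###Code
# quick check of the pixel range and the white/black annotation threshold,
# reusing img and thresh from the cell above
print('pixel range: [{:.2f}, {:.2f}]'.format(img.min(), img.max()))
print('annotation threshold: {:.2f}'.format(thresh))
###Output
_____no_output_____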
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
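###Markdown
The same architecture can also be written more compactly with `nn.Sequential`. This is only an equivalent sketch for comparison, not the class used in the rest of this notebook; the subclassing style above gives more control over the forward pass.
###Code
# sketch: the same MLP expressed with nn.Sequential (equivalent to Net above)
seq_model = nn.Sequential(
    nn.Flatten(),               # [batch, 1, 28, 28] -> [batch, 784]
    nn.Linear(28 * 28, 512),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(512, 512),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(512, 10),
)
print(seq_model)
###Output
_____no_output_____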
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables; 2. Forward pass: compute predicted outputs by passing inputs to the model; 3. Calculate the loss; 4. Backward pass: compute gradient of the loss with respect to model parameters; 5. Perform a single optimization step (parameter update); 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data; 2. Define a neural network; 3. Train the model; 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
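###Markdown
To make the statement above concrete: for raw, unnormalized scores, `nn.CrossEntropyLoss` gives the same value as applying `log_softmax` and then the negative log-likelihood loss ($-\log \frac{\exp(x_y)}{\sum_j \exp(x_j)}$ for true class $y$). A small sketch with random tensors, assuming the imports from the architecture cell above:
###Code
# sketch: CrossEntropyLoss on raw scores equals LogSoftmax followed by NLLLoss
logits = torch.randn(4, 10)             # raw scores for 4 samples, 10 classes
targets = torch.tensor([3, 0, 7, 1])    # ground-truth class indices
loss_ce = nn.CrossEntropyLoss()(logits, targets)
loss_nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(loss_ce.item(), loss_nll.item())  # the two values match up to float precision
###Output
_____no_output_____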
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables; 2. Forward pass: compute predicted outputs by passing inputs to the model; 3. Calculate the loss; 4. Backward pass: compute gradient of the loss with respect to model parameters; 5. Perform a single optimization step (parameter update); 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training (enables dropout)
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
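###Markdown
Because the network uses dropout, it matters whether the model is in training or evaluation mode: `model.train()` enables dropout (stochastic forward passes), while `model.eval()` disables it. A small sketch of the difference on one batch, assuming `model` and `test_loader` from the cells above; this is why we switch to evaluation mode before testing below.
###Code
# sketch: dropout makes the forward pass stochastic in training mode
# and deterministic in evaluation mode
data, _ = next(iter(test_loader))
model.train()                           # dropout active
out_a, out_b = model(data), model(data)
print(torch.allclose(out_a, out_b))     # almost always False: two passes differ
model.eval()                            # dropout disabled
out_c, out_d = model(data), model(data)
print(torch.allclose(out_c, out_d))     # True: passes are identical
###Output
_____no_output_____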
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation (disables dropout)
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data; 2. Define a neural network; 3. Train the model; 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables; 2. Forward pass: compute predicted outputs by passing inputs to the model; 3. Calculate the loss; 4. Backward pass: compute gradient of the loss with respect to model parameters; 5. Perform a single optimization step (parameter update); 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
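###Markdown
The same `predicted (ground-truth)` comparison can be done for a single image without any plotting. A short sketch reusing `images` (already converted to a NumPy array), `labels`, and `model` from the cell above:
###Code
# sketch: predict the class of a single test image,
# reusing images (numpy), labels and model from the cell above
single = torch.from_numpy(images[0]).unsqueeze(0)   # shape [1, 1, 28, 28]
with torch.no_grad():
    scores = model(single)
pred = scores.argmax(dim=1).item()
print('predicted: {}  (ground-truth: {})'.format(pred, labels[0].item()))
###Output
_____no_output_____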
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data; 2. Define a neural network; 3. Train the model; 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
# The MNIST datasets are hosted on yann.lecun.com that has moved under CloudFlare protection
# Run this script to enable the datasets download
# Reference: https://github.com/pytorch/vision/issues/1938
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Processing...
Done!
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables; 2. Forward pass: compute predicted outputs by passing inputs to the model; 3. Calculate the loss; 4. Backward pass: compute gradient of the loss with respect to model parameters; 5. Perform a single optimization step (parameter update); 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833544
Epoch: 2 Training Loss: 0.321996
Epoch: 3 Training Loss: 0.247905
Epoch: 4 Training Loss: 0.201408
Epoch: 5 Training Loss: 0.169627
Epoch: 6 Training Loss: 0.147488
Epoch: 7 Training Loss: 0.129424
Epoch: 8 Training Loss: 0.116433
Epoch: 9 Training Loss: 0.104333
Epoch: 10 Training Loss: 0.094504
Epoch: 11 Training Loss: 0.085769
Epoch: 12 Training Loss: 0.080728
Epoch: 13 Training Loss: 0.073689
Epoch: 14 Training Loss: 0.067905
Epoch: 15 Training Loss: 0.063251
Epoch: 16 Training Loss: 0.058666
Epoch: 17 Training Loss: 0.055106
Epoch: 18 Training Loss: 0.050979
Epoch: 19 Training Loss: 0.048491
Epoch: 20 Training Loss: 0.046173
Epoch: 21 Training Loss: 0.044311
Epoch: 22 Training Loss: 0.041405
Epoch: 23 Training Loss: 0.038702
Epoch: 24 Training Loss: 0.036634
Epoch: 25 Training Loss: 0.035159
Epoch: 26 Training Loss: 0.033605
Epoch: 27 Training Loss: 0.030255
Epoch: 28 Training Loss: 0.029026
Epoch: 29 Training Loss: 0.028722
Epoch: 30 Training Loss: 0.027026
Epoch: 31 Training Loss: 0.026134
Epoch: 32 Training Loss: 0.022992
Epoch: 33 Training Loss: 0.023809
Epoch: 34 Training Loss: 0.022347
Epoch: 35 Training Loss: 0.021212
Epoch: 36 Training Loss: 0.020292
Epoch: 37 Training Loss: 0.019413
Epoch: 38 Training Loss: 0.019758
Epoch: 39 Training Loss: 0.017851
Epoch: 40 Training Loss: 0.017023
Epoch: 41 Training Loss: 0.016846
Epoch: 42 Training Loss: 0.016187
Epoch: 43 Training Loss: 0.015530
Epoch: 44 Training Loss: 0.014553
Epoch: 45 Training Loss: 0.014781
Epoch: 46 Training Loss: 0.013546
Epoch: 47 Training Loss: 0.013328
Epoch: 48 Training Loss: 0.012698
Epoch: 49 Training Loss: 0.012012
Epoch: 50 Training Loss: 0.012588
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.052876
Test Accuracy of 0: 99% (972/980)
Test Accuracy of 1: 99% (1127/1135)
Test Accuracy of 2: 98% (1012/1032)
Test Accuracy of 3: 98% (992/1010)
Test Accuracy of 4: 98% (968/982)
Test Accuracy of 5: 98% (875/892)
Test Accuracy of 6: 98% (946/958)
Test Accuracy of 7: 98% (1010/1028)
Test Accuracy of 8: 97% (949/974)
Test Accuracy of 9: 98% (990/1009)
Test Accuracy (Overall): 98% (9841/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data; 2. Define a neural network; 3. Train the model; 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables; 2. Forward pass: compute predicted outputs by passing inputs to the model; 3. Calculate the loss; 4. Backward pass: compute gradient of the loss with respect to model parameters; 5. Perform a single optimization step (parameter update); 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
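###Markdown
Step (1) above matters because PyTorch accumulates gradients by default; a minimal illustration on a throwaway tensor (not part of the training loop) shows why `optimizer.zero_grad()` is needed at the start of every iteration.
###Code
import torch
w = torch.tensor([1.0], requires_grad=True)
# two backward passes WITHOUT zeroing: the gradients add up
(w * 3).sum().backward()
(w * 3).sum().backward()
print(w.grad)  # expected: tensor([6.]) -- 3 + 3 accumulated
# zeroing the gradient (what optimizer.zero_grad() does for every model parameter) resets the buffer
w.grad.zero_()
(w * 3).sum().backward()
print(w.grad)  # expected: tensor([3.])
###Output
_____no_output_____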
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))  # note: a 'classes' list is not defined in this notebook, so use the digit index
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process is broken down into the following steps: (1) load and visualize the data; (2) define a neural network; (3) train the model; (4) evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
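###Markdown
Before moving on, it can help to confirm what one batch from these loaders looks like; the quick check below (illustrative only, reusing `train_loader` from the cell above) prints the tensor shapes the network will receive.
###Code
# peek at a single batch to confirm the shapes the network will see (illustrative only)
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.shape)  # expected: torch.Size([20, 1, 28, 28]) -- 20 grayscale 28x28 images
print(sample_labels.shape)  # expected: torch.Size([20]) -- one digit label per image
###Output
_____no_output_____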
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes a 784-dimensional Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) containing the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: (1) clear the gradients of all optimized variables; (2) forward pass: compute predicted outputs by passing inputs to the model; (3) calculate the loss; (4) backward pass: compute the gradient of the loss with respect to the model parameters; (5) perform a single optimization step (parameter update); (6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
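###Markdown
One note on the bookkeeping above: `loss.item()` is the *mean* loss over a batch, so multiplying by `data.size(0)` recovers the batch total, and dividing the accumulated total by the number of samples gives a true per-sample average even when the final batch is smaller. A tiny numeric sketch with made-up batch losses:
###Code
# made-up example: two batches of different sizes with different mean losses
batch_mean_losses = [0.80, 0.20]  # mean loss reported by the criterion for each batch
batch_sizes = [20, 10]            # the final batch may be smaller
running_total = sum(m * n for m, n in zip(batch_mean_losses, batch_sizes))
print(running_total / sum(batch_sizes))                 # 0.6 -- per-sample average, weighted by batch size
print(sum(batch_mean_losses) / len(batch_mean_losses))  # 0.5 -- a naive mean of means over-weights the small batch
###Output
_____no_output_____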
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))  # note: a 'classes' list is not defined in this notebook, so use the digit index
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process is broken down into the following steps: (1) load and visualize the data; (2) define a neural network; (3) train the model; (4) evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes a 784-dimensional Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) containing the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: (1) clear the gradients of all optimized variables; (2) forward pass: compute predicted outputs by passing inputs to the model; (3) calculate the loss; (4) backward pass: compute the gradient of the loss with respect to the model parameters; (5) perform a single optimization step (parameter update); (6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))  # note: a 'classes' list is not defined in this notebook, so use the digit index
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
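###Markdown
The two tensor operations doing the heavy lifting in the test loop are `torch.max(output, 1)`, which picks the highest-scoring class in each row, and `pred.eq(...)`, which marks where predictions match the labels; a tiny worked example with made-up scores (not model output) is below.
###Code
import torch
# made-up scores for 3 examples over 4 classes
output = torch.tensor([[0.1, 2.0, 0.3, 0.0],
                       [1.5, 0.2, 0.1, 0.4],
                       [0.0, 0.1, 0.2, 3.0]])
target = torch.tensor([1, 0, 2])
_, pred = torch.max(output, 1)           # index of the largest score in each row
print(pred)                              # expected: tensor([1, 0, 3])
correct = pred.eq(target.view_as(pred))  # element-wise comparison with the labels
print(correct)                           # expected: tensor([True, True, False])
print(correct.sum().item(), 'of', len(target), 'correct')
###Output
_____no_output_____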
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process is broken down into the following steps: (1) load and visualize the data; (2) define a neural network; (3) train the model; (4) evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
<ipython-input-3-731dd270b2c2>:12: MatplotlibDeprecationWarning: Passing non-integers as three-element position specification is deprecated since 3.3 and will be removed two minor releases later.
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
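###Markdown
The warning above comes from passing `20/2` (a Python float) as the number of subplot columns; Matplotlib 3.3+ expects integers there and later releases drop the float form entirely. A minimal sketch of the fixed call, reusing the `images` and `labels` from the batch above, simply switches to integer division.
###Code
import numpy as np
import matplotlib.pyplot as plt
# same grid as above, but with integer arguments so Matplotlib 3.3+ accepts them without a warning
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20 // 2, int(idx) + 1, xticks=[], yticks=[])
    ax.imshow(np.squeeze(images[idx]), cmap='gray')
    ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____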
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes a 784-dimensional Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) containing the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2, inplace=False)
)
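###Markdown
Because the model contains a `Dropout` layer, it behaves stochastically in training mode and deterministically in evaluation mode, which is why later cells call `model.train()` and `model.eval()`; a small illustration on a standalone dropout layer (a sketch, not tied to the model above) is below.
###Code
import torch
import torch.nn as nn
drop = nn.Dropout(p=0.5)
x = torch.ones(8)
drop.train()    # training mode: roughly half the values are zeroed, the survivors scaled by 1/(1-p)
print(drop(x))
drop.eval()     # evaluation mode: dropout is a no-op
print(drop(x))  # expected: all ones
###Output
_____no_output_____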
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: (1) clear the gradients of all optimized variables; (2) forward pass: compute predicted outputs by passing inputs to the model; (3) calculate the loss; (4) backward pass: compute the gradient of the loss with respect to the model parameters; (5) perform a single optimization step (parameter update); (6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.826213
Epoch: 2 Training Loss: 0.321866
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))  # note: a 'classes' list is not defined in this notebook, so use the digit index
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process is broken down into the following steps: (1) load and visualize the data; (2) define a neural network; (3) train the model; (4) evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Processing...
Done!
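###Markdown
One detail worth seeing explicitly: `transforms.ToTensor()` converts each PIL image into a float Tensor of shape `(1, 28, 28)` with pixel values scaled from 0-255 into [0, 1]. The quick check below (illustrative only, reusing `train_data` from the cell above) inspects the first training example.
###Code
# inspect a single transformed example from the dataset defined above (illustrative only)
sample_img, sample_label = train_data[0]
print(sample_img.dtype, sample_img.shape)                # expected: torch.float32 torch.Size([1, 28, 28])
print(sample_img.min().item(), sample_img.max().item())  # pixel values scaled into [0, 1]
print('label:', sample_label)
###Output
_____no_output_____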
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes a 784-dimensional Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) containing the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: (1) clear the gradients of all optimized variables; (2) forward pass: compute predicted outputs by passing inputs to the model; (3) calculate the loss; (4) backward pass: compute the gradient of the loss with respect to the model parameters; (5) perform a single optimization step (parameter update); (6) update the average training loss. The loop below is written for 50 epochs but is temporarily set to 5 (see the TODO in the cell); take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
# n_epochs = 50
# TODO: Change later
n_epochs = 5
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.825016
Epoch: 2 Training Loss: 0.326062
Epoch: 3 Training Loss: 0.251368
Epoch: 4 Training Loss: 0.203641
Epoch: 5 Training Loss: 0.171040
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):  # use the actual batch length so a smaller final batch is handled correctly
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))  # note: a 'classes' list is not defined in this notebook, so use the digit index
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.141265
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1120/1135)
Test Accuracy of 2: 93% (970/1032)
Test Accuracy of 3: 96% (970/1010)
Test Accuracy of 4: 96% (943/982)
Test Accuracy of 5: 95% (853/892)
Test Accuracy of 6: 95% (916/958)
Test Accuracy of 7: 94% (972/1028)
Test Accuracy of 8: 94% (917/974)
Test Accuracy of 9: 93% (948/1009)
Test Accuracy (Overall): 95% (9577/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process is broken down into the following steps: (1) load and visualize the data; (2) define a neural network; (3) train the model; (4) evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes a 784-dimensional Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) containing the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
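###Markdown
For reference, a single `optimizer.step()` with plain SGD is just `parameter -= learning_rate * parameter.grad` for every parameter; the sketch below (on a throwaway scalar parameter, not the model above) compares a hand-rolled update with `torch.optim.SGD`.
###Code
import torch
# one scalar "parameter" optimized two ways
w_manual = torch.tensor([2.0], requires_grad=True)
w_sgd = torch.tensor([2.0], requires_grad=True)
opt = torch.optim.SGD([w_sgd], lr=0.01)
for w in (w_manual, w_sgd):
    (w ** 2).sum().backward()  # gradient of w^2 at w=2 is 2*w = 4
with torch.no_grad():
    w_manual -= 0.01 * w_manual.grad  # the update SGD performs internally
opt.step()
print(w_manual.item(), w_sgd.item())  # both expected to be 1.96
###Output
_____no_output_____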
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: (1) clear the gradients of all optimized variables; (2) forward pass: compute predicted outputs by passing inputs to the model; (3) calculate the loss; (4) backward pass: compute the gradient of the loss with respect to the model parameters; (5) perform a single optimization step (parameter update); (6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))  # `classes` is not defined in this notebook
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
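###Markdown
Beyond the per-class accuracies printed above, a confusion matrix shows which digits get mistaken for which. This is an optional sketch that reuses the `model` and `test_loader` from earlier cells; it is not part of the original evaluation code.
###Code
# 10x10 confusion matrix: rows are true digits, columns are predicted digits.
confusion = np.zeros((10, 10), dtype=np.int64)
model.eval()
with torch.no_grad():
    for data, target in test_loader:
        _, pred = torch.max(model(data), 1)
        for t, p in zip(target.view(-1), pred.view(-1)):
            confusion[t.item(), p.item()] += 1
print(confusion)
###Output
_____no_output_____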
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)  # use the builtin next(); the iterator's .next() method is not available in newer PyTorch
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])  # integer division: subplot grid sizes must be ints
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process is broken down into the following steps: 1. Load and visualize the data; 2. Define a neural network; 3. Train the model; 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
# The MNIST datasets are hosted on yann.lecun.com that has moved under CloudFlare protection
# Run this script to enable the datasets download
# Reference: https://github.com/pytorch/vision/issues/1938
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Processing...
Done!
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and make any initial observations about patterns in it.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes a flattened 784-dimensional Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) holding the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
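###Markdown
The same architecture can also be written compactly with `nn.Sequential`; the sketch below is an equivalent formulation shown only for comparison (it uses `nn.Flatten` in place of the manual `view` call) and is not used by the training code that follows.
###Code
# Equivalent stack of layers expressed with nn.Sequential.
sequential_model = nn.Sequential(
    nn.Flatten(),            # flattens (N, 1, 28, 28) to (N, 784)
    nn.Linear(28 * 28, 512),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(512, 512),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(512, 10),
)
print(sequential_model)
###Output
_____no_output_____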
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
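###Markdown
As a quick sanity check of the claim above, the sketch below shows that `nn.CrossEntropyLoss` applied to raw logits matches `nn.NLLLoss` applied to `F.log_softmax` of the same logits; the random logits and targets here are made up purely for illustration.
###Code
# CrossEntropyLoss(logits, targets) == NLLLoss(log_softmax(logits), targets)
logits = torch.randn(4, 10)            # a fake batch of 4 score vectors
targets = torch.tensor([3, 0, 7, 1])   # fake ground-truth digits
loss_ce = nn.CrossEntropyLoss()(logits, targets)
loss_nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(loss_ce.item(), loss_nll.item())  # the two values agree
###Output
_____no_output_____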
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables; 2. Forward pass: compute predicted outputs by passing inputs to the model; 3. Calculate the loss; 4. Backward pass: compute the gradient of the loss with respect to the model parameters; 5. Perform a single optimization step (parameter update); 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the training loss decreases over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833544
Epoch: 2 Training Loss: 0.321996
Epoch: 3 Training Loss: 0.247905
Epoch: 4 Training Loss: 0.201408
Epoch: 5 Training Loss: 0.169627
Epoch: 6 Training Loss: 0.147488
Epoch: 7 Training Loss: 0.129424
Epoch: 8 Training Loss: 0.116433
Epoch: 9 Training Loss: 0.104333
Epoch: 10 Training Loss: 0.094504
Epoch: 11 Training Loss: 0.085769
Epoch: 12 Training Loss: 0.080728
Epoch: 13 Training Loss: 0.073689
Epoch: 14 Training Loss: 0.067905
Epoch: 15 Training Loss: 0.063251
Epoch: 16 Training Loss: 0.058666
Epoch: 17 Training Loss: 0.055106
Epoch: 18 Training Loss: 0.050979
Epoch: 19 Training Loss: 0.048491
Epoch: 20 Training Loss: 0.046173
Epoch: 21 Training Loss: 0.044311
Epoch: 22 Training Loss: 0.041405
Epoch: 23 Training Loss: 0.038702
Epoch: 24 Training Loss: 0.036634
Epoch: 25 Training Loss: 0.035159
Epoch: 26 Training Loss: 0.033605
Epoch: 27 Training Loss: 0.030255
Epoch: 28 Training Loss: 0.029026
Epoch: 29 Training Loss: 0.028722
Epoch: 30 Training Loss: 0.027026
Epoch: 31 Training Loss: 0.026134
Epoch: 32 Training Loss: 0.022992
Epoch: 33 Training Loss: 0.023809
Epoch: 34 Training Loss: 0.022347
Epoch: 35 Training Loss: 0.021212
Epoch: 36 Training Loss: 0.020292
Epoch: 37 Training Loss: 0.019413
Epoch: 38 Training Loss: 0.019758
Epoch: 39 Training Loss: 0.017851
Epoch: 40 Training Loss: 0.017023
Epoch: 41 Training Loss: 0.016846
Epoch: 42 Training Loss: 0.016187
Epoch: 43 Training Loss: 0.015530
Epoch: 44 Training Loss: 0.014553
Epoch: 45 Training Loss: 0.014781
Epoch: 46 Training Loss: 0.013546
Epoch: 47 Training Loss: 0.013328
Epoch: 48 Training Loss: 0.012698
Epoch: 49 Training Loss: 0.012012
Epoch: 50 Training Loss: 0.012588
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # len(target) handles a final batch smaller than batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.052876
Test Accuracy of 0: 99% (972/980)
Test Accuracy of 1: 99% (1127/1135)
Test Accuracy of 2: 98% (1012/1032)
Test Accuracy of 3: 98% (992/1010)
Test Accuracy of 4: 98% (968/982)
Test Accuracy of 5: 98% (875/892)
Test Accuracy of 6: 98% (946/958)
Test Accuracy of 7: 98% (1010/1028)
Test Accuracy of 8: 97% (949/974)
Test Accuracy of 9: 98% (990/1009)
Test Accuracy (Overall): 98% (9841/10000)
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process is broken down into the following steps: 1. Load and visualize the data; 2. Define a neural network; 3. Train the model; 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and make any initial observations about patterns in it.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes a flattened 784-dimensional Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) holding the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables; 2. Forward pass: compute predicted outputs by passing inputs to the model; 3. Calculate the loss; 4. Backward pass: compute the gradient of the loss with respect to the model parameters; 5. Perform a single optimization step (parameter update); 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the training loss decreases over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
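###Markdown
To see the decrease visually rather than as printed numbers, the per-epoch losses can be collected in a list and plotted. The sketch below assumes the loop above is modified to append each epoch's average loss to a list named `epoch_losses`; that name is hypothetical and is not defined anywhere in this notebook.
###Code
import matplotlib.pyplot as plt

# Inside the training loop above, one extra line would record each epoch:
#     epoch_losses.append(train_loss)
epoch_losses = []  # hypothetical list, filled during training
if epoch_losses:
    plt.plot(range(1, len(epoch_losses) + 1), epoch_losses)
    plt.xlabel('Epoch')
    plt.ylabel('Average training loss')
    plt.title('Training loss over 50 epochs')
    plt.show()
###Output
_____no_output_____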
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
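###Markdown
Once we are happy with the test accuracy, the trained weights can be saved and restored later through the model's `state_dict`. The filename `model_mnist_mlp.pt` in the sketch below is an arbitrary choice.
###Code
# Save the trained weights to disk.
torch.save(model.state_dict(), 'model_mnist_mlp.pt')

# Reload them into a fresh instance of the same architecture.
restored = Net()
restored.load_state_dict(torch.load('model_mnist_mlp.pt'))
restored.eval()  # switch to evaluation mode before running inference
###Output
_____no_output_____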
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process is broken down into the following steps: 1. Load and visualize the data; 2. Define a neural network; 3. Train the model; 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and make any initial observations about patterns in it.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
print(images.shape)
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
(20, 1, 28, 28)
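###Markdown
The printed shape `(20, 1, 28, 28)` is `(batch_size, channels, height, width)`; the MLP defined later flattens each 1x28x28 image into a 784-dimensional vector, which the short sketch below checks.
###Code
# Flatten every image in the batch: (20, 1, 28, 28) -> (20, 784).
flat = images.reshape(images.shape[0], -1)
print(flat.shape)
###Output
_____no_output_____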
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
print(img.shape)
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
(28, 28)
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes a flattened 784-dimensional Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) holding the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
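###Markdown
The printout above lists the layer shapes; a quick way to gauge the model's size is to count its trainable parameters, as in the short sketch below.
###Code
# fc1: 784*512 + 512, fc2: 512*512 + 512, fc3: 512*10 + 10 -- about 670k in total.
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Trainable parameters: {:,}'.format(n_params))
###Output
_____no_output_____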
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables; 2. Forward pass: compute predicted outputs by passing inputs to the model; 3. Calculate the loss; 4. Backward pass: compute the gradient of the loss with respect to the model parameters; 5. Perform a single optimization step (parameter update); 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the training loss decreases over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
# epoch is the number of times we want the model to go through an entire dataset
# recommendation is at least 20 epochs
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833660
Epoch: 2 Training Loss: 0.319992
Epoch: 3 Training Loss: 0.249035
Epoch: 4 Training Loss: 0.199781
Epoch: 5 Training Loss: 0.169106
Epoch: 6 Training Loss: 0.145452
Epoch: 7 Training Loss: 0.128913
Epoch: 8 Training Loss: 0.114817
Epoch: 9 Training Loss: 0.103485
Epoch: 10 Training Loss: 0.095245
Epoch: 11 Training Loss: 0.087654
Epoch: 12 Training Loss: 0.080850
Epoch: 13 Training Loss: 0.073405
Epoch: 14 Training Loss: 0.067742
Epoch: 15 Training Loss: 0.064657
Epoch: 16 Training Loss: 0.060023
Epoch: 17 Training Loss: 0.056177
Epoch: 18 Training Loss: 0.052415
Epoch: 19 Training Loss: 0.049279
Epoch: 20 Training Loss: 0.046487
Epoch: 21 Training Loss: 0.044259
Epoch: 22 Training Loss: 0.041661
Epoch: 23 Training Loss: 0.039575
Epoch: 24 Training Loss: 0.037012
Epoch: 25 Training Loss: 0.035101
Epoch: 26 Training Loss: 0.033372
Epoch: 27 Training Loss: 0.031650
Epoch: 28 Training Loss: 0.030668
Epoch: 29 Training Loss: 0.027745
Epoch: 30 Training Loss: 0.026697
Epoch: 31 Training Loss: 0.025421
Epoch: 32 Training Loss: 0.024818
Epoch: 33 Training Loss: 0.023445
Epoch: 34 Training Loss: 0.023131
Epoch: 35 Training Loss: 0.021146
Epoch: 36 Training Loss: 0.020650
Epoch: 37 Training Loss: 0.018963
Epoch: 38 Training Loss: 0.018673
Epoch: 39 Training Loss: 0.018759
Epoch: 40 Training Loss: 0.017963
Epoch: 41 Training Loss: 0.017360
Epoch: 42 Training Loss: 0.016782
Epoch: 43 Training Loss: 0.014614
Epoch: 44 Training Loss: 0.014893
Epoch: 45 Training Loss: 0.014049
Epoch: 46 Training Loss: 0.013888
Epoch: 47 Training Loss: 0.013382
Epoch: 48 Training Loss: 0.012637
Epoch: 49 Training Loss: 0.012014
Epoch: 50 Training Loss: 0.012430
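###Markdown
One detail in the loop above that is easy to miss: `criterion` returns the *mean* loss over a batch, so `loss.item()*data.size(0)` recovers the batch's total loss before the final division by the dataset size. A toy check with invented numbers (not taken from the run above):
###Code
# two batches with different sizes and made-up mean losses
batch_losses = [0.9, 0.7]
batch_sizes = [20, 10]
total = sum(l * n for l, n in zip(batch_losses, batch_sizes))
print(total / sum(batch_sizes)) # per-sample average: 0.8333...
###Output
_____no_output_____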
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be more granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # use the actual batch length in case the last batch is smaller than batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))  # note: `classes` is never defined in this notebook
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
a = torch.randn(1, 3)  # stray scratch line: creates an unused random 1x3 tensor
###Output
_____no_output_____
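###Markdown
The line `_, pred = torch.max(output, 1)` in the cell above picks, for every row of class scores, the index of the largest score. A minimal sketch with made-up scores:
###Code
import torch
scores = torch.tensor([[0.1, 2.0, -1.0],
                       [1.5, 0.2, 0.3]]) # 2 samples, 3 hypothetical classes
values, pred = torch.max(scores, 1) # max over the class dimension
print(pred) # tensor([1, 0])
###Output
_____no_output_____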
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps: 1) load and visualize the data, 2) define a neural network, 3) train the model, and 4) evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
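###Markdown
To see what the DataLoaders above yield, here is a minimal sketch on a tiny synthetic dataset (the numbers are invented); each iteration produces a `(data, target)` pair of tensors whose first dimension is the batch size.
###Code
import torch
from torch.utils.data import TensorDataset, DataLoader
xs = torch.arange(6, dtype=torch.float32).view(6, 1) # 6 fake samples with one feature each
ys = torch.tensor([0, 1, 0, 1, 0, 1]) # fake labels
loader = DataLoader(TensorDataset(xs, ys), batch_size=2)
for data, target in loader:
    print(data.shape, target.tolist()) # three batches of 2 samples each
###Output
_____no_output_____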
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
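###Markdown
The `x.view(-1, 28 * 28)` call in `forward` is what turns each 28x28 image into the 784-dim vector the first linear layer expects. A small shape check (the batch here is a dummy tensor, not real MNIST data):
###Code
import torch
batch = torch.zeros(20, 1, 28, 28) # [batch, channels, height, width]
flat = batch.view(-1, 28 * 28)
print(flat.shape) # torch.Size([20, 784])
###Output
_____no_output_____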
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1) clear the gradients of all optimized variables, 2) forward pass: compute predicted outputs by passing inputs to the model, 3) calculate the loss, 4) backward pass: compute the gradient of the loss with respect to the model parameters, 5) perform a single optimization step (parameter update), and 6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be more granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))  # note: `classes` is never defined in this notebook
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps: 1) load and visualize the data, 2) define a neural network, 3) train the model, and 4) evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Processing...
Done!
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
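###Markdown
The dropout layer defined above behaves differently in training and evaluation mode, which is why the later cells call `model.train()` and `model.eval()`. A minimal sketch of that switch on a standalone `nn.Dropout`:
###Code
import torch
import torch.nn as nn
drop = nn.Dropout(p=0.2)
x = torch.ones(1, 8)
drop.train() # training mode: roughly 20% of values zeroed, the rest scaled by 1/0.8
print(drop(x))
drop.eval() # evaluation mode: dropout is a no-op
print(drop(x)) # all ones
###Output
_____no_output_____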
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
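###Markdown
A single SGD step is just `w <- w - lr * grad`. A tiny hand-checkable sketch with a one-parameter "model" (the numbers are made up):
###Code
import torch
w = torch.tensor([1.0], requires_grad=True)
opt = torch.optim.SGD([w], lr=0.01)
loss = (w * 3.0).sum() # d(loss)/dw = 3
loss.backward()
opt.step()
print(w) # 1.0 - 0.01 * 3 = tensor([0.9700], requires_grad=True)
###Output
_____no_output_____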
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1) clear the gradients of all optimized variables, 2) forward pass: compute predicted outputs by passing inputs to the model, 3) calculate the loss, 4) backward pass: compute the gradient of the loss with respect to the model parameters, 5) perform a single optimization step (parameter update), and 6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.819049
Epoch: 2 Training Loss: 0.321645
Epoch: 3 Training Loss: 0.249828
Epoch: 4 Training Loss: 0.204055
Epoch: 5 Training Loss: 0.171745
Epoch: 6 Training Loss: 0.147592
Epoch: 7 Training Loss: 0.128249
Epoch: 8 Training Loss: 0.114969
Epoch: 9 Training Loss: 0.104242
Epoch: 10 Training Loss: 0.093462
Epoch: 11 Training Loss: 0.086792
Epoch: 12 Training Loss: 0.079076
Epoch: 13 Training Loss: 0.073538
Epoch: 14 Training Loss: 0.067969
Epoch: 15 Training Loss: 0.063757
Epoch: 16 Training Loss: 0.058263
Epoch: 17 Training Loss: 0.053879
Epoch: 18 Training Loss: 0.050935
Epoch: 19 Training Loss: 0.048815
Epoch: 20 Training Loss: 0.045646
Epoch: 21 Training Loss: 0.042279
Epoch: 22 Training Loss: 0.041534
Epoch: 23 Training Loss: 0.037504
Epoch: 24 Training Loss: 0.036142
Epoch: 25 Training Loss: 0.034621
Epoch: 26 Training Loss: 0.032302
Epoch: 27 Training Loss: 0.030729
Epoch: 28 Training Loss: 0.028596
Epoch: 29 Training Loss: 0.027836
Epoch: 30 Training Loss: 0.026933
Epoch: 31 Training Loss: 0.025351
Epoch: 32 Training Loss: 0.024490
Epoch: 33 Training Loss: 0.022963
Epoch: 34 Training Loss: 0.022364
Epoch: 35 Training Loss: 0.020460
Epoch: 36 Training Loss: 0.020045
Epoch: 37 Training Loss: 0.019103
Epoch: 38 Training Loss: 0.018757
Epoch: 39 Training Loss: 0.017940
Epoch: 40 Training Loss: 0.017460
Epoch: 41 Training Loss: 0.016098
Epoch: 42 Training Loss: 0.015511
Epoch: 43 Training Loss: 0.015343
Epoch: 44 Training Loss: 0.014177
Epoch: 45 Training Loss: 0.013880
Epoch: 46 Training Loss: 0.013219
Epoch: 47 Training Loss: 0.013468
Epoch: 48 Training Loss: 0.012551
Epoch: 49 Training Loss: 0.012012
Epoch: 50 Training Loss: 0.011781
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be more granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # use the actual batch length in case the last batch is smaller than batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))  # note: `classes` is never defined in this notebook
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.055764
Test Accuracy of 0: 99% (971/980)
Test Accuracy of 1: 99% (1126/1135)
Test Accuracy of 2: 98% (1012/1032)
Test Accuracy of 3: 98% (991/1010)
Test Accuracy of 4: 98% (966/982)
Test Accuracy of 5: 98% (877/892)
Test Accuracy of 6: 97% (938/958)
Test Accuracy of 7: 98% (1008/1028)
Test Accuracy of 8: 97% (950/974)
Test Accuracy of 9: 98% (992/1009)
Test Accuracy (Overall): 98% (9831/10000)
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
# quick, undocumented sanity checks on the objects created above
isinstance(criterion, nn.Module)  # loss functions subclass nn.Module
if not isinstance(optimizer, torch.nn.Module):  # optimizers are not nn.Module subclasses
    print("a")
print(model.type)  # note: this prints the bound .type method; type(model) would print the class itself
isinstance(train_loader, torch.utils.data.DataLoader)  # DataLoader type check
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps: 1) load and visualize the data, 2) define a neural network, 3) train the model, and 4) evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
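###Markdown
The `transforms.ToTensor()` used above does two things: it converts the image to a float tensor with a channel-first layout and scales pixel values into [0, 1]. A small sketch on a fake 2x2 grayscale image:
###Code
import numpy as np
import torchvision.transforms as transforms
img = np.array([[0, 85], [170, 255]], dtype=np.uint8).reshape(2, 2, 1) # fake HxWxC image
t = transforms.ToTensor()(img)
print(t.dtype, t.shape) # torch.float32 torch.Size([1, 2, 2])
print(t.min().item(), t.max().item()) # 0.0 1.0
###Output
_____no_output_____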
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1) clear the gradients of all optimized variables, 2) forward pass: compute predicted outputs by passing inputs to the model, 3) calculate the loss, 4) backward pass: compute the gradient of the loss with respect to the model parameters, 5) perform a single optimization step (parameter update), and 6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
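###Markdown
Step 1 in the recipe above (`optimizer.zero_grad()`) matters because PyTorch accumulates gradients across backward calls by default. A minimal sketch showing that accumulation on a single parameter (the values are made up):
###Code
import torch
w = torch.tensor([2.0], requires_grad=True)
for _ in range(2):
    (w * 5.0).sum().backward() # each call adds 5 to w.grad
print(w.grad) # tensor([10.]) -- accumulated, not overwritten
w.grad.zero_()
print(w.grad) # tensor([0.])
###Output
_____no_output_____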
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be more granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # use the actual batch length in case the last batch is smaller than batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))  # note: `classes` is never defined in this notebook
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps: 1) load and visualize the data, 2) define a neural network, 3) train the model, and 4) evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
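###Markdown
For a sense of the model's size, the parameter count of the architecture printed above can be worked out by hand and cross-checked against `model.parameters()` (this reuses the `model` instance created in the cell above):
###Code
# weights + biases per layer: fc1: 784*512 + 512, fc2: 512*512 + 512, fc3: 512*10 + 10
by_hand = 784*512 + 512 + 512*512 + 512 + 512*10 + 10
print(by_hand) # 669706
print(sum(p.numel() for p in model.parameters())) # same count from the model itself
###Output
_____no_output_____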
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1) clear the gradients of all optimized variables, 2) forward pass: compute predicted outputs by passing inputs to the model, 3) calculate the loss, 4) backward pass: compute the gradient of the loss with respect to the model parameters, 5) perform a single optimization step (parameter update), and 6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.815295
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be more granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # use the actual batch length in case the last batch is smaller than batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))  # note: `classes` is never defined in this notebook
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
_____no_output_____
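###Markdown
The `pred.eq(target.data.view_as(pred))` line above does an element-wise comparison between predictions and labels. A tiny sketch with made-up values:
###Code
import torch
pred = torch.tensor([7, 2, 1, 0])
target = torch.tensor([7, 2, 6, 0])
print(pred.eq(target.view_as(pred))) # tensor([ True,  True, False,  True])
###Output
_____no_output_____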
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps: 1) load and visualize the data, 2) define a neural network, 3) train the model, and 4) evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes as input a 784-dim Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that indicates the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1) clear the gradients of all optimized variables, 2) forward pass: compute predicted outputs by passing inputs to the model, 3) calculate the loss, 4) backward pass: compute the gradient of the loss with respect to the model parameters, 5) perform a single optimization step (parameter update), and 6) update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.852090
Epoch: 2 Training Loss: 0.325030
Epoch: 3 Training Loss: 0.250651
Epoch: 4 Training Loss: 0.203980
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be more granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # use the actual batch length in case the last batch is smaller than batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))  # note: `classes` is never defined in this notebook
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
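###Markdown
The per-class numbers above come from the simple tally in the test loop: each label's total counter is incremented, and its correct counter is incremented only when the prediction matched. A toy version of that bookkeeping with invented predictions:
###Code
import numpy as np
preds = np.array([0, 1, 1, 2]) # made-up predictions
targets = np.array([0, 1, 2, 2]) # made-up labels
correct = preds == targets
for c in range(3):
    mask = targets == c
    print("class", c, ":", correct[mask].sum(), "/", mask.sum())
###Output
_____no_output_____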
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])  # integer division: add_subplot expects integer grid sizes
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data, 2. Define a neural network, 3. Train the model, 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html) Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
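###Markdown
One optional tweak, not used in the cell above, is to shuffle the training data each epoch so the network does not always see the digits in the same order; the test loader can stay unshuffled. A minimal variant of the loaders, assuming the same `train_data`, `test_data`, `batch_size`, and `num_workers`:
###Code
# same loaders as above, but the training batches are reshuffled every epoch
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           shuffle=True, num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
                                          shuffle=False, num_workers=num_workers)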
###Markdown
Visualize a Batch of Training Data The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)  # use the built-in next(); the .next() method is not available on newer DataLoader iterators
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])  # integer division: add_subplot expects integer grid sizes
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes as input a 784-dimensional Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that holds the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
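###Markdown
Before training, a quick sanity check (an addition, not part of the original notebook) can confirm that the shapes line up: pass a dummy batch shaped like MNIST images through the untrained network and make sure one score per class comes back.
###Code
# pass a fake batch of 4 single-channel 28x28 images through the network
dummy = torch.rand(4, 1, 28, 28)
scores = model(dummy)
print(scores.shape)  # expected: torch.Size([4, 10])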
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
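###Markdown
To make the point about softmax concrete, the small check below (an illustration added here, not part of the original notebook) shows that `nn.CrossEntropyLoss` on raw class scores matches `nn.NLLLoss` applied to the `log_softmax` of those scores.
###Code
# cross-entropy on raw logits equals negative log-likelihood on log-softmax outputs
logits = torch.randn(4, 10)            # fake class scores for a batch of 4
targets = torch.tensor([3, 0, 9, 1])   # fake labels
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(ce.item(), nll.item(), torch.allclose(ce, nll))  # the two losses agree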
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables, 2. Forward pass: compute predicted outputs by passing inputs to the model, 3. Calculate the loss, 4. Backward pass: compute the gradient of the loss with respect to the model parameters, 5. Perform a single optimization step (parameter update), 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want the loss to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
_____no_output_____
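###Markdown
The loop above only tracks training loss, so it cannot tell us when we start overfitting. One way to watch for that (a sketch added here, not part of the original notebook) is to hold out part of the training set as a validation set with `torch.utils.data.random_split` and compute the loss on it after each epoch; in a full version the training loader would also be rebuilt from the remaining subset.
###Code
# hold out 10% of the training set for validation
n_valid = len(train_data) // 10
n_train = len(train_data) - n_valid
train_subset, valid_subset = torch.utils.data.random_split(train_data, [n_train, n_valid])
valid_loader = torch.utils.data.DataLoader(valid_subset, batch_size=batch_size,
                                           num_workers=num_workers)
# something like this, run after each training epoch, measures validation loss
model.eval()
valid_loss = 0.0
with torch.no_grad():
    for data, target in valid_loader:
        output = model(data)
        valid_loss += criterion(output, target).item() * data.size(0)
valid_loss = valid_loss / len(valid_loader.dataset)
print('Validation Loss: {:.6f}'.format(valid_loss))
model.train()  # switch back to training mode before the next epoch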
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, in addition to its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))  # 'classes' is never defined here, so report the digit directly
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test Results This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)  # use the built-in next(); the .next() method is not available on newer DataLoader iterators
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])  # integer division: add_subplot expects integer grid sizes
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data, 2. Define a neural network, 3. Train the model, 4. Evaluate the performance of our trained model on a test dataset. Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html) Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training Data The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)  # use the built-in next(); the .next() method is not available on newer DataLoader iterators
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])  # integer division: add_subplot expects integer grid sizes
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes as input a 784-dimensional Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) that holds the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables, 2. Forward pass: compute predicted outputs by passing inputs to the model, 3. Calculate the loss, 4. Backward pass: compute the gradient of the loss with respect to the model parameters, 5. Perform a single optimization step (parameter update), 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want the loss to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
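###Markdown
To look at how the training loss decreases over time more directly, a variant of the loop above (a sketch added here, not part of the original notebook) can record each epoch's average loss in a list and plot the curve afterwards; note that running it trains the model further.
###Code
# record the average loss of each epoch while training, then plot the curve
epoch_losses = []
model.train()  # make sure dropout is active while training
for epoch in range(n_epochs):
    train_loss = 0.0
    for data, target in train_loader:
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * data.size(0)
    epoch_losses.append(train_loss / len(train_loader.dataset))
plt.plot(range(1, n_epochs + 1), epoch_losses)
plt.xlabel('epoch')
plt.ylabel('average training loss')
plt.show()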
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, in addition to its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # use the actual batch length in case the last batch is smaller than batch_size
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))  # 'classes' is never defined here, so report the digit directly
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
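###Markdown
Once we are happy with the test results, it is common (though not shown in the original notebook) to save the learned weights so the model can be reused later; saving the `state_dict` is the standard PyTorch pattern, and the filename below is just an example.
###Code
# save the trained weights to disk (the filename is arbitrary)
torch.save(model.state_dict(), 'model_mnist_mlp.pt')
# later, an identically defined network can reload them
model_reloaded = Net()
model_reloaded.load_state_dict(torch.load('model_mnist_mlp.pt'))
model_reloaded.eval()  # switch to eval mode before inference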
###Markdown
Visualize Sample Test Results This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)  # use the built-in next(); the .next() method is not available on newer DataLoader iterators
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])  # integer division: add_subplot expects integer grid sizes
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) hand-written digit database.The process will be broken down into the following steps:>1. Load and visualize the data2. Define a neural network3. Train the model4. Evaluate the performance of our trained model on a test dataset!Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
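###Markdown
Quick check (an optional addition, not part of the original notebook): for MNIST we expect 60,000 training and 10,000 test images, which at a batch size of 20 gives 3,000 training batches and 500 test batches. The cell below simply prints those sizes using the datasets and loaders defined above.
###Code
# sanity check: dataset and loader sizes (uses train_data/test_data and the loaders above)
print('training images:', len(train_data))    # 60,000 for MNIST
print('test images:', len(test_data))         # 10,000 for MNIST
print('training batches:', len(train_loader)) # 3,000 at batch_size=20
print('test batches:', len(test_loader))      # 500 at batch_size=20
###Output
_____no_output_____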
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
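###Markdown
Optional shape check (added as a small sketch, using the `images` and `labels` from the cell above): each batch holds 20 single-channel 28x28 images with one integer label per image, and `ToTensor` scales pixel values into [0, 1]. Keeping these shapes in mind helps later, when each image is flattened to a 784-dimensional vector.
###Code
# inspect one batch; note that `images` was converted to a NumPy array in the previous cell
print('images:', images.shape)    # expected: (20, 1, 28, 28)
print('labels:', labels.shape)    # expected: torch.Size([20])
print('pixel range:', images.min(), 'to', images.max())
###Output
_____no_output_____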
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)The architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
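###Markdown
A quick sanity check (an added sketch, assuming the `model` defined above): passing a dummy batch through the untrained network should return one row of 10 class scores per input image.
###Code
# forward a dummy batch of two fake "images" through the untrained model
dummy = torch.randn(2, 1, 28, 28)   # same shape as a small batch of MNIST images
scores = model(dummy)
print(scores.shape)                 # expected: torch.Size([2, 10])
###Output
_____no_output_____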
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
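###Markdown
To see the claim above concretely, the short added cell below checks on random data that `nn.CrossEntropyLoss` matches applying `log_softmax` to the scores and then the negative log-likelihood loss; the two values should agree up to floating-point precision.
###Code
# cross-entropy = log-softmax followed by negative log-likelihood (numerical check)
logits = torch.randn(4, 10)            # raw class scores for 4 samples
targets = torch.tensor([1, 0, 4, 9])   # arbitrary ground-truth labels
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
print(ce.item(), nll.item())           # the two numbers should match
###Output
_____no_output_____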
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. Update average training loss The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
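###Markdown
After training, it is common to save the learned weights so the model can be reused without retraining. The cell below is an optional sketch; the filename `model.pt` is just an example and not part of the original notebook.
###Code
# optional: save the trained weights (the filename is an arbitrary example)
torch.save(model.state_dict(), 'model.pt')
# ...and later restore them into a freshly constructed Net:
# model = Net()
# model.load_state_dict(torch.load('model.pt'))
###Output
_____no_output_____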
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps: 1. Load and visualize the data 2. Define a neural network 3. Train the model 4. Evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)The architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2, inplace=False)
)
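###Markdown
As an aside (an added sketch, not part of the original notebook), it is instructive to count the learnable parameters of this MLP: with layer sizes 784, 512, 512 and 10, the weights and biases total 669,706.
###Code
# count learnable parameters: 784*512 + 512 + 512*512 + 512 + 512*10 + 10 = 669,706
n_params = sum(p.numel() for p in model.parameters())
print('total learnable parameters:', n_params)
###Output
_____no_output_____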
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. Update average training loss The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.834291
Epoch: 2 Training Loss: 0.322621
Epoch: 3 Training Loss: 0.248215
Epoch: 4 Training Loss: 0.200578
Epoch: 5 Training Loss: 0.168204
Epoch: 6 Training Loss: 0.146013
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
_____no_output_____
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps: 1. Load and visualize the data 2. Define a neural network 3. Train the model 4. Evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)The architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. Update average training loss The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
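###Markdown
One refinement worth noting (shown as an added sketch rather than as part of the original notebook): wrapping the evaluation loop in `torch.no_grad()` turns off gradient tracking, which saves memory and computation during testing without changing the reported numbers.
###Code
# a leaner evaluation loop: no gradient tracking is needed at test time
model.eval()
eval_loss = 0.0
with torch.no_grad():
    for data, target in test_loader:
        output = model(data)
        eval_loss += criterion(output, target).item() * data.size(0)
print('Test Loss (no_grad): {:.6f}'.format(eval_loss / len(test_loader.dataset)))
###Output
_____no_output_____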
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps: 1. Load and visualize the data 2. Define a neural network 3. Train the model 4. Evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes as input a 784-dimensional Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) holding the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
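###Markdown
To get a sense of the size of this architecture, you can count its trainable parameters; a quick sketch (the layer shapes printed above imply 784*512 + 512*512 + 512*10 weights plus biases, roughly 670k parameters in total):
###Code
# count the trainable parameters of the model defined above
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(n_params)
###Output
_____no_output_____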
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
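###Markdown
As a quick check of the claim above (cross-entropy = softmax followed by log loss), here is a minimal sketch using randomly generated logits and labels, which are purely hypothetical values for illustration:
###Code
import torch
import torch.nn.functional as F
# a hypothetical batch of raw scores (logits) and integer class labels
logits = torch.randn(4, 10)
labels = torch.randint(0, 10, (4,))
# cross-entropy computed directly on the raw scores
ce = F.cross_entropy(logits, labels)
# the same quantity computed as log-softmax followed by negative log-likelihood
nll = F.nll_loss(F.log_softmax(logits, dim=1), labels)
print(torch.allclose(ce, nll))  # expected: True
###Output
_____no_output_____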
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. Update average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(batch_size):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
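###Markdown
One refinement worth knowing about (not used in the loop above, so treat this as a sketch): wrapping evaluation in `torch.no_grad()` turns off gradient tracking, which saves memory and time since the test pass never calls `backward()`:
###Code
# the same forward/loss computation, but without building the autograd graph
model.eval()
with torch.no_grad():
    for data, target in test_loader:
        output = model(data)
        loss = criterion(output, target)
###Output
_____no_output_____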
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20 // 2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data 2. Define a neural network 3. Train the model 4. Evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
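###Markdown
The training loop later in this notebook divides by `len(train_loader.sampler)`, which for the loaders above equals the full training-set size. If you ever want a held-out validation split (an assumption here, not something this notebook goes on to do), `SubsetRandomSampler` is one way to get it, and `len(...sampler)` then counts only the sampled examples; a minimal sketch:
###Code
from torch.utils.data.sampler import SubsetRandomSampler
# hypothetical 80/20 split of the training indices
indices = list(range(len(train_data)))
np.random.shuffle(indices)
split = int(0.2 * len(train_data))
train_idx, valid_idx = indices[split:], indices[:split]
# samplers that draw only from their respective index subsets
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
# loaders built on those samplers (a sampler replaces shuffle)
train_loader_split = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                                 sampler=train_sampler, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           sampler=valid_sampler, num_workers=num_workers)
###Output
_____no_output_____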
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20 // 2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html). The network takes as input a 784-dimensional Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) holding the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html). It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network. The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. Update average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
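###Markdown
The printed log above is the only record the loop keeps of the training loss. If you append each epoch's `train_loss` to a list inside the loop (an assumption; the loop above does not do this), the curve is easy to plot; a sketch using the first few values copied from the log:
###Code
# hypothetical list of per-epoch average losses, e.g. built via losses.append(train_loss)
losses = [0.833473, 0.327687, 0.253410, 0.207910, 0.175181]
plt.plot(range(1, len(losses) + 1), losses)
plt.xlabel('epoch')
plt.ylabel('average training loss')
plt.show()
###Output
_____no_output_____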
###Markdown
--- Test the Trained Network. Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class, as well as at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test Results. This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20 // 2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) of hand-written digits. The process will be broken down into the following steps: 1. Load and visualize the data 2. Define a neural network 3. Train the model 4. Evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html). Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training Data. The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20 // 2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture takes a 784-dim Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) containing the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
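###Markdown
As a rough back-of-the-envelope check on the printout above, the sketch below tallies the weights and biases of the three Linear layers by hand (a sketch added here for clarity, not something the notebook originally computed).
###Code
# weights + biases for each Linear layer of the 784-512-512-10 network above
fc1_params = 784 * 512 + 512
fc2_params = 512 * 512 + 512
fc3_params = 512 * 10 + 10
print(fc1_params, fc2_params, fc3_params)   # 401920 262656 5130
print(fc1_params + fc2_params + fc3_params) # 669706 trainable parameters in total
###Output
_____no_output_____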
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
model.cuda(1)
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
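###Markdown
To make the point about softmax plus log loss concrete, here is a small sketch (on random scores and made-up labels, unrelated to the model above) checking that `nn.CrossEntropyLoss` matches an explicit log-softmax followed by the negative log-likelihood loss.
###Code
import torch
import torch.nn as nn
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(4, 10)           # raw class scores for a batch of 4 images
targets = torch.tensor([3, 7, 0, 9])  # made-up ground-truth digit labels

ce = nn.CrossEntropyLoss()(logits, targets)
nll = F.nll_loss(F.log_softmax(logits, dim=1), targets)
print(ce.item(), nll.item())  # the two numbers agree
###Output
_____no_output_____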
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables. 2. Forward pass: compute predicted outputs by passing inputs to the model. 3. Calculate the loss. 4. Backward pass: compute the gradient of the loss with respect to model parameters. 5. Perform a single optimization step (parameter update). 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
        # move the input and label tensors to the GPU the model was moved to
        # (.cuda() is not in-place, so the results must be reassigned)
        data, target = data.cuda(1), target.cuda(1)
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.227577
Epoch: 2 Training Loss: 0.186746
Epoch: 3 Training Loss: 0.160968
Epoch: 4 Training Loss: 0.137820
Epoch: 5 Training Loss: 0.122956
Epoch: 6 Training Loss: 0.110834
Epoch: 7 Training Loss: 0.098691
Epoch: 8 Training Loss: 0.090602
Epoch: 9 Training Loss: 0.083795
Epoch: 10 Training Loss: 0.075558
Epoch: 11 Training Loss: 0.072494
Epoch: 12 Training Loss: 0.066294
Epoch: 13 Training Loss: 0.062118
Epoch: 14 Training Loss: 0.057753
Epoch: 15 Training Loss: 0.054140
Epoch: 16 Training Loss: 0.051135
Epoch: 17 Training Loss: 0.047108
Epoch: 18 Training Loss: 0.044572
Epoch: 19 Training Loss: 0.042683
Epoch: 20 Training Loss: 0.040203
Epoch: 21 Training Loss: 0.037545
Epoch: 22 Training Loss: 0.034705
Epoch: 23 Training Loss: 0.033359
Epoch: 24 Training Loss: 0.031737
Epoch: 25 Training Loss: 0.030314
Epoch: 26 Training Loss: 0.028681
Epoch: 27 Training Loss: 0.027776
Epoch: 28 Training Loss: 0.026859
Epoch: 29 Training Loss: 0.024807
Epoch: 30 Training Loss: 0.023255
Epoch: 31 Training Loss: 0.022459
Epoch: 32 Training Loss: 0.021736
Epoch: 33 Training Loss: 0.020226
Epoch: 34 Training Loss: 0.019968
Epoch: 35 Training Loss: 0.019678
Epoch: 36 Training Loss: 0.018569
Epoch: 37 Training Loss: 0.016381
Epoch: 38 Training Loss: 0.016496
Epoch: 39 Training Loss: 0.016199
Epoch: 40 Training Loss: 0.014987
Epoch: 41 Training Loss: 0.015019
Epoch: 42 Training Loss: 0.014120
Epoch: 43 Training Loss: 0.013636
Epoch: 44 Training Loss: 0.014085
Epoch: 45 Training Loss: 0.012403
Epoch: 46 Training Loss: 0.012268
Epoch: 47 Training Loss: 0.012353
Epoch: 48 Training Loss: 0.011792
Epoch: 49 Training Loss: 0.012551
Epoch: 50 Training Loss: 0.010580
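###Markdown
A note on the bookkeeping in the loop above: `loss.item()` is the mean loss over one batch, so multiplying by `data.size(0)` and dividing by the dataset size at the end gives a proper per-sample average even when the last batch is smaller. A toy check with made-up numbers:
###Code
# made-up per-batch mean losses and batch sizes (the last batch is smaller)
batch_losses = [0.9, 0.7, 0.5]
batch_sizes = [20, 20, 10]
running_total = sum(l * n for l, n in zip(batch_losses, batch_sizes))
print(running_total / sum(batch_sizes))  # 0.74, the per-sample average
###Output
_____no_output_____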
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, in addition to its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
    # forward pass: move the batch to the GPU the model lives on,
    # then bring the predicted outputs back to the CPU
    output = model(data.cuda(1)).cpu()
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(batch_size):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.055648
Test Accuracy of 0: 98% (970/980)
Test Accuracy of 1: 99% (1126/1135)
Test Accuracy of 2: 98% (1012/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (969/982)
Test Accuracy of 5: 98% (875/892)
Test Accuracy of 6: 98% (943/958)
Test Accuracy of 7: 98% (1008/1028)
Test Accuracy of 8: 97% (948/974)
Test Accuracy of 9: 97% (987/1009)
Test Accuracy (Overall): 98% (9832/10000)
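###Markdown
For clarity, here is a tiny walk-through of the per-class tally used above, on made-up predictions and labels rather than real model outputs.
###Code
import numpy as np
import torch

pred = torch.tensor([1, 2, 2, 7])    # made-up predicted digits
target = torch.tensor([1, 2, 3, 7])  # made-up true digits
correct = np.squeeze(pred.eq(target.data.view_as(pred)))

class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
for i in range(len(target)):
    label = target.data[i]
    class_correct[label] += correct[i].item()
    class_total[label] += 1
print(class_correct[2], class_total[2])  # digit 2: 1 of 1 correct
print(class_correct[3], class_total[3])  # digit 3: 0 of 1 correct
###Output
_____no_output_____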
###Markdown
Visualize Sample Test Results This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs (the model is on the GPU, so move the batch there and bring the outputs back)
output = model(images.cuda(1)).cpu()
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process is broken down into the following steps: 1. Load and visualize the data. 2. Define a neural network. 3. Train the model. 4. Evaluate the performance of the trained model on a test dataset. Before we begin, we import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html) Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
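###Markdown
A brief aside on `transforms.ToTensor()`: it converts an HxWxC uint8 image (values 0-255) into a CxHxW float tensor scaled to [0, 1]. The sketch below checks this on a random fake image (the `fake_img` name is made up for illustration).
###Code
import numpy as np
import torchvision.transforms as transforms

# a fake 28x28 single-channel image with uint8 pixel values in [0, 255]
fake_img = (np.random.rand(28, 28, 1) * 255).astype(np.uint8)
tensor = transforms.ToTensor()(fake_img)
print(tensor.shape, tensor.dtype)                 # torch.Size([1, 28, 28]) torch.float32
print(tensor.min().item(), tensor.max().item())   # both values fall within [0.0, 1.0]
###Output
_____no_output_____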
###Markdown
Visualize a Batch of Training Data The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture takes a 784-dim Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) containing the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
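###Markdown
A quick sanity check (a sketch on a dummy tensor, independent of the training data) of the flatten step in `Net.forward`: a batch of 20 single-channel 28x28 images becomes a (20, 784) matrix before reaching `fc1`.
###Code
import torch

x = torch.zeros(20, 1, 28, 28)  # a dummy batch shaped like the MNIST loader's output
flat = x.view(-1, 28 * 28)      # the same reshape used in Net.forward
print(flat.shape)               # torch.Size([20, 784])
###Output
_____no_output_____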
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
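###Markdown
To see what `zero_grad()`, `backward()`, and `step()` do together, here is a one-parameter sketch (purely illustrative, with a made-up toy loss): SGD moves each parameter by minus the learning rate times its gradient.
###Code
import torch

w = torch.tensor([1.0], requires_grad=True)  # a single toy parameter
opt = torch.optim.SGD([w], lr=0.01)

loss = (3.0 * w).sum()  # d(loss)/dw = 3
opt.zero_grad()         # clear any stale gradients
loss.backward()         # compute w.grad
opt.step()              # w <- w - lr * w.grad
print(w)                # tensor([0.9700], requires_grad=True)
###Output
_____no_output_____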
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables. 2. Forward pass: compute predicted outputs by passing inputs to the model. 3. Calculate the loss. 4. Backward pass: compute the gradient of the loss with respect to model parameters. 5. Perform a single optimization step (parameter update). 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
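###Markdown
A small sketch of why `model.train()` is called before this loop and `model.eval()` before testing: dropout only zeroes activations in training mode (and rescales the survivors), while in evaluation mode it acts as the identity.
###Code
import torch
import torch.nn as nn

torch.manual_seed(0)
drop = nn.Dropout(0.2)
x = torch.ones(8)

drop.train()
print(drop(x))  # some entries zeroed, the rest scaled by 1 / (1 - 0.2) = 1.25
drop.eval()
print(drop(x))  # identity: all ones
###Output
_____no_output_____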
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, in addition to its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test Results This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process is broken down into the following steps: 1. Load and visualize the data. 2. Define a neural network. 3. Train the model. 4. Evaluate the performance of the trained model on a test dataset. Before we begin, we import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html) Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training Data The first step in a classification task is to take a look at the data, make sure it is loaded correctly, and then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = dataiter.next()
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The architecture takes a 784-dim Tensor of pixel values for each image as input and produces a Tensor of length 10 (our number of classes) containing the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross-entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables. 2. Forward pass: compute predicted outputs by passing inputs to the model. 3. Calculate the loss. 4. Backward pass: compute the gradient of the loss with respect to model parameters. 5. Perform a single optimization step (parameter update). 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
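###Markdown
Note that this run averages by `len(train_loader.dataset)` where an earlier run used `len(train_loader.sampler)`; because the default sampler iterates over the whole training set exactly once, the two denominators are equal. A small sketch on synthetic data (the `fake_ds` and `fake_loader` names are made up for illustration):
###Code
import torch
from torch.utils.data import TensorDataset, DataLoader

# synthetic stand-in for the training set
fake_ds = TensorDataset(torch.rand(100, 1, 28, 28), torch.randint(0, 10, (100,)))
fake_loader = DataLoader(fake_ds, batch_size=20)
print(len(fake_loader.sampler), len(fake_loader.dataset))  # 100 100
###Output
_____no_output_____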
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It is also useful to be granular in this analysis and look at how the model performs on each class, in addition to its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no test examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test Results This cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = dataiter.next()
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST --- In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process is broken down into the following steps: 1. Load and visualize the data. 2. Define a neural network. 3. Train the model. 4. Evaluate the performance of the trained model on a test dataset. Before we begin, we import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html) Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time. This cell will create DataLoaders for each of our datasets.
###Code
# The MNIST datasets are hosted on yann.lecun.com, which now sits behind CloudFlare protection
# Run this script to enable the datasets download
# Reference: https://github.com/pytorch/vision/issues/1938
from six.moves import urllib
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'Mozilla/5.0')]
urllib.request.install_opener(opener)
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
/home/oriol/exs/deep-learning-v2-pytorch/venv/lib/python3.8/site-packages/torchvision/datasets/mnist.py:498: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program. (Triggered internally at ../torch/csrc/utils/tensor_numpy.cpp:180.)
return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s)
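###Markdown
Before visualizing, a quick sanity check (a sketch added here, not part of the original notebook) can confirm that the datasets were downloaded and transformed as expected.
###Code
# sketch: confirm dataset sizes and tensor shapes before plotting anything
print("training samples:", len(train_data))     # MNIST ships 60,000 training images
print("test samples:", len(test_data))          # and 10,000 test images
sample_img, sample_label = train_data[0]
print("image tensor shape:", sample_img.shape)  # torch.Size([1, 28, 28]) after ToTensor()
print("pixel range: [%.3f, %.3f]" % (sample_img.min().item(), sample_img.max().item()))
print("first label:", sample_label)
###Output
_____no_output_____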
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes as input a 784-dimensional Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) holding the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2, inplace=False)
)
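###Markdown
To get a feel for the size of the 784 -> 512 -> 512 -> 10 stack defined above, the trainable parameters can be counted directly from `model.parameters()` (a small sketch, not part of the original notebook).
###Code
# sketch: count the trainable parameters of the MLP defined above
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("trainable parameters:", n_params)
# expected: (784*512 + 512) + (512*512 + 512) + (512*10 + 10) = 669,706
###Output
_____no_output_____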
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
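###Markdown
The claim above, that `CrossEntropyLoss` applies a (log-)softmax and then the negative log-likelihood loss, can be checked numerically on a fake batch; the sketch below is an illustration only and plays no role in training.
###Code
# sketch: CrossEntropyLoss is equivalent to LogSoftmax followed by NLLLoss
fake_logits = torch.randn(4, 10)            # pretend raw scores for 4 images
fake_targets = torch.tensor([3, 0, 7, 9])   # pretend true digits
loss_a = nn.CrossEntropyLoss()(fake_logits, fake_targets)
loss_b = nn.NLLLoss()(F.log_softmax(fake_logits, dim=1), fake_targets)
print(loss_a.item(), loss_b.item())         # the two values should match
###Output
_____no_output_____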
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833544
Epoch: 2 Training Loss: 0.321996
Epoch: 3 Training Loss: 0.247905
Epoch: 4 Training Loss: 0.201408
Epoch: 5 Training Loss: 0.169627
Epoch: 6 Training Loss: 0.147488
Epoch: 7 Training Loss: 0.129424
Epoch: 8 Training Loss: 0.116433
Epoch: 9 Training Loss: 0.104333
Epoch: 10 Training Loss: 0.094504
Epoch: 11 Training Loss: 0.085769
Epoch: 12 Training Loss: 0.080728
Epoch: 13 Training Loss: 0.073689
Epoch: 14 Training Loss: 0.067905
Epoch: 15 Training Loss: 0.063251
Epoch: 16 Training Loss: 0.058666
Epoch: 17 Training Loss: 0.055106
Epoch: 18 Training Loss: 0.050979
Epoch: 19 Training Loss: 0.048491
Epoch: 20 Training Loss: 0.046173
Epoch: 21 Training Loss: 0.044311
Epoch: 22 Training Loss: 0.041405
Epoch: 23 Training Loss: 0.038702
Epoch: 24 Training Loss: 0.036634
Epoch: 25 Training Loss: 0.035159
Epoch: 26 Training Loss: 0.033605
Epoch: 27 Training Loss: 0.030255
Epoch: 28 Training Loss: 0.029026
Epoch: 29 Training Loss: 0.028722
Epoch: 30 Training Loss: 0.027026
Epoch: 31 Training Loss: 0.026134
Epoch: 32 Training Loss: 0.022992
Epoch: 33 Training Loss: 0.023809
Epoch: 34 Training Loss: 0.022347
Epoch: 35 Training Loss: 0.021212
Epoch: 36 Training Loss: 0.020292
Epoch: 37 Training Loss: 0.019413
Epoch: 38 Training Loss: 0.019758
Epoch: 39 Training Loss: 0.017851
Epoch: 40 Training Loss: 0.017023
Epoch: 41 Training Loss: 0.016846
Epoch: 42 Training Loss: 0.016187
Epoch: 43 Training Loss: 0.015530
Epoch: 44 Training Loss: 0.014553
Epoch: 45 Training Loss: 0.014781
Epoch: 46 Training Loss: 0.013546
Epoch: 47 Training Loss: 0.013328
Epoch: 48 Training Loss: 0.012698
Epoch: 49 Training Loss: 0.012012
Epoch: 50 Training Loss: 0.012588
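###Markdown
The training loss above keeps falling, but on its own it cannot reveal overfitting. A common extension is to track a validation loss after every epoch; the sketch below assumes a hypothetical `valid_loader` built the same way as `train_loader` (this notebook defines no validation split).
###Code
# sketch: per-epoch validation loss (valid_loader is hypothetical, see note above)
model.eval()                      # dropout off while measuring
valid_loss = 0.0
with torch.no_grad():
    for data, target in valid_loader:
        output = model(data)
        valid_loss += criterion(output, target).item() * data.size(0)
valid_loss = valid_loss / len(valid_loader.dataset)
print('Validation Loss: {:.6f}'.format(valid_loss))
model.train()                     # back to training mode
###Output
_____no_output_____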
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class, as well as looking at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # use the actual batch length (robust if the last batch is smaller)
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.052876
Test Accuracy of 0: 99% (972/980)
Test Accuracy of 1: 99% (1127/1135)
Test Accuracy of 2: 98% (1012/1032)
Test Accuracy of 3: 98% (992/1010)
Test Accuracy of 4: 98% (968/982)
Test Accuracy of 5: 98% (875/892)
Test Accuracy of 6: 98% (946/958)
Test Accuracy of 7: 98% (1010/1028)
Test Accuracy of 8: 97% (949/974)
Test Accuracy of 9: 98% (990/1009)
Test Accuracy (Overall): 98% (9841/10000)
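###Markdown
For a more granular view than per-class accuracy, a confusion matrix shows which digits get mistaken for which. The sketch below is an optional addition that reuses the `model` and `test_loader` defined above.
###Code
# sketch: 10x10 confusion matrix (rows = true digit, columns = predicted digit)
confusion = np.zeros((10, 10), dtype=int)
model.eval()
with torch.no_grad():
    for data, target in test_loader:
        _, pred = torch.max(model(data), 1)
        for t, p in zip(target.view(-1), pred.view(-1)):
            confusion[t.item(), p.item()] += 1
print(confusion)
###Output
_____no_output_____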
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps:>1. Load and visualize the data 2. Define a neural network 3. Train the model 4. Evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes as input a 784-dimensional Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) holding the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
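###Markdown
The same 784 -> 512 -> 512 -> 10 architecture can also be written with `nn.Sequential`; the sketch below (assuming a PyTorch version that provides `nn.Flatten`) is shown only to illustrate the design choice, and the class-based `Net` above is what the rest of the notebook uses.
###Code
# sketch: the same MLP expressed as an nn.Sequential pipeline
seq_model = nn.Sequential(
    nn.Flatten(),                 # 1x28x28 image -> 784-dim vector
    nn.Linear(28 * 28, 512), nn.ReLU(), nn.Dropout(0.2),
    nn.Linear(512, 512), nn.ReLU(), nn.Dropout(0.2),
    nn.Linear(512, 10),
)
print(seq_model)
###Output
_____no_output_____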
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
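###Markdown
Plain SGD with a fixed 0.01 learning rate is only one choice; SGD with momentum and Adam are common alternatives. The sketch below merely constructs them for comparison and is not used by the training loop that follows.
###Code
# sketch: alternative optimizers (constructed for illustration only)
optimizer_momentum = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
optimizer_adam = torch.optim.Adam(model.parameters(), lr=0.001)
print(optimizer_momentum)
print(optimizer_adam)
###Output
_____no_output_____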
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.dataset)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
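###Markdown
After 50 epochs it is usually worth persisting the learned weights so the model does not have to be retrained. The sketch below uses a hypothetical file name `model_mnist.pt` and is not part of the original notebook.
###Code
# sketch: save and reload the trained weights (hypothetical file name)
torch.save(model.state_dict(), 'model_mnist.pt')

reloaded = Net()
reloaded.load_state_dict(torch.load('model_mnist.pt'))
reloaded.eval()   # switch to eval mode before testing the reloaded copy
###Output
_____no_output_____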
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class, as well as looking at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
    for i in range(len(target)):  # use the actual batch length (robust if the last batch is smaller)
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
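###Markdown
One detail worth noting about the test loop above: it runs with autograd enabled. Wrapping the evaluation pass in `torch.no_grad()`, as sketched below, avoids building the computation graph and saves memory and time without changing the results.
###Code
# sketch: the same evaluation pass with gradient tracking disabled
model.eval()
with torch.no_grad():
    for data, target in test_loader:
        output = model(data)
        _, pred = torch.max(output, 1)
        # ...accumulate test_loss / class_correct / class_total exactly as above...
###Output
_____no_output_____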
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____
###Markdown
Multi-Layer Perceptron, MNIST---In this notebook, we will train an MLP to classify images of hand-written digits from the [MNIST database](http://yann.lecun.com/exdb/mnist/). The process will be broken down into the following steps:>1. Load and visualize the data 2. Define a neural network 3. Train the model 4. Evaluate the performance of our trained model on a test dataset! Before we begin, we have to import the necessary libraries for working with data and PyTorch.
###Code
# import libraries
import torch
import numpy as np
###Output
_____no_output_____
###Markdown
--- Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)Downloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.This cell will create DataLoaders for each of our datasets.
###Code
from torchvision import datasets
import torchvision.transforms as transforms
# number of subprocesses to use for data loading
num_workers = 0
# how many samples per batch to load
batch_size = 20
# convert data to torch.FloatTensor
transform = transforms.ToTensor()
# choose the training and test datasets
train_data = datasets.MNIST(root='data', train=True,
download=True, transform=transform)
test_data = datasets.MNIST(root='data', train=False,
download=True, transform=transform)
# prepare data loaders
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
num_workers=num_workers)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size,
num_workers=num_workers)
###Output
_____no_output_____
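###Markdown
Note that the loaders above iterate over the data in a fixed order. For training it is common to shuffle every epoch; the sketch below shows the one-line change (optional, the rest of the notebook works either way).
###Code
# sketch: shuffle the training data every epoch
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,
                                           shuffle=True, num_workers=num_workers)
###Output
_____no_output_____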
###Markdown
Visualize a Batch of Training DataThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.
###Code
import matplotlib.pyplot as plt
%matplotlib inline
# obtain one batch of training images
dataiter = iter(train_loader)
images, labels = next(dataiter)
images = images.numpy()
# plot the images in the batch, along with the corresponding labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
# print out the correct label for each image
# .item() gets the value contained in a Tensor
ax.set_title(str(labels[idx].item()))
###Output
_____no_output_____
###Markdown
View an Image in More Detail
###Code
img = np.squeeze(images[1])
fig = plt.figure(figsize = (12,12))
ax = fig.add_subplot(111)
ax.imshow(img, cmap='gray')
width, height = img.shape
thresh = img.max()/2.5
for x in range(width):
for y in range(height):
val = round(img[x][y],2) if img[x][y] !=0 else 0
ax.annotate(str(val), xy=(y,x),
horizontalalignment='center',
verticalalignment='center',
color='white' if img[x][y]<thresh else 'black')
###Output
_____no_output_____
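###Markdown
As the per-pixel view shows, `ToTensor()` leaves the values in the [0, 1] range. Many MNIST examples additionally normalize with the commonly quoted mean/std of 0.1307/0.3081; the sketch below shows how that transform would be composed (it is not applied anywhere else in this notebook).
###Code
# sketch: optional normalization on top of ToTensor()
normalized_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),  # widely used MNIST mean/std
])
###Output
_____no_output_____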
###Markdown
--- Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html) The network takes as input a 784-dimensional Tensor of pixel values for each image and produces a Tensor of length 10 (our number of classes) holding the class scores for that image. This particular example uses two hidden layers and dropout to avoid overfitting.
###Code
import torch.nn as nn
import torch.nn.functional as F
# define the NN architecture
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# number of hidden nodes in each layer (512)
hidden_1 = 512
hidden_2 = 512
# linear layer (784 -> hidden_1)
self.fc1 = nn.Linear(28 * 28, hidden_1)
# linear layer (n_hidden -> hidden_2)
self.fc2 = nn.Linear(hidden_1, hidden_2)
# linear layer (n_hidden -> 10)
self.fc3 = nn.Linear(hidden_2, 10)
# dropout layer (p=0.2)
# dropout prevents overfitting of data
self.dropout = nn.Dropout(0.2)
def forward(self, x):
# flatten image input
x = x.view(-1, 28 * 28)
# add hidden layer, with relu activation function
x = F.relu(self.fc1(x))
# add dropout layer
x = self.dropout(x)
# add hidden layer, with relu activation function
x = F.relu(self.fc2(x))
# add dropout layer
x = self.dropout(x)
# add output layer
x = self.fc3(x)
return x
# initialize the NN
model = Net()
print(model)
###Output
Net(
(fc1): Linear(in_features=784, out_features=512, bias=True)
(fc2): Linear(in_features=512, out_features=512, bias=True)
(fc3): Linear(in_features=512, out_features=10, bias=True)
(dropout): Dropout(p=0.2)
)
###Markdown
Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html) It's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax function to the output layer *and* then calculates the log loss.
###Code
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
###Output
_____no_output_____
###Markdown
--- Train the Network The steps for training/learning from a batch of data are described in the comments below: 1. Clear the gradients of all optimized variables 2. Forward pass: compute predicted outputs by passing inputs to the model 3. Calculate the loss 4. Backward pass: compute gradient of the loss with respect to model parameters 5. Perform a single optimization step (parameter update) 6. Update the average training loss. The following loop trains for 50 epochs; take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data.
###Code
# number of epochs to train the model
n_epochs = 50
model.train() # prep model for training
for epoch in range(n_epochs):
# monitor training loss
train_loss = 0.0
###################
# train the model #
###################
for data, target in train_loader:
# clear the gradients of all optimized variables
optimizer.zero_grad()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# perform a single optimization step (parameter update)
optimizer.step()
# update running training loss
train_loss += loss.item()*data.size(0)
# print training statistics
# calculate average loss over an epoch
train_loss = train_loss/len(train_loader.sampler)
print('Epoch: {} \tTraining Loss: {:.6f}'.format(
epoch+1,
train_loss
))
###Output
Epoch: 1 Training Loss: 0.833473
Epoch: 2 Training Loss: 0.327687
Epoch: 3 Training Loss: 0.253410
Epoch: 4 Training Loss: 0.207910
Epoch: 5 Training Loss: 0.175181
Epoch: 6 Training Loss: 0.150914
Epoch: 7 Training Loss: 0.132632
Epoch: 8 Training Loss: 0.118618
Epoch: 9 Training Loss: 0.106204
Epoch: 10 Training Loss: 0.096267
Epoch: 11 Training Loss: 0.087913
Epoch: 12 Training Loss: 0.080274
Epoch: 13 Training Loss: 0.074449
Epoch: 14 Training Loss: 0.068821
Epoch: 15 Training Loss: 0.064086
Epoch: 16 Training Loss: 0.059503
Epoch: 17 Training Loss: 0.055954
Epoch: 18 Training Loss: 0.053344
Epoch: 19 Training Loss: 0.049000
Epoch: 20 Training Loss: 0.045827
Epoch: 21 Training Loss: 0.043791
Epoch: 22 Training Loss: 0.040171
Epoch: 23 Training Loss: 0.039099
Epoch: 24 Training Loss: 0.036421
Epoch: 25 Training Loss: 0.035540
Epoch: 26 Training Loss: 0.032844
Epoch: 27 Training Loss: 0.031552
Epoch: 28 Training Loss: 0.029653
Epoch: 29 Training Loss: 0.027701
Epoch: 30 Training Loss: 0.026159
Epoch: 31 Training Loss: 0.025966
Epoch: 32 Training Loss: 0.025283
Epoch: 33 Training Loss: 0.024035
Epoch: 34 Training Loss: 0.022611
Epoch: 35 Training Loss: 0.020826
Epoch: 36 Training Loss: 0.019852
Epoch: 37 Training Loss: 0.019876
Epoch: 38 Training Loss: 0.018899
Epoch: 39 Training Loss: 0.018174
Epoch: 40 Training Loss: 0.017065
Epoch: 41 Training Loss: 0.016415
Epoch: 42 Training Loss: 0.016385
Epoch: 43 Training Loss: 0.015594
Epoch: 44 Training Loss: 0.014499
Epoch: 45 Training Loss: 0.013917
Epoch: 46 Training Loss: 0.014068
Epoch: 47 Training Loss: 0.013215
Epoch: 48 Training Loss: 0.012838
Epoch: 49 Training Loss: 0.012641
Epoch: 50 Training Loss: 0.012142
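###Markdown
The loop above keeps the learning rate fixed at 0.01 for all 50 epochs. A step-decay schedule is a common refinement; the sketch below only illustrates the scheduler API and is not part of the run whose losses are printed above.
###Code
# sketch: halve the learning rate every 10 epochs with StepLR
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
for epoch in range(n_epochs):
    # ...run one training epoch exactly as in the cell above...
    scheduler.step()
print('final learning rate:', scheduler.get_last_lr())
###Output
_____no_output_____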
###Markdown
--- Test the Trained Network Finally, we test our best model on previously unseen **test data** and evaluate its performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class, as well as looking at its overall loss and accuracy.
###Code
# initialize lists to monitor test loss and accuracy
test_loss = 0.0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
model.eval() # prep model for evaluation
for data, target in test_loader:
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
# calculate the loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
# compare predictions to true label
correct = np.squeeze(pred.eq(target.data.view_as(pred)))
# calculate test accuracy for each object class
for i in range(len(target)):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# calculate and print avg test loss
test_loss = test_loss/len(test_loader.sampler)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(10):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
str(i), 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
        print('Test Accuracy of %5s: N/A (no training examples)' % (str(i)))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (
100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
###Output
Test Loss: 0.071405
Test Accuracy of 0: 98% (968/980)
Test Accuracy of 1: 98% (1123/1135)
Test Accuracy of 2: 97% (1008/1032)
Test Accuracy of 3: 98% (994/1010)
Test Accuracy of 4: 98% (965/982)
Test Accuracy of 5: 98% (878/892)
Test Accuracy of 6: 97% (935/958)
Test Accuracy of 7: 96% (997/1028)
Test Accuracy of 8: 96% (941/974)
Test Accuracy of 9: 97% (982/1009)
Test Accuracy (Overall): 97% (9791/10000)
###Markdown
Visualize Sample Test ResultsThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.
###Code
# obtain one batch of test images
dataiter = iter(test_loader)
images, labels = next(dataiter)
# get sample outputs
output = model(images)
# convert output probabilities to predicted class
_, preds = torch.max(output, 1)
# prep images for display
images = images.numpy()
# plot the images in the batch, along with predicted and true labels
fig = plt.figure(figsize=(25, 4))
for idx in np.arange(20):
    ax = fig.add_subplot(2, 20//2, idx+1, xticks=[], yticks=[])
ax.imshow(np.squeeze(images[idx]), cmap='gray')
ax.set_title("{} ({})".format(str(preds[idx].item()), str(labels[idx].item())),
color=("green" if preds[idx]==labels[idx] else "red"))
###Output
_____no_output_____ |
notebooks/lightgbm-minimal.ipynb | ###Markdown
Scikit-Learn style API
###Code
import lightgbm as lgb

# X_train, X_test, y_train and y_test are assumed to come from an earlier data-loading cell
gbm = lgb.LGBMRegressor(objective='regression',
                        num_leaves=31,
                        learning_rate=0.05,
                        n_estimators=20)
# early_stopping_rounds is the pre-4.0 fit() argument; newer LightGBM versions
# use callbacks=[lgb.early_stopping(5)] instead
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric='l1',
        early_stopping_rounds=5)
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
print('Feature importances:', list(gbm.feature_importances_))
###Output
Feature importances: [23, 7, 0, 33, 5, 56, 9, 1, 1, 21, 2, 5, 1, 19, 9, 6, 1, 10, 4, 10, 0, 31, 61, 4, 48, 102, 52, 79]
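###Markdown
A quick way to judge the fit is the RMSE of `y_pred` against the held-out targets; the sketch below assumes the `y_test` array from the notebook's data-loading step, just as the cell above assumes `X_train` and `y_train`.
###Code
# sketch: evaluate the scikit-learn style model
from sklearn.metrics import mean_squared_error

print('RMSE of prediction:', mean_squared_error(y_test, y_pred) ** 0.5)
###Output
_____no_output_____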
###Markdown
Advanced Example
###Code
import json
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
try:
import cPickle as pickle
except BaseException:
import pickle
# load or create your dataset
print('Load data...')
df_train = pd.read_csv('../../LightGBM/examples/binary_classification/binary.train', header=None, sep='\t')
df_test = pd.read_csv('../../LightGBM/examples/binary_classification/binary.test', header=None, sep='\t')
W_train = pd.read_csv('../../LightGBM/examples/binary_classification/binary.train.weight', header=None)[0]
W_test = pd.read_csv('../../LightGBM/examples/binary_classification/binary.test.weight', header=None)[0]
y_train = df_train[0].values
y_test = df_test[0].values
X_train = df_train.drop(0, axis=1).values
X_test = df_test.drop(0, axis=1).values
num_train, num_feature = X_train.shape
# create dataset for lightgbm
# if you want to re-use data, remember to set free_raw_data=False
lgb_train = lgb.Dataset(X_train, y_train,
weight=W_train, free_raw_data=False)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train,
weight=W_test, free_raw_data=False)
# specify your configurations as a dict
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'binary_logloss',
'num_leaves': 31,
'learning_rate': 0.05,
'feature_fraction': 0.9,
'bagging_fraction': 0.8,
'bagging_freq': 5,
'verbose': 0
}
# generate a feature name
feature_name = ['feature_' + str(col) for col in range(num_feature)]
print('Start training...')
# feature_name and categorical_feature
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train, # eval training data
feature_name=feature_name,
categorical_feature=[21])
# check feature name
print('Finish first 10 rounds...')
print('7th feature name is:', repr(lgb_train.feature_name[6]))
# save model to file
gbm.save_model('model.txt')
# dump model to JSON (and save to file)
print('Dump model to JSON...')
model_json = gbm.dump_model()
with open('model.json', 'w+') as f:
json.dump(model_json, f, indent=4)
# feature names
print('Feature names:', gbm.feature_name())
# feature importances
print('Feature importances:', list(gbm.feature_importance()))
# load model to predict
print('Load model to predict')
bst = lgb.Booster(model_file='model.txt')
# can only predict with the best iteration (or the saving iteration)
y_pred = bst.predict(X_test)
# eval with loaded model
print('The rmse of loaded model\'s prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
# dump model with pickle
with open('model.pkl', 'wb') as fout:
pickle.dump(gbm, fout)
# load model with pickle to predict
with open('model.pkl', 'rb') as fin:
pkl_bst = pickle.load(fin)
# can predict with any iteration when loaded in pickle way
y_pred = pkl_bst.predict(X_test, num_iteration=7)
# eval with loaded model
print('The rmse of pickled model\'s prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)
# continue training
# init_model accepts:
# 1. model file name
# 2. Booster()
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model='model.txt',
valid_sets=lgb_eval)
print('Finish 10 - 20 rounds with model file...')
# decay learning rates
# learning_rates accepts:
# 1. list/tuple with length = num_boost_round
# 2. function(curr_iter)
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
learning_rates=lambda iter: 0.05 * (0.99 ** iter),
valid_sets=lgb_eval)
print('Finish 20 - 30 rounds with decay learning rates...')
# change other parameters during training
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
valid_sets=lgb_eval,
callbacks=[lgb.reset_parameter(bagging_fraction=[0.7] * 5 + [0.6] * 5)])
print('Finish 30 - 40 rounds with changing bagging_fraction...')
# self-defined objective function
# f(preds: array, train_data: Dataset) -> grad: array, hess: array
# log likelihood loss
def loglikelihood(preds, train_data):
labels = train_data.get_label()
preds = 1. / (1. + np.exp(-preds))
grad = preds - labels
hess = preds * (1. - preds)
return grad, hess
# self-defined eval metric
# f(preds: array, train_data: Dataset) -> name: string, eval_result: float, is_higher_better: bool
# binary error
def binary_error(preds, train_data):
labels = train_data.get_label()
return 'error', np.mean(labels != (preds > 0.5)), False
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
init_model=gbm,
                fobj=loglikelihood,
feval=binary_error,
valid_sets=lgb_eval)
print('Finish 40 - 50 rounds with self-defined objective function and eval metric...')
print('Start a new training job...')
# callback
def reset_metrics():
def callback(env):
lgb_eval_new = lgb.Dataset(X_test, y_test, reference=lgb_train)
if env.iteration - env.begin_iteration == 5:
print('Add a new valid dataset at iteration 5...')
env.model.add_valid(lgb_eval_new, 'new valid')
callback.before_iteration = True
callback.order = 0
return callback
gbm = lgb.train(params,
lgb_train,
num_boost_round=10,
valid_sets=lgb_train,
callbacks=[reset_metrics()])
print('Finish first 10 rounds with callback function...')
###Output
Load data...
Start training...
[1] training's binary_logloss: 0.680298
[2] training's binary_logloss: 0.672021
[3] training's binary_logloss: 0.664444
[4] training's binary_logloss: 0.655536
[5] training's binary_logloss: 0.647375
[6] training's binary_logloss: 0.640788
[7] training's binary_logloss: 0.635012
[8] training's binary_logloss: 0.628454
[9] training's binary_logloss: 0.622423
[10] training's binary_logloss: 0.616808
Finish first 10 rounds...
7th feature name is: 'feature_6'
Dump model to JSON...
Feature names: ['feature_0', 'feature_1', 'feature_2', 'feature_3', 'feature_4', 'feature_5', 'feature_6', 'feature_7', 'feature_8', 'feature_9', 'feature_10', 'feature_11', 'feature_12', 'feature_13', 'feature_14', 'feature_15', 'feature_16', 'feature_17', 'feature_18', 'feature_19', 'feature_20', 'feature_21', 'feature_22', 'feature_23', 'feature_24', 'feature_25', 'feature_26', 'feature_27']
Feature importances: [8, 4, 0, 19, 8, 36, 3, 0, 2, 10, 5, 1, 0, 9, 5, 3, 0, 2, 2, 5, 1, 0, 35, 3, 28, 45, 31, 35]
Load model to predict
The rmse of loaded model's prediction is: 0.461818980951
The rmse of pickled model's prediction is: 0.46989528982
[11] valid_0's binary_logloss: 0.613941
[12] valid_0's binary_logloss: 0.610317
[13] valid_0's binary_logloss: 0.606257
[14] valid_0's binary_logloss: 0.601789
[15] valid_0's binary_logloss: 0.597803
[16] valid_0's binary_logloss: 0.594579
[17] valid_0's binary_logloss: 0.590794
[18] valid_0's binary_logloss: 0.58741
[19] valid_0's binary_logloss: 0.584296
[20] valid_0's binary_logloss: 0.581739
Finish 10 - 20 rounds with model file...
[21] valid_0's binary_logloss: 0.613941
[22] valid_0's binary_logloss: 0.610352
[23] valid_0's binary_logloss: 0.60637
[24] valid_0's binary_logloss: 0.602024
[25] valid_0's binary_logloss: 0.598221
[26] valid_0's binary_logloss: 0.595039
[27] valid_0's binary_logloss: 0.591429
[28] valid_0's binary_logloss: 0.588352
[29] valid_0's binary_logloss: 0.585486
[30] valid_0's binary_logloss: 0.582613
Finish 20 - 30 rounds with decay learning rates...
[31] valid_0's binary_logloss: 0.614007
[32] valid_0's binary_logloss: 0.610266
[33] valid_0's binary_logloss: 0.6069
[34] valid_0's binary_logloss: 0.60323
[35] valid_0's binary_logloss: 0.599283
[36] valid_0's binary_logloss: 0.597029
[37] valid_0's binary_logloss: 0.593506
[38] valid_0's binary_logloss: 0.590785
[39] valid_0's binary_logloss: 0.587927
[40] valid_0's binary_logloss: 0.585659
Finish 30 - 40 rounds with changing bagging_fraction...
[41] valid_0's binary_logloss: 4.5024 valid_0's error: 0.406
[42] valid_0's binary_logloss: 4.42402 valid_0's error: 0.394
[43] valid_0's binary_logloss: 4.28182 valid_0's error: 0.396
[44] valid_0's binary_logloss: 4.45371 valid_0's error: 0.386
[45] valid_0's binary_logloss: 4.50148 valid_0's error: 0.38
[46] valid_0's binary_logloss: 4.86515 valid_0's error: 0.376
[47] valid_0's binary_logloss: 4.73269 valid_0's error: 0.372
[48] valid_0's binary_logloss: 4.91303 valid_0's error: 0.37
[49] valid_0's binary_logloss: 4.84746 valid_0's error: 0.376
[50] valid_0's binary_logloss: 5.09557 valid_0's error: 0.368
Finish 40 - 50 rounds with self-defined objective function and eval metric...
Start a new training job...
[1] training's binary_logloss: 0.611569
[2] training's binary_logloss: 0.607464
[3] training's binary_logloss: 0.603421
[4] training's binary_logloss: 0.598912
[5] training's binary_logloss: 0.594808
Add a new valid dataset at iteration 5...
[6] training's binary_logloss: 0.590757 new valid's binary_logloss: 0.663743
[7] training's binary_logloss: 0.587491 new valid's binary_logloss: 0.659936
[8] training's binary_logloss: 0.5838 new valid's binary_logloss: 0.655852
[9] training's binary_logloss: 0.58033 new valid's binary_logloss: 0.651817
[10] training's binary_logloss: 0.576876 new valid's binary_logloss: 0.648346
Finish first 10 rounds with callback function...
|
notebooks/draw_prominent_modes_basestack_1.ipynb | ###Markdown
Part 1: Prepare s-agent
###Code
host = 'gcgc_21mer'
interval_time = 500
s_agent = StackMeanModeAgent(host, rootfolder, interval_time)
s_agent.load_mean_mode_laplacian_from_npy()
s_agent.eigen_decompose()
s_agent.initialize_nodes_information()
s_agent.split_node_list_into_two_strand()
s_agent.set_benchmark_array()
s_agent.set_strand_array()
###Output
Load laplacian_mat from /home/ytcdata/bigtraj_fluctmatch/500ns/gcgc_21mer/mean_mode_npy/laplacian.npy
Thare are 399 nodes.
Total number of nodes: 399
There are 201 eigenvectors belonging to STRAND1.
There are 198 eigenvectors belonging to STRAND2.
Sum of two strands: 399
###Markdown
Part 2: Prepare b-agent
###Code
base_type = 'C' # 'A', 'T', 'C', 'G'
strand_id = 'STRAND2' # 'STRAND1', 'STRAND2'
b_agent = BaseTypeEigenvector(host, base_type, strand_id, s_agent)
#b_agent.get_d_idx()
###Output
_____no_output_____
###Markdown
Part 3: Assign the index of eigenvector and Plot
###Code
eigv_id = 29
strand_id_by_eigv_id = s_agent.decide_eigenvector_strand_by_strand_array(eigv_id)
if strand_id_by_eigv_id != strand_id:
print('STRAND-ID is not consistent!!!!!')
else:
print('STRAND-ID is consistent.')
figsize = (24, 6)
a_agent = AtomSeparatePlot(host, base_type, figsize)
ylims = (-0.570, 0.477) # None or (ymin, ymax)
ylims = None
fig, axes = a_agent.plot_eigenvector_by_eigv_id(eigv_id, s_agent, b_agent, ylims)
plt.tight_layout()
#plt.savefig(f'/home/yizaochen/Desktop/drawzone_temp/{host}-{eigv_id}.png', dpi=200, transparent=False)
plt.show()
eigv_id = 15
strand_id_by_eigv_id = s_agent.decide_eigenvector_strand_by_strand_array(eigv_id)
if strand_id_by_eigv_id != strand_id:
print('STRAND-ID is not consistent!!!!!')
else:
print('STRAND-ID is consistent.')
figsize = (24, 6)
a_agent = AtomSeparatePlot(host, base_type, figsize)
ylims = (-0.570, 0.477) # None or (ymin, ymax)
ylims = None
fig, axes = a_agent.plot_eigenvector_by_eigv_id(eigv_id, s_agent, b_agent, ylims)
plt.tight_layout()
plt.savefig(f'/home/yizaochen/Desktop/drawzone_temp/{host}-{eigv_id}.png', dpi=200, transparent=False)
plt.show()
###Output
STRAND-ID is consistent.
(-0.564, 0.361)
###Markdown
Part 4: Mixing-Plot
###Code
strand_id = 'STRAND2' # 'STRAND1', 'STRAND2'
b_agent = StrandEigenvector(host, strand_id, s_agent)
eigv_id = 19
strand_id_by_eigv_id = s_agent.decide_eigenvector_strand_by_strand_array(eigv_id)
if strand_id_by_eigv_id != strand_id:
print('STRAND-ID is not consistent!!!!!')
else:
print('STRAND-ID is consistent.')
figsize = (24, 6)
a_agent = AtomSeparatePlotPurPyr(host, figsize)
ylims = (-0.570, 0.477) # None or (ymin, ymax)
ylims = None
fig, axes = a_agent.plot_eigenvector_by_eigv_id(eigv_id, s_agent, b_agent, ylims)
plt.tight_layout()
plt.savefig(f'/home/yizaochen/Desktop/drawzone_temp/{host}-{eigv_id}.png', dpi=200, transparent=False)
plt.show()
###Output
STRAND-ID is consistent.
(-0.148, 0.583)
|
Code/Finetuning/0_finetune_dbrd_debias_bert__DEBIASING.ipynb | ###Markdown
Change per model:
1. Model imports (e.g. BertTokenizer -> RobertaTokenizer) - model name
2. File where bias_subspace is.
3. Examples files - load_and_cache_examples("bert", tokenizer, "../../Rodrigo-data/Finetuning/dbrd/train")
4. Change name of file_pickle_stats to save stats and don't override
5. Where we save the model - output_dir = "../../Rodrigo-data/Finetuning/dbrd/v2/bert_debiased/"
###Code
tokenizer = BertTokenizer.from_pretrained("wietsedv/bert-base-dutch-cased")
# tokenizer = RobertaTokenizer.from_pretrained("pdelobelle/robBERT-base")
model = BertForSequenceClassification.from_pretrained("wietsedv/bert-base-dutch-cased")
logging.info("loaded BERTJe")
###Output
INFO:debias_transformers.configuration_utils:loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/wietsedv/bert-base-dutch-cased/config.json from cache at /home/ubuntu/.cache/torch/transformers/6702c5c53edb76b65d71f73ff2d9811ba62f16257ea58e36dedceffd71290a6a.1a78bd120fe46d78b55efa59f4ffa1dafcc9242743ab9fd6629d1b56672c9119
INFO:debias_transformers.configuration_utils:Model config BertConfig {
"architectures": [
"BertForMaskedLM"
],
"attention_probs_dropout_prob": 0.1,
"gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 12,
"num_hidden_layers": 12,
"pad_token_id": 3,
"type_vocab_size": 2,
"vocab_size": 30000
}
INFO:debias_transformers.modeling_utils:loading weights file https://cdn.huggingface.co/wietsedv/bert-base-dutch-cased/pytorch_model.bin from cache at /home/ubuntu/.cache/torch/transformers/e5754f612ca0f16edba5b775fdddba806751f5e4b87c5e7f16cc0c8d8d17df4d.b7c03627733fd0712f078a4d3a31ad964550f50a6113efdf874ecbcf5ddf6b53
WARNING:debias_transformers.modeling_utils:Some weights of the model checkpoint at wietsedv/bert-base-dutch-cased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias']
- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPretraining model).
- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
WARNING:debias_transformers.modeling_utils:Some weights of BertForSequenceClassification were not initialized from the model checkpoint at wietsedv/bert-base-dutch-cased and are newly initialized: ['classifier.weight', 'classifier.bias']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
INFO:root:loaded BERTJe
###Markdown
Create a training config
###Code
from train_config import Config
config = Config()
config.model_type = 'bert'
config.evaluate_dataset = "../../Rodrigo-data/Finetuning/dbrd/eval"
###Output
_____no_output_____
###Markdown
Create bias direction
###Code
#Get bias dir
# Rodrigo-data/bias_subspace/bias_subspace_k10_nl_large30000k.npy
# file_bias_subspace = "../../Rodrigo-data/bias_subspace/Robbert_ForFinetune-bias_subspace_k50_nl_large30000k" #Robbert bias
file_bias_subspace = "../../Rodrigo-data/bias_subspace/bias_subspace_k50_nl_large30000k" #Bert bias
bias_subspace = sen_debias.get_bias_direction(model,tokenizer, #reload from cache
from_cache = True,
filename_cache= file_bias_subspace)
bias_direction = bias_subspace[0] #just use the direction for testing
bias_direction = torch.tensor(bias_direction).to(torch.device("cuda"))
bias_direction.shape
###Output
_____no_output_____
###Markdown
Load in the data

We use `load_and_cache_examples` to load in the training data; this produces a tokenized version with torch tensors.

For the test and evaluation sets, we'll use the following `evaluate` function, since in the end we're interested in a dataframe with all inputs and predictions.
###Code
def evaluate(dataset, model,debias,bias_dir):
df = pd.read_table(dataset + ".labels.txt", header=None, names=['labels'])
df['sentence'] = pd.read_table(dataset + ".sentences.txt", header=None, names=['sentence'])
model.eval() # disable dropout etc.
mask_padding_with_zero = True
block_size = 512
results = []
# file_bias_subspace = "../Rodrigo-data/bias_subspace/bias_subspace_k"+str(k_dim_file)+"_nl_large"+str(pairs_aumount)+"k"
# bias_dir = sen_debias.get_bias_direction(model_nl,tokenizer_nl,from_cache = True,filename_cache= file_bias_subspace)
for row in tqdm(df.iterrows(), total=len(df), mininterval=1, position=1, leave=True):
index = row[0]
sentence = row[1]['sentence']
label = row[1]['labels']
#tokens = roberta.encode(sentence)
tokenized_text = tokenizer.tokenize(sentence)[-block_size + 3: -1]
        if len(tokenized_text) == 0:  # avoid sentences that couldn't be tokenized in BERTje
continue
tokenized_text = tokenizer.encode(tokenized_text)
# tokenized_text = tokenizer.encode(tokenizer.tokenize(sentence)[- block_size + 3 : -1])
input_mask = [1 if mask_padding_with_zero else 0] * len(tokenized_text)
pad_token = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
while len(tokenized_text) < block_size:
tokenized_text.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
#segment_ids.append(pad_token_segment_id)
#p_mask.append(1)
#self.examples.append([tokenizer.build_inputs_with_special_tokens(tokenized_text[0 : block_size]), [0], [0]])
batch = tuple(torch.tensor(t).to(torch.device("cuda")) for t in [tokenized_text[0 : block_size - 3], input_mask[0 : block_size- 3], [0], [1] if label else [0]])
inputs = {"input_ids": batch[0].unsqueeze(0), "attention_mask": batch[1].unsqueeze(0), "labels": batch[3].unsqueeze(0),
"remove_bias":debias,"bias_dir":bias_direction,
"token_type_ids" : batch[2].unsqueeze(0)
}
with torch.no_grad():
outputs = model(**inputs)
results.append({"true": label, "predicted": outputs[1][0].argmax().item()})
model.train() # make sure the model is back in training mode
return results
train_dataset = load_and_cache_examples("bert", tokenizer, "../../Rodrigo-data/Finetuning/dbrd/train")
model.train()
logging.info("Put BERT in training mode")
# You can use the following code to adjust the training size to experiment with the benefits of pretraining. It will not likely get better though ...
# train_dataset.examples = train_dataset.examples[:100]
# 46min after iteration 1:13
DEBIAS_BOOL = True #Used to indicate whether to debias or not accross the notebook
# Random interesting insight, num examples with bertje is 19525 while in robbert is 19528, this is probably due to some bad sentences
# in the data which robbert could tokenize while bert tokenizer kept crashing so had to avoid the tokenized sentences with 0 elements. -Rodrigo
RobBERTTrainer.train(config, train_dataset, model, tokenizer, evaluate,debias=DEBIAS_BOOL, bias_dir=bias_direction)
###Output
INFO:train:***** Running training *****
INFO:train: Num examples = 19525
INFO:train: Num Epochs = 4
INFO:train: Instantaneous batch size per GPU = 4
INFO:train: Total train batch size (w. parallel, distributed & accumulation) = 32
INFO:train: Gradient Accumulation steps = 8
INFO:train: Total optimization steps = 2000
Iteration: 100%|██████████| 4882/4882 [11:15<00:00, 7.22it/s, learning_rate=3.97e-5, loss=0.394, step=610]
0%| | 0/500 [00:00<?, ?it/s][A
11%|█▏ | 57/500 [00:01<00:07, 56.27it/s][A
22%|██▏ | 112/500 [00:02<00:06, 55.75it/s][A
33%|███▎ | 167/500 [00:03<00:06, 55.19it/s][A
45%|████▍ | 224/500 [00:04<00:04, 55.70it/s][A
56%|█████▌ | 279/500 [00:05<00:03, 55.31it/s][A
67%|██████▋ | 335/500 [00:06<00:02, 55.38it/s][A
78%|███████▊ | 388/500 [00:07<00:02, 54.27it/s][A
89%|████████▊ | 443/500 [00:08<00:01, 54.27it/s][A
100%|██████████| 500/500 [00:09<00:00, 54.71it/s]
INFO:train:Results on eval: {'eval_f1_macro': 0.9278415116166971, 'eval_acc_macro': 0.9278557114228457, 'eval_acc_overall': 0.9278557114228457}
Iteration: 100%|██████████| 4882/4882 [11:17<00:00, 7.21it/s, learning_rate=2.23e-5, loss=0.345, step=1220]
0%| | 0/500 [00:00<?, ?it/s][A
11%|█ | 55/500 [00:01<00:09, 45.94it/s][A
22%|██▏ | 110/500 [00:02<00:08, 48.31it/s][A
33%|███▎ | 165/500 [00:03<00:06, 49.94it/s][A
44%|████▍ | 222/500 [00:04<00:05, 51.80it/s][A
55%|█████▌ | 277/500 [00:05<00:04, 52.61it/s][A
67%|██████▋ | 333/500 [00:06<00:03, 53.55it/s][A
77%|███████▋ | 386/500 [00:07<00:02, 53.24it/s][A
88%|████████▊ | 441/500 [00:08<00:01, 53.46it/s][A
100%|██████████| 500/500 [00:09<00:00, 53.54it/s]
INFO:train:Results on eval: {'eval_f1_macro': 0.9619085499857374, 'eval_acc_macro': 0.9619238476953907, 'eval_acc_overall': 0.9619238476953907}
Iteration: 100%|██████████| 4882/4882 [11:16<00:00, 7.22it/s, learning_rate=4.86e-6, loss=0.234, step=1830]
0%| | 0/500 [00:00<?, ?it/s][A
11%|█ | 56/500 [00:01<00:07, 55.95it/s][A
22%|██▏ | 111/500 [00:02<00:07, 55.51it/s][A
33%|███▎ | 166/500 [00:03<00:06, 54.99it/s][A
44%|████▍ | 222/500 [00:04<00:05, 55.20it/s][A
55%|█████▌ | 277/500 [00:05<00:04, 54.89it/s][A
67%|██████▋ | 333/500 [00:06<00:03, 55.17it/s][A
77%|███████▋ | 387/500 [00:07<00:02, 54.46it/s][A
88%|████████▊ | 441/500 [00:08<00:01, 54.27it/s][A
100%|██████████| 500/500 [00:09<00:00, 54.60it/s]
INFO:train:Results on eval: {'eval_f1_macro': 0.9819636381161219, 'eval_acc_macro': 0.9819639278557114, 'eval_acc_overall': 0.9819639278557114}
Iteration: 28%|██▊ | 1367/4882 [03:09<08:06, 7.22it/s, learning_rate=0, loss=0.147, step=2001]
Epoch: 75%|███████▌ | 3/4 [37:26<12:28, 748.95s/it]
###Markdown
Evaluate our trained model
###Code
model.eval()
logging.info("putting model in eval mode")
results = pd.DataFrame(evaluate("../../Rodrigo-data/Finetuning/dbrd/test", model,debias=DEBIAS_BOOL, bias_dir=bias_direction))
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0, aspect="equal")
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.ylim(len(tick_marks) - 0.5, -0.5)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
cnf_matrix = confusion_matrix(results['true'], results['predicted'] ) # index to evaluate partial runs
np.set_printoptions(precision=2)
title="Confusion matrix"
# Plot normalized confusion matrix
fig = plt.figure(figsize=(5,5), dpi=100)
#fig.set_size_inches(6,6)
plot_confusion_matrix(cnf_matrix, classes=["Negative", "Positive"],normalize=False,
title=title, cmap=plt.cm.Blues)
cm = ConfusionMatrix(actual_vector=results['true'].values, predict_vector=results['predicted'].values )
cm.stat()
import pickle
# file_pickle_stats = "../../Rodrigo-data/Finetuning/dbrd/v2/cm_stat_bertje_debiased"
file_pickle_stats = "../../Rodrigo-data/Finetuning/dbrd/v2/cm_stat_bertje_debiased"
pickle.dump( cm, open(file_pickle_stats+".p", "wb" ))
st_test =pickle.load( open(file_pickle_stats+".p", "rb" ))
cm.save_csv(file_pickle_stats) #save csv for easy to read
###Output
_____no_output_____
###Markdown
Saving the model
###Code
from transformers import WEIGHTS_NAME, CONFIG_NAME
# output_dir = "../../Rodrigo-data/Finetuning/dbrd/v2/bert_debiased/"
output_dir = "../../Rodrigo-data/Finetuning/dbrd/v2/bert_debiased/"
# Step 1: Save a model, configuration and vocabulary that you have fine-tuned
# If we have a distributed model, save only the encapsulated model
# (it was wrapped in PyTorch DistributedDataParallel or DataParallel)
model_to_save = model.module if hasattr(model, 'module') else model
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_dir)
###Output
_____no_output_____ |
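###Markdown
As a sketch of how the saved artifacts could be reloaded later (an illustration only, assuming the same `output_dir` and the model/tokenizer classes imported at the top of this notebook):
###Code
# Hypothetical reload sketch: from_pretrained picks up the config.json, pytorch_model.bin and vocab saved above
model_reloaded = BertForSequenceClassification.from_pretrained(output_dir)
tokenizer_reloaded = BertTokenizer.from_pretrained(output_dir)
model_reloaded.to(torch.device("cuda"))
###Output
_____no_output_____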
utils/read_npy.ipynb | ###Markdown
Read .npy and visualize (use it with python3 and matplotlib)
###Code
# %matplotlib
# %matplotlib notebook
# %matplotlib inline
## import statements
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from colour import Color
from sklearn.neighbors import KernelDensity
## Inputs
log_name = "log_airplane_se_skip"
dir_path = os.getcwd()
LOG_DIR = os.path.join(os.path.dirname(dir_path), "logs", log_name)
PRED_DIR = os.path.join(LOG_DIR, "preds.npy")
if not os.path.exists(PRED_DIR):
print("Run test.py first!")
# ! python test.py --model model_se_skip --model_path logs/log_airplane_se_skip/model.ckpt --category Airplane
## Read data
data = np.load(PRED_DIR)
## Save the first point cloud as obj file
data1 = data[0,:,:]
out_path_temp = os.path.join(os.path.dirname(PRED_DIR), "one_out_temp.txt")
out_path = os.path.join(os.path.dirname(PRED_DIR), "one_out.obj")
np.savetxt(out_path_temp, data1, delimiter=" ")
f = open(out_path_temp, "r")
f_out = open(out_path, "w")
lines = f.readlines()
for line in lines:
f_out.write("v " + line)
f.close()
f_out.close()
os.remove(out_path_temp)
print("Obj file is created at ", out_path)
## Slice the data
all_0 = data[:,0,:]
all_1 = data[:,1,:]
## Find most front vertex
num_of_models = data.shape[0]
# Create arrays to keep the info for all the models
max_z_ind_arr = np.empty((num_of_models,1), dtype="int32") # right
min_z_ind_arr = np.empty((num_of_models,1), dtype="int32") # left
max_y_ind_arr = np.empty((num_of_models,1), dtype="int32") # bottom
min_y_ind_arr = np.empty((num_of_models,1), dtype="int32") # top
max_x_ind_arr = np.empty((num_of_models,1), dtype="int32") # front
min_x_ind_arr = np.empty((num_of_models,1), dtype="int32") # back
for i in range(num_of_models):
id = i
max_z_ind_arr[i] = np.argmax(data[id, :, 2]) # right
min_z_ind_arr[i] = np.argmin(data[id, :, 2]) # left
max_y_ind_arr[i] = np.argmax(data[id, :, 1]) # bottom
min_y_ind_arr[i] = np.argmin(data[id, :, 1]) # top
max_x_ind_arr[i] = np.argmax(data[id, :, 0]) # front
min_x_ind_arr[i] = np.argmin(data[id, :, 0]) # back
# Choose the max values
max_z_ind = np.max(max_z_ind_arr) # right
print("Max z ind: ", max_z_ind)
min_z_ind = np.max(min_z_ind_arr) # left
print("Min z ind: ", min_z_ind)
max_y_ind = np.max(max_y_ind_arr) # bottom
print("Max y ind: ", max_y_ind)
min_y_ind = np.max(min_y_ind_arr) # top
print("Min y ind: ", min_y_ind)
max_x_ind = np.max(max_x_ind_arr) # front
print("Max x ind: ", max_x_ind)
min_x_ind = np.max(min_x_ind_arr) # back
print("Min x ind: ", min_x_ind)
# Add to dictionary
max_values = {}
max_values["front"] = max_x_ind
max_values["back"] = min_x_ind
max_values["right"] = max_z_ind
max_values["left"] = min_z_ind
max_values["top"] = min_y_ind
max_values["bottom"] = max_y_ind
###Output
Max z ind: 2031
Min z ind: 1894
Max y ind: 1924
Min y ind: 1968
Max x ind: 1937
Min x ind: 2037
###Markdown
Plots
###Code
## Plot some intermediate
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
ax.scatter3D(all_0[:,0], all_0[:,1], all_0[:,2])
## Show figure - Update
plt.show()
## Plot shapes
fig2 = plt.figure(dpi=100)
ax = plt.axes(projection='3d')
fig2.set_size_inches(6,6)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
id = 6
# ax.scatter3D(data[id,:,0], data[id,:,1], data[id,:,2], s=0.5, c="blue")
ax.scatter3D(data[id,:,0], data[id,:,2], data[id,:,1], s=0.5, c="blue") # rotated
## Plot second one on top
id2 = 5
ax.scatter3D(data[id2,:,0], data[id2,:,2], data[id2,:,1], s=0.5, c="red") # rotated
## Plot a choosen point idx for both
point_name = "right"
point = max_values[point_name]
ax.scatter3D(data[id,point,0], data[id,point,2], data[id,point,1], s=255, c="blue") # rotated
ax.scatter3D(data[id2,point,0], data[id2,point,2], data[id2,point,1], s=255, c="red") # rotated
plt.show()
# fig
## Save figure
fig_name_2 = "mesh_" + str(id) + "_mesh_" + str(id2) + "_" + point_name + ".png"
fig2.savefig(os.path.join(LOG_DIR, fig_name_2), dpi=800)
###Output
_____no_output_____
###Markdown
Compare two point clouds next to each other
###Code
## Plot the point clouds in a gradient-like colors next to each other
fig_color = plt.figure(dpi=100)
fig_color.set_size_inches(16,12)
fig_color.tight_layout()
# Create color list that is the gradient from red to blue
red = Color("red")
colors = list(red.range_to(Color("blue"), data.shape[1]))
colors_rgb = [colors[i].rgb for i in range(len(colors))]
colors_rgb_arr = np.array(colors_rgb, dtype='float32')
# Plot first one
ax3 = fig_color.add_subplot(1, 2, 1, projection='3d')
ax3.set_title("")
ax3.set_xlabel("x")
ax3.set_ylabel("y")
ax3.set_zlabel("z")
ax3.set_xlim(-1, 1)
ax3.set_ylim(-1, 1)
ax3.set_zlim(-1, 1)
ax3.scatter3D(data[id,:,0], data[id,:,2], data[id,:,1], s=0.5, c=colors_rgb_arr) # rotated
# Plot second one
ax4 = fig_color.add_subplot(1, 2, 2, projection='3d')
ax4.set_xlabel("x")
ax4.set_ylabel("y")
ax4.set_zlabel("z")
ax4.set_xlim(-1, 1)
ax4.set_ylim(-1, 1)
ax4.set_zlim(-1, 1)
ax4.scatter3D(data[id2,:,0], data[id2,:,2], data[id2,:,1], s=0.5, c=colors_rgb_arr) # rotated
fig_color.tight_layout()
plt.show()
## Save figure
fig_name_color = "mesh_" + str(id) + "_mesh_" + str(id2) + "_comparison" + ".png"
fig_color.savefig(os.path.join(LOG_DIR, fig_name_color), dpi=800)
###Output
_____no_output_____
###Markdown
Look at the point distributions
###Code
## Analyze point
point_name = "back"
point_data = max_values[point_name]
x_axis = np.arange(data.shape[0])
x_data_curr = data[:, point_data, 0]
y_data_curr = data[:, point_data, 1]
z_data_curr = data[:, point_data, 2]
## Plot figure
fig_2d = plt.figure()
fig_2d.set_size_inches(16,6)
# x
ax1 = plt.subplot(3,1,1)
ax1.scatter(x_axis, x_data_curr, s=5)
# y
ax2 = plt.subplot(3,1,2)
ax2.scatter(x_axis, y_data_curr, s=5)
# z
ax3 = plt.subplot(3,1,3)
ax3.scatter(x_axis, z_data_curr, s=5)
plt.show()
## Plot histogram
fig_hist = plt.figure()
fig_hist.set_size_inches(16,6)
data_space = 20
# x
ax1 = plt.subplot(1,3,1)
ax1.set_title("x")
ax1 = plt.hist(x_data_curr, data_space)
# y
ax2 = plt.subplot(1,3,2)
ax2.set_title("y")
ax2 = plt.hist(y_data_curr, data_space)
# z
ax3 = plt.subplot(1,3,3)
ax3.set_title("z")
ax3 = plt.hist(z_data_curr, data_space)
plt.show()
## Fit to a PDF
fig_pdf = plt.figure()
fig_pdf.set_size_inches(16,6)
data_space_pdf = 0.01
## X
# fit density
model_x = KernelDensity(bandwidth=2, kernel='gaussian')
x_data_curr = x_data_curr.reshape((len(x_data_curr), 1))
model_x.fit(x_data_curr)
# Create x axis data for PDF
x_pdf_x = np.arange(np.min(x_data_curr), np.max(x_data_curr), data_space_pdf)
print(x_pdf_x.shape)
x_pdf_x = np.reshape(x_pdf_x, (x_pdf_x.shape[0], 1))
# Get probabilities
probabilities_x = model_x.score_samples(x_pdf_x)
probabilities_x = np.exp(probabilities_x)
# Visualize
ax1 = plt.subplot(1,3,1)
ax1.set_title("x")
plt.plot(x_pdf_x[:], probabilities_x)
## Y
# fit density
model_y = KernelDensity(bandwidth=2, kernel='gaussian')
y_data_curr = y_data_curr.reshape((len(y_data_curr), 1))
model_y.fit(y_data_curr)
# Create x axis data for PDF
x_pdf_y = np.arange(np.min(y_data_curr), np.max(y_data_curr), data_space_pdf)
print(x_pdf_y.shape)
x_pdf_y = np.reshape(x_pdf_y, (x_pdf_y.shape[0], 1))
# Get probabilities
probabilities_y = model_y.score_samples(x_pdf_y)
probabilities_y = np.exp(probabilities_y)
# Visualize
ax2 = plt.subplot(1,3,2)
ax2.set_title("y")
plt.plot(x_pdf_y[:], probabilities_y)
## Z
# fit density
model_z = KernelDensity(bandwidth=2, kernel='gaussian')
z_data_curr = z_data_curr.reshape((len(z_data_curr), 1))
model_z.fit(z_data_curr)
# Create x axis data for PDF
x_pdf_z = np.arange(np.min(z_data_curr), np.max(z_data_curr), data_space_pdf)
print(x_pdf_z.shape)
x_pdf_z = np.reshape(x_pdf_z, (x_pdf_z.shape[0], 1))
# Get probabilities
probabilities_z = model_z.score_samples(x_pdf_z)
probabilities_z = np.exp(probabilities_z)
# Visualize
ax3 = plt.subplot(1,3,3)
ax3.set_title("z")
plt.plot(x_pdf_z[:], probabilities_z)
plt.show()
## Big ones
big_1 = np.where(data[:,671,0] > 1)
big_1[0].shape
###Output
_____no_output_____ |
(uncased)hate-speech-detection-with-bert-uncased.ipynb | ###Markdown
Hate Speech Detection with BERT - Sentence Level
###Code
# !cat /proc/cpuinfo
# !cat /proc/meminfo
# !df -h
!nvidia-smi
###Output
Sun Mar 28 19:05:25 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.56 Driver Version: 460.32.03 CUDA Version: 11.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
| | | MIG M. |
|===============================+======================+======================|
| 0 Tesla P100-PCIE... Off | 00000000:00:04.0 Off | 0 |
| N/A 35C P0 26W / 250W | 0MiB / 16280MiB | 0% Default |
| | | N/A |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: |
| GPU GI CI PID Type Process name GPU Memory |
| ID ID Usage |
|=============================================================================|
| No running processes found |
+-----------------------------------------------------------------------------+
###Markdown
Setup

We'll need [the Transformers library](https://huggingface.co/transformers/) by Hugging Face:

> Version 4.0 of Transformers makes a lot of changes compared to version 3.0, so we'll keep using version 3.0 here.
###Code
!pip install -qq transformers==3
#@title Setup & Config
import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
import torch
import numpy as np
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from collections import defaultdict
from textwrap import wrap
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
%matplotlib inline
%config InlineBackend.figure_format='retina'
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]
sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))
rcParams['figure.figsize'] = 12, 8
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
from google.colab import drive
drive.mount('/content/drive/')
import os
os.chdir("/content/drive/MyDrive/Hate Speech Detection/Bert_Sentence_Level_Detection/")
###Output
_____no_output_____
###Markdown
Data Exploration

We'll load the reviews dataset:
###Code
# find the encoding method of the csv file
import chardet
rawdata = open('./data/2016-hate-tweets.csv', 'rb').read()
result = chardet.detect(rawdata)
charenc = result['encoding']
print(charenc)
df = pd.read_csv("data/2016-hate-tweets.csv", encoding='Windows-1252')
df.head()
df.shape
class_names = ['none', 'racism', 'sexism']
###Output
_____no_output_____
###Markdown
Data Preprocessing
###Code
PRE_TRAINED_MODEL_NAME = 'bert-base-uncased'
###Output
_____no_output_____
###Markdown
> In BERT, people can use a cased or an uncased version of the model and tokenizer. The cased version works better when "BAD" might convey more sentiment than "bad". However, in the hate speech detection scenario, uppercased key words carry the same weight as lowercased ones. Still, the different sentiment meaning in context may matter when there are no obvious key words. Thus, both methods should be tried to find out which works best.
>
> Here, we use the **uncased** version of BERT.

Let's load a pre-trained [BertTokenizer](https://huggingface.co/transformers/model_doc/bert.html#berttokenizer):
###Code
tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
###Output
_____no_output_____
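###Markdown
We'll use a short example text to understand the tokenization process. The sentence below is made up purely for illustration; note how the uncased tokenizer lowercases everything before splitting it into WordPiece tokens:
###Code
sample_text = "RT @user: This is an EXAMPLE tweet about #MKR!"  # made-up sentence, for illustration only
tokens = tokenizer.tokenize(sample_text)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
print(f' Sentence: {sample_text}')
print(f'   Tokens: {tokens}')
print(f'Token IDs: {token_ids}')
###Output
_____no_output_____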
###Markdown
Choosing Sequence Length

BERT works with fixed-length sequences. We'll use a simple strategy to choose the max length. Let's store the token length of each review:
###Code
token_lens = []
for txt in df.review:
tokens = tokenizer.encode(txt, max_length=512, truncation=True)
token_lens.append(len(tokens))
###Output
_____no_output_____
###Markdown
and plot the distribution:
###Code
sns.distplot(token_lens)
plt.xlim([0, 256]);
plt.xlabel('Token count');
###Output
/usr/local/lib/python3.7/dist-packages/seaborn/distributions.py:2557: FutureWarning: `distplot` is a deprecated function and will be removed in a future version. Please adapt your code to use either `displot` (a figure-level function with similar flexibility) or `histplot` (an axes-level function for histograms).
warnings.warn(msg, FutureWarning)
###Markdown
Most of the reviews seem to contain less than 50 tokens, but we'll be on the safe side and choose a maximum length of 80.
###Code
MAX_LEN = 80
###Output
_____no_output_____
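###Markdown
As a quick sanity check on that choice (a small sketch using the `token_lens` list computed above), we can see what fraction of the reviews fit within this limit:
###Code
token_lens_arr = np.array(token_lens)
print(f'{np.mean(token_lens_arr <= MAX_LEN):.1%} of reviews have at most {MAX_LEN} tokens')
###Output
_____no_output_____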
###Markdown
We have all building blocks required to create a PyTorch dataset. Let's do it:
###Code
class GPReviewDataset(Dataset):
def __init__(self, reviews, targets, tokenizer, max_len):
self.reviews = reviews
self.targets = targets
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.reviews)
def __getitem__(self, item):
review = str(self.reviews[item])
target = self.targets[item]
if target == 'none':
target = 0
elif target == 'racism':
target = 1
else:
target = 2
encoding = self.tokenizer.encode_plus(
review,
add_special_tokens=True,
truncation=True,
max_length=self.max_len,
return_token_type_ids=False,
# pad_to_max_length=True,
padding='max_length',
return_attention_mask=True,
return_tensors='pt',
)
return {
'review_text': review,
'input_ids': encoding['input_ids'].flatten(),
'attention_mask': encoding['attention_mask'].flatten(),
'targets': torch.tensor(target, dtype=torch.long)
}
###Output
_____no_output_____
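###Markdown
As a small illustrative check (a sketch, not part of the original pipeline), we can instantiate the dataset on the full dataframe and inspect one encoded item:
###Code
# Sketch: build the dataset on the raw dataframe and look at a single item
sample_ds = GPReviewDataset(
    reviews=df.review.to_numpy(),
    targets=df.rating.to_numpy(),
    tokenizer=tokenizer,
    max_len=MAX_LEN
)
item = sample_ds[0]
print(item['review_text'])
print(item['input_ids'].shape, item['attention_mask'].shape, item['targets'])
###Output
_____no_output_____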
###Markdown
Let's split the data:
###Code
df_train = df[df.split=="train"]
df_val = df[df.split=="val"]
df_test = df[df.split=="test"]
df_train.shape, df_val.shape, df_test.shape
###Output
_____no_output_____
###Markdown
We also need to create a couple of data loaders. Here's a helper function to do it:

- Truncation: Truncate to a maximum length specified with the argument max_length, or to the maximum acceptable input length for the model if that argument is not provided.
###Code
def create_data_loader(df, tokenizer, max_len, batch_size):
ds = GPReviewDataset(
reviews=df.review.to_numpy(),
targets=df.rating.to_numpy(),
tokenizer=tokenizer,
max_len=max_len,
)
return DataLoader(
ds,
batch_size=batch_size,
#num_workers=4 # process hangs with num_workers=4
num_workers=0, # num_workers: how many subprocesses to use for data loading. 0 means that the data will be
# loaded in the main process. (default: 0)
shuffle=True # , drop_last=True
)
BATCH_SIZE = 32
train_data_loader = create_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)
val_data_loader = create_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
test_data_loader = create_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
###Output
_____no_output_____
###Markdown
Let's have a look at an example batch from our training data loader.

Hate Speech Detection with BERT and Hugging Face

There are a lot of helpers that make using BERT easy with the Transformers library. Depending on the task you might want to use [BertForSequenceClassification](https://huggingface.co/transformers/model_doc/bert.html#bertforsequenceclassification), [BertForQuestionAnswering](https://huggingface.co/transformers/model_doc/bert.html#bertforquestionanswering) or something else. Here, we'll use the basic [BertModel](https://huggingface.co/transformers/model_doc/bert.html#bertmodel) and build our detector on top of it. Let's load the model:
###Code
bert_model = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
###Output
_____no_output_____
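###Markdown
For comparison, the higher-level helper mentioned above could be used roughly like this. This is only a sketch of the alternative, not what this notebook does; the `num_labels` argument is the one assumption beyond names already defined here:
###Code
from transformers import BertForSequenceClassification

# Sketch: a ready-made classification head instead of the custom module built below
seq_clf = BertForSequenceClassification.from_pretrained(
    PRE_TRAINED_MODEL_NAME,
    num_labels=len(class_names)
)
###Output
_____no_output_____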
###Markdown
We can use all of this knowledge to create a classifier that uses the BERT model:
###Code
class SentimentClassifier(nn.Module):
def __init__(self, n_classes):
super(SentimentClassifier, self).__init__()
self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
self.drop = nn.Dropout(p=0.3)
self.out = nn.Linear(self.bert.config.hidden_size, n_classes)
def forward(self, input_ids, attention_mask):
_, pooled_output = self.bert(
input_ids=input_ids,
attention_mask=attention_mask
)
output = self.drop(pooled_output)
return self.out(output)
###Output
_____no_output_____
###Markdown
Our classifier delegates most of the heavy lifting to the BertModel. We use a dropout layer for some regularization and a fully-connected layer for our output. Note that we're returning the raw output of the last layer, since that is required for the cross-entropy loss function in PyTorch to work.

This should work like any other PyTorch model. Let's create an instance and move it to the GPU:
###Code
model = SentimentClassifier(len(class_names))
model = model.to(device)
###Output
_____no_output_____
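###Markdown
As a quick aside on the raw-output note above: `nn.CrossEntropyLoss` applies log-softmax internally, so the classifier must return unnormalized logits. A tiny self-contained check with made-up numbers:
###Code
# Made-up logits for one example over the 3 classes; the loss is computed directly from raw scores
logits = torch.tensor([[2.0, -1.0, 0.5]])
target = torch.tensor([0])  # true class index
print(nn.CrossEntropyLoss()(logits, target))
###Output
_____no_output_____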
###Markdown
We'll move the example batch of our training data to the GPU.

Training

To reproduce the training procedure from the BERT paper, we'll use the [AdamW](https://huggingface.co/transformers/main_classes/optimizer_schedules.html#adamw) optimizer provided by Hugging Face. It corrects weight decay, so it's similar to the original paper. We'll also use a linear scheduler with no warmup steps:
###Code
EPOCHS = 4
optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
total_steps = len(train_data_loader) * EPOCHS
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
loss_fn = nn.CrossEntropyLoss().to(device)
###Output
_____no_output_____
###Markdown
How do we come up with all hyperparameters? The BERT authors have some recommendations for fine-tuning:

- Batch size: 16, 32
- Learning rate (Adam): 5e-5, 3e-5, 2e-5
- Number of epochs: 2, 3, 4

We're going to ignore the number of epochs recommendation but stick with the rest. Note that increasing the batch size reduces the training time significantly, but gives you lower accuracy.

Let's continue with writing a helper function for training our model for one epoch:
###Code
def train_epoch(
model,
data_loader,
loss_fn,
optimizer,
device,
scheduler,
n_examples
):
model = model.train()
losses = []
correct_predictions = 0
for d in data_loader:
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
_, preds = torch.max(outputs, dim=1)
loss = loss_fn(outputs, targets)
correct_predictions += torch.sum(preds == targets)
losses.append(loss.item())
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
return correct_predictions.double() / n_examples, np.mean(losses)
###Output
_____no_output_____
###Markdown
Training the model should look familiar, except for two things. The scheduler gets called every time a batch is fed to the model. We're avoiding exploding gradients by clipping the gradients of the model using [clip_grad_norm_](https://pytorch.org/docs/stable/nn.html#clip-grad-norm).

Let's write another one that helps us evaluate the model on a given data loader:
###Code
def eval_model(model, data_loader, loss_fn, device, n_examples):
model = model.eval()
losses = []
correct_predictions = 0
with torch.no_grad():
for d in data_loader:
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
# return_dict=False
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
_, preds = torch.max(outputs, dim=1)
loss = loss_fn(outputs, targets)
correct_predictions += torch.sum(preds == targets)
losses.append(loss.item())
return correct_predictions.double() / n_examples, np.mean(losses)
###Output
_____no_output_____
###Markdown
Using those two, we can write our training loop. We'll also store the training history:
###Code
%%time
history = defaultdict(list)
best_accuracy = 0
for epoch in range(EPOCHS):
print(f'Epoch {epoch + 1}/{EPOCHS}')
print('-' * 10)
train_acc, train_loss = train_epoch(
model,
train_data_loader,
loss_fn,
optimizer,
device,
scheduler,
len(df_train)
)
print(f'Train loss {train_loss} accuracy {train_acc}')
val_acc, val_loss = eval_model(
model,
val_data_loader,
loss_fn,
device,
len(df_val)
)
print(f'Val loss {val_loss} accuracy {val_acc}')
print()
history['train_acc'].append(train_acc)
history['train_loss'].append(train_loss)
history['val_acc'].append(val_acc)
history['val_loss'].append(val_loss)
if val_acc > best_accuracy:
torch.save(model.state_dict(), 'best_model_state.bin')
best_accuracy = val_acc
###Output
Epoch 1/4
----------
Train loss 0.43767586048457313 accuracy 0.8187298617607318
Val loss 0.40122823623737486 accuracy 0.8453865336658354
Epoch 2/4
----------
Train loss 0.20873284468619135 accuracy 0.9247479471988359
Val loss 0.5924626436859074 accuracy 0.8435162094763092
Epoch 3/4
----------
Train loss 0.10397580933085708 accuracy 0.9643488202889513
Val loss 0.7274666180705079 accuracy 0.8391521197007481
Epoch 4/4
----------
Train loss 0.054687450394696524 accuracy 0.983681529986488
Val loss 0.8317935840033068 accuracy 0.841645885286783
CPU times: user 4min 12s, sys: 2min 14s, total: 6min 27s
Wall time: 6min 34s
###Markdown
Note that we're storing the state of the best model, indicated by the highest validation accuracy. We can look at the training vs validation accuracy:
###Code
plt.plot(history['train_acc'], label='train accuracy')
plt.plot(history['val_acc'], label='validation accuracy')
plt.title('Training history')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.ylim([0, 1]);
###Output
_____no_output_____
###Markdown
The training accuracy climbs toward 100% within the 4 epochs we train for. You might try to fine-tune the parameters a bit more, but this will be good enough for us.

Don't want to wait? Uncomment the next cell to load the previously saved model state:
###Code
# model = SentimentClassifier(len(class_names))
# model.load_state_dict(torch.load('best_model_state.bin'))
# model = model.to(device)
###Output
_____no_output_____
###Markdown
 Evaluation. So how good is our model at predicting the labels? Let's start by calculating the accuracy on the test data:
###Code
test_acc, test_loss = eval_model(
model,
test_data_loader,
loss_fn,
device,
len(df_test)
)
test_acc.item(), test_loss.item()
###Output
_____no_output_____
###Markdown
 The accuracy is lower on the test set than on the validation set, but the model still generalizes reasonably well. We'll define a helper function to get the predictions from our model:
###Code
def get_predictions(model, data_loader):
model = model.eval()
review_texts = []
predictions = []
prediction_probs = []
real_values = []
with torch.no_grad():
for d in data_loader:
texts = d["review_text"]
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
_, preds = torch.max(outputs, dim=1)
probs = F.softmax(outputs, dim=1)
review_texts.extend(texts)
predictions.extend(preds)
prediction_probs.extend(probs)
real_values.extend(targets)
predictions = torch.stack(predictions).cpu()
prediction_probs = torch.stack(prediction_probs).cpu()
real_values = torch.stack(real_values).cpu()
return review_texts, predictions, prediction_probs, real_values
###Output
_____no_output_____
###Markdown
 This is similar to the evaluation function, except that we also store the text of the reviews and the predicted probabilities (by applying softmax to the model outputs).
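 Just to make that parenthetical concrete, here is a tiny illustration with made-up logits of how softmax turns raw scores into class probabilities:
###Code
# Hedged illustration: softmax over hypothetical logits for the three classes.
example_logits = torch.tensor([[1.5, 0.3, -0.8]])
example_probs = F.softmax(example_logits, dim=1)
print(example_probs)                      # the three probabilities sum to 1
print(torch.max(example_probs, dim=1))    # the arg-max index is the predicted class
###Output
_____no_output_____
###Markdown
 Now let's run the helper on the test loader: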
###Code
y_review_texts, y_pred, y_pred_probs, y_test = get_predictions(
model,
test_data_loader
)
###Output
_____no_output_____
###Markdown
Let's have a look at the classification report
###Code
torch.set_printoptions(profile="full")
# y_test
# y_pred
print(classification_report(y_test, y_pred, target_names=class_names, labels=[0, 1, 2], digits=4))
###Output
precision recall f1-score support
none 0.8024 0.8751 0.8372 2186
racism 0.9075 0.5323 0.6710 387
sexism 0.5731 0.5387 0.5554 633
accuracy 0.7673 3206
macro avg 0.7610 0.6487 0.6879 3206
weighted avg 0.7698 0.7673 0.7615 3206
###Markdown
We'll continue with the confusion matrix:
###Code
def show_confusion_matrix(confusion_matrix):
hmap = sns.heatmap(confusion_matrix, annot=True, fmt="d", cmap="Blues")
hmap.yaxis.set_ticklabels(hmap.yaxis.get_ticklabels(), rotation=0, ha='right')
hmap.xaxis.set_ticklabels(hmap.xaxis.get_ticklabels(), rotation=30, ha='right')
plt.ylabel('True sentiment')
plt.xlabel('Predicted sentiment');
cm = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(cm, index=class_names, columns=class_names)
show_confusion_matrix(df_cm)
###Output
_____no_output_____
###Markdown
 This confirms that our model struggles most with the racism and sexism classes, frequently confusing them with the none class. That's a good overview of the performance of our model. But let's have a look at an example from our test data:
###Code
idx = 3
review_text = y_review_texts[idx]
true_sentiment = y_test[idx]
pred_df = pd.DataFrame({
'class_names': class_names,
'values': y_pred_probs[idx]
})
print("\n".join(wrap(review_text)))
print()
print(f'True sentiment: {class_names[true_sentiment]}')
###Output
*sigh* oh Colin ? #MKR
True sentiment: none
###Markdown
 Now we can look at the model's confidence for each class:
###Code
sns.barplot(x='values', y='class_names', data=pred_df, orient='h')
plt.ylabel('sentiment')
plt.xlabel('probability')
plt.xlim([0, 1]);
###Output
_____no_output_____
###Markdown
 Predicting on Raw Text. Let's use our model to predict the sentiment of some raw text:
###Code
review_text1 = "I think #mkr is good to hear"
review_text2 = "I do not think women can do work better or as good as man"
encoded_review = tokenizer.encode_plus(
review_text2,
max_length=MAX_LEN,
add_special_tokens=True,
return_token_type_ids=False,
#pad_to_max_length=True,
padding='max_length',
return_attention_mask=True,
return_tensors='pt',
truncation=True
)
###Output
_____no_output_____
###Markdown
Let's get the predictions from our model:
###Code
# Encode and classify each example text in turn (the cell above showed the encoding for a single text).
for review_text in [review_text1, review_text2]:
    encoded_review = tokenizer.encode_plus(
        review_text, max_length=MAX_LEN, add_special_tokens=True,
        return_token_type_ids=False, padding='max_length',
        return_attention_mask=True, return_tensors='pt', truncation=True
    )
    input_ids = encoded_review['input_ids'].to(device)
    attention_mask = encoded_review['attention_mask'].to(device)
    output = model(input_ids, attention_mask)
    _, prediction = torch.max(output, dim=1)
    print(f'Review text: {review_text}')
    print(f'Sentiment  : {class_names[prediction]}')
###Output
Review text: I think #mkr is good to hear
Sentiment : none
Review text: I do not think women can do work better or as good as man
Sentiment : none
|
src/SuLab-WD-rephetio-analysis/1_code/01_querying_wikidata_for_hetnet_edges.ipynb | ###Markdown
 Querying WikiData for hetnet edges
###Code
import json
import pandas as pd
from pathlib import Path
from datetime import datetime
from tqdm import tqdm_notebook
import wdhetnetbuilder as wdh
net_info_dir = Path('../0_data/manual').resolve()
h = wdh.WDHetnetQueryBuilder(net_info_dir.joinpath('node_info.json'),
net_info_dir.joinpath('edge_info.json'))
###Output
_____no_output_____
###Markdown
 Defining the structure of the metagraph
###Code
hetnet_edges = [
{'abbrev': 'CdiC'},
{'abbrev': 'CtD'},
#{'abbrev': 'PPaiC'},
{'abbrev': 'CHhcC'},
{'abbrev': 'PWhpC'},
{'abbrev': 'CpP'},
{'abbrev': 'PiwC'},
{'abbrev': 'VntC'},
{'abbrev': 'VptC'},
{'abbrev': 'DaP', 'target': 'Gene'},
{'abbrev': 'DaG'},
{'abbrev': 'DsyS'},
{'abbrev': 'DmsMS'},
{'abbrev': 'CHsyS'},
{'abbrev': 'CHsyD'},
{'abbrev': 'VndD'},
{'abbrev': 'VpdD'},
{'abbrev': 'VvP', 'target': 'Gene'},
{'abbrev': 'VvG'},
{'abbrev': 'PWhpP', 'target': 'Gene'},
{'abbrev': 'PWhpG'},
{'abbrev': 'PccCC'},
{'abbrev': 'PbpBP'},
{'abbrev': 'PmfMF'},
{'abbrev': 'PhpPD'},
{'abbrev': 'PhpSS'},
{'abbrev': 'PFhpP'},
{'abbrev': 'PhpBS'},
{'abbrev': 'PhpAS'},
{'abbrev': 'PhpSM'},
#{'abbrev': 'PPtaD'},
{'abbrev': 'CrCR'},
{'abbrev': 'DlA'},
{'abbrev': 'CHafA'},
{'abbrev': 'CtCH'},
{'abbrev': 'BPhpC'},
{'abbrev': 'PccA'},
{'abbrev': 'PWhpBP'},
{'abbrev': 'PFhpBS'},
{'abbrev': 'PDhpSS'},
{'abbrev': 'PFhpSS'},
{'abbrev': 'PWhpBP'},
{'abbrev': 'PFhpPD'},
{'abbrev': 'PFhpAS'},
{'abbrev': 'PregBP'}
]
queries = [h.build_query_from_abbrev(**edge) for edge in hetnet_edges]
###Output
_____no_output_____
###Markdown
 An error was found in the February 2018 data dump... The majority of Biological Process nodes are missing their `instance of Biological Process` statement (`wdt:P31 wd:Q2996394`), leading to a severely decreased number of edges with these node types. Because biological processes are still linked via the biological process property (`wdt:P682`) and still carry a Gene Ontology term identifier (`wdt:P686`), we can keep the property and add a GO-ID check to recover these edges.
###Code
ini_queries_2_2018 = [h.build_query_from_abbrev(**edge) for edge in hetnet_edges]
# Biological Process nodes lost their wdt:P31 wd:Q2996394 statements in the February 2018 dump,
# so keep relying on the biological process property (wdt:P682) between the protein and the BP,
# and additionally check that the BP has a GO ID (wdt:P686).
queries_2_2018 = []
for q in ini_queries_2_2018:
queries_2_2018.append(q.replace(""" ?biological_process wdt:P31 wd:Q2996394 .""",
""" ?biological_process wdt:P686 ?go_id .""")
.replace(""" ?biological_process1 wdt:P31 wd:Q2996394 .""",
""" ?biological_process1 wdt:P686 ?go_id1 .""")
.replace(""" ?biological_process2 wdt:P31 wd:Q2996394 .""",
""" ?biological_process2 wdt:P686 ?go_id2 ."""))
###Output
_____no_output_____
###Markdown
 A similar problem was found back in early 2017: Genes and Proteins were `subclass of` Gene or Protein... not `instance of`... Disease was a mess, with some `subclass of`, some `instance of`, and some both... We fix these for our 2017 queries below.
###Code
# Fix gene and protein
h.node_info['Gene']['subclass'] = True
h.node_info['Protein']['subclass'] = True
# Update the class with the new info
# TODO: Add an update node method that re-runs this auto-magically...
h.subclass = h._extract_node_key('subclass')
h.extend = h._extract_node_key('extend')
ini_queries_2017 = [h.build_query_from_abbrev(**edge) for edge in hetnet_edges]
# Diseases are sometimes 'instance_of', sometimes 'subclass_of', so we will extend to both...
queries_2017 = []
for q in ini_queries_2017:
queries_2017.append(q.replace(""" # Initial typing for Disease
?disease wdt:P31 wd:Q12136 .""", """ # Initial typing for Disease
?disease wdt:P31|wdt:P279* wd:Q12136 ."""))
print(h.build_query_from_abbrev('CtD'))
endpoints = {
'https://query.wikidata.org/sparql': datetime.today().strftime('%Y-%m-%d'),
'http://avalanche.scripps.edu:9988/bigdata/sparql': '2018-11-12',
'http://avalanche.scripps.edu:9999/bigdata/sparql': '2018-02-05',
'http://kylo.scripps.edu:9988/bigdata/sparql': '2017-01-16',
}
results = dict()
# Sort so live wikidata is done last in case of errors on local instances...
for ep, dump_date in tqdm_notebook(sorted(endpoints.items()), desc='All Endpoints'):
# Get the correct set of queries for the correct years...
if dump_date.startswith('2017'):
to_query = queries_2017
elif dump_date.startswith('2018-02'):
to_query = queries_2_2018
else:
to_query = queries
cur_res = dict()
for meta_edge, query in tqdm_notebook(zip(hetnet_edges, to_query),
desc=dump_date+' Data',
total=len(hetnet_edges)):
cur_res[meta_edge['abbrev']] = wdh.execute_sparql_query(query, endpoint=ep)
results[dump_date] = cur_res
edge_count = []
for date, res in results.items():
counts = pd.Series({name: len(res[name]) for name in res}, name=date)
edge_count.append(counts)
edge_count = pd.concat(edge_count, axis=1)
edge_count
this_name = '01_querying_wikidata_for_hetnet_edges'
out_dir = Path('../2_pipeline').resolve().joinpath(this_name, 'out')
out_dir.mkdir(parents=True, exist_ok=True)
edge_count.to_csv(out_dir.joinpath('edge_counts.csv'))
###Output
_____no_output_____
###Markdown
 Some Error Fixing: 1. If start and end node types are the same, we could potentially have both node_id1 -> node_id2 and node_id2 -> node_id1... This is only useful if the edge is directed; most of these edges are bi-directional (undirected), so only one of the two directions is needed. 2. Since WikiData can have more than one 'instance_of' statement per node, some nodes may be members of multiple types... we will look at those queried and see where they are. 3. Qualified statements need further processing, so we will collect those. 4. Multi-step edges that will be compressed to one edge need further processing, so we will collect those.
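 A tiny sketch (with made-up IDs) of the dedup idea in point 1: sorting each undirected pair standardizes both orientations to a single key.
###Code
# Hedged illustration of deduplicating undirected edges (hypothetical Wikidata IDs).
pairs = [('Q2', 'Q1'), ('Q1', 'Q2'), ('Q3', 'Q1')]
unique_edges = {tuple(sorted(p)) for p in pairs}
print(unique_edges)  # {('Q1', 'Q2'), ('Q1', 'Q3')}
###Output
_____no_output_____
###Markdown
 The helper functions below implement these fixes: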
###Code
def remove_query_numb(query_name):
numb = wdh.get_query_numb(query_name)
if numb:
idx = query_name.index(numb)
return query_name[:idx]
else:
return query_name
def to_full_name(query_name):
name = remove_query_numb(query_name)
return name.replace('_', ' ').title()
def process_query_res(q_result):
node_ids = dict()
id_to_name = dict()
self_ref = set()
qualified = set()
multi_step = set()
# Do some processing on the collected edges
for e, r in q_result.items():
s_kind, e_type, e_kind = wdh.gt.parse_edge_abbrev(e)
all_n_types = [c for c in r.columns if not c.endswith('Label')]
for nt in all_n_types:
# Get the node type by removing any trailing numbers
numb = wdh.get_query_numb(nt)
if numb:
idx = nt.index(numb)
node_type = nt[:idx]
else:
node_type = nt
# For a given node type, collect all the ids... don't need qualifiers
if node_type != 'qualifier':
if node_type in node_ids:
node_ids[node_type].update(set(r[nt]))
else:
node_ids[node_type] = set(r[nt])
id_to_name.update(r.set_index(nt)[nt+'Label'].to_dict())
        # Identify self-referential edges
if s_kind == e_kind:
self_ref.add(e)
if len(all_n_types) > 2:
# Grab qualified edges for further processing
if 'qualifier' in all_n_types:
qualified.add(e)
# Currently, an edge can not be both multi-step and qualified
else:
multi_step.add(e)
return node_ids, id_to_name, self_ref, qualified, multi_step
def fix_self_ref_edges(q_result, self_ref, id_to_name):
fixed = dict()
for kind in tqdm_notebook(self_ref):
# no need to worry about forward vs reverse in directed edges
if '>' in kind or '<' in kind:
continue
# Only look at 1 kind of edge at a time
this_edges = q_result[kind]
col_names = this_edges.columns
edge_ids = set()
for row in this_edges.itertuples():
# Grab the edge ID, sorting, so lowest ID first:
            # If both 'Q00001 -- Q00002' and 'Q00002 -- Q00001' exist, this effectively standardizes to
# 'Q00001 -- Q00002'
edge_id = tuple(sorted([row[1], row[3]]))
edge_ids.add(edge_id)
start_ids = []
start_names = []
end_ids = []
end_names = []
for edge_id in edge_ids:
start_ids.append(edge_id[0])
start_names.append(id_to_name[edge_id[0]])
end_ids.append(edge_id[1])
end_names.append(id_to_name[edge_id[1]])
fixed[kind] = pd.DataFrame({col_names[0]: start_ids, col_names[1]: start_names, col_names[2]: end_ids, col_names[3]: end_names})
return fixed
def find_func_numb(node_names, name, func):
return func([wdh.get_query_numb(n) for n in node_names if n.startswith(name)])
def find_max_numb(node_names, name):
return find_func_numb(node_names, name, max)
def find_min_numb(node_names, name):
return find_func_numb(node_names, name, min)
def find_correct_node_name(node_names, name, func):
for node in node_names:
numb = wdh.get_query_numb(node)
if node.startswith(name) and node != name and numb:
return name + str(func(node_names, name))
return name
def get_start_and_end_names(node_names, s_type, e_type):
s_name = find_correct_node_name(node_names, s_type, find_min_numb)
e_name = find_correct_node_name(node_names, e_type, find_max_numb)
return s_name, e_name
def process_multi_step_edges(q_result, qualified, multi_step):
fixed = dict()
# Essentially just change the column order for later processing...
for kind in tqdm_notebook(multi_step.union(qualified)):
# Get the information for the current edge
this_edges = q_result[kind]
col_names = this_edges.columns
node_cols = [c for c in col_names if not c.endswith('Label')]
# Need to know what start and end types we're looking for
s_kind, e_type, e_kind = wdh.gt.parse_edge_abbrev(kind)
s_name = wdh.to_query_name(h.node_abv_to_full[s_kind])[1:]
e_name = wdh.to_query_name(h.node_abv_to_full[e_kind])[1:]
if 'qualifier' not in node_cols:
s_name, e_name = get_start_and_end_names(node_cols, s_name, e_name)
new_node_order = [s_name, e_name]
new_node_order += [n for n in node_cols if n not in new_node_order]
new_col_names = []
for n in new_node_order:
new_col_names += [n, n+'Label']
fixed[kind] = this_edges[new_col_names].copy()
return fixed
###Output
_____no_output_____
###Markdown
Hetnet To Nodes
###Code
def build_hetnet_nodes(node_ids, id_to_name):
nodes = []
for k, v in node_ids.items():
curr_nodes = pd.DataFrame({'id': list(v), 'label': len(v)*[k]})
curr_nodes['name'] = curr_nodes['id'].map(id_to_name)
nodes.append(curr_nodes)
# Make dataframe
nodes = pd.concat(nodes).reset_index(drop=True)
# Fix labels (from lowercase_underscore to As Defined in node_info.json)
label_map = {wdh.to_query_name(k)[1:]: k for k in h.node_info.keys()}
nodes['label'] = nodes['label'].map(label_map)
return nodes
###Output
_____no_output_____
###Markdown
To Hetnet Edges
###Code
def process_PregBP(edges):
edges_out = edges.copy()
keep_map = {'positive regulation': 'UP_REGULATES_GuBP',
'negative regulation': 'DOWN_REGULATES_GdBP',
'regulation': 'REGULATES_GregBP'}
direction = edges['biological_process1Label'].str.split(' of ', expand=True)[0]
edges_out['type'] = direction.map(keep_map)
return edges_out.dropna(subset=['type']).reset_index(drop=True)
def process_CpP(edges):
edges_out = edges.copy()
type_map = {'receptor antagonist': 'INHIBITS_CiG',
'enzyme inhibitor': 'INHIBITS_CiG',
'agonist': 'ACTIVATES_CacG',
'channel blocker': 'INHIBITS_CiG',
'substrate': 'BINDS_CbG',
'allosteric modulator': 'BINDS_CbG',
'channel activator activity': 'ACTIVATES_CacG',
'protein-protein interaction inhibitor': 'INHIBITS_CiG',
'ligand in biochemistry': 'BINDS_CbG',
'reuptake inhibitor': 'INHIBITS_CiG',
'neutralizing antibody': 'INHIBITS_CiG'}
edges_out['type'] = edges_out['qualifierLabel'].str.lower().map(type_map)
return edges_out
def build_hetnet_edges(q_result, fixed_edges):
edges = []
for k, v in q_result.items():
if k in fixed_edges.keys():
v = fixed_edges[k]
col_names = v.columns
keep_cols = [c for c in col_names if not c.endswith('Label')]
# Queries sometimes return zero results, so skip those...
if not keep_cols:
continue
col_name_map = {keep_cols[0]: 'start_id', keep_cols[1]: 'end_id'}
# Inner nodes in multi-step edges become inner1, inner2, etc...
inner_cols = {k: 'inner'+str(idx+1) for idx, k in enumerate(keep_cols[2:]) if k != 'qualifier'}
col_name_map = {**inner_cols, **col_name_map}
v = v.rename(columns=col_name_map)
if k == "PregBP":
v = process_PregBP(v)
elif k == "CpP":
v = process_CpP(v)
# Replace Proteins with Genes, to merge the protein and gene metanodes
parsed_edge = wdh.gt.parse_edge_abbrev(k)
if 'P' in parsed_edge:
idx = parsed_edge.index('P')
parsed_edge = list(parsed_edge)
parsed_edge[idx] = 'G'
k = ''.join(parsed_edge)
if 'type' not in v.columns:
v['type'] = h.edge_abv_to_full[parsed_edge[1]] + '_' + k
edges.append(v)
# Combine the edges into a single dataframe
edges = pd.concat(edges, sort=False).reset_index(drop=True)
col_order = ['start_id', 'end_id', 'type', 'qualifier']
col_order = col_order + [c for c in col_name_map.values() if c not in col_order]
edges = edges[col_order]
return edges
###Output
_____no_output_____
###Markdown
Fixing nodes that are duplicated across two different Node Types
###Code
def find_combos(nodes):
duplicated_nodes = nodes[nodes.duplicated(keep=False, subset=['id'])]['id'].unique()
# Find out what types are being combined...
combos = (nodes.query('id in @duplicated_nodes')
.sort_values(['id', 'label'])
.groupby('id')['label']
.apply(list)
.astype(str)
.to_frame()
.reset_index())
return combos
def uniquify_node_types(nodes, edges, type_fix_map=None, verbose=True):
# Set a default value for the map
if type_fix_map is None:
type_fix_map = {"['Structural Motif', 'Super-Secondary Structure']": 'Structural Motif',
"['Chemical Hazard', 'Disease']": 'Chemical Hazard',
"['Disease', 'Symptom']": 'Symptom',
"['Sequence Variant', 'Symptom']": 'Symptom',
"['Disease', 'Sequence Variant', 'Symptom']": 'Symptom',
"['Compound', 'Gene']": 'Compound',
"['Chemical Role', 'Compound']": 'Compound',
"['Biological Process', 'Disease']": 'Disease',
"['Anatomical Structure', 'Cellular Component']": 'Cellular Component',
"['Protein Domain', 'Structural Motif', 'Super-Secondary Structure']": 'Protein Domain',
"['Protein Domain', 'Protein Family']": 'Protein Family',
"['Gene', 'Protein Family']": 'Gene',
"['Disease', 'Sequence Variant']": 'Disease'
}
# Find out what's combined...
combos = find_combos(nodes)
# Map from the original combination to resolved type
final_types = combos.set_index('id')['label'].map(type_fix_map).to_dict()
# Fill in types for already unique nodes and map
final_types = {**nodes.set_index('id')['label'].to_dict(), **final_types}
nodes['label'] = nodes['id'].map(final_types)
if verbose:
print('Number of nodes before fixing: {:,}'.format(len(nodes)))
nodes = nodes.drop_duplicates().reset_index(drop=True)
if verbose:
print('Number of nodes after fixing: {:,}'.format(len(nodes)))
# Now check that the node types in the edge abbreviation match the newly resolved node types
combo = wdh.gt.combine_nodes_and_edges(nodes, edges)
combo['edge_abv'] = combo['type'].apply(lambda t: t.split('_')[-1])
combo['actual_start'] = combo['edge_abv'].apply(lambda a: h.node_abv_to_full[wdh.gt.parse_edge_abbrev(a)[0]])
combo['actual_end'] = combo['edge_abv'].apply(lambda a: h.node_abv_to_full[wdh.gt.parse_edge_abbrev(a)[2]])
bad_edge = combo.query('start_label != actual_start or end_label != actual_end')
if verbose:
print('Number of edges with issues to be removed: {:,}'.format(len(bad_edge)))
print('Number of edges before fixing: {:,}'.format(len(edges)))
edges = edges.drop(bad_edge.index).reset_index(drop=True)
if verbose:
print('Number of edges after fixing: {:,}'.format(len(edges)))
return nodes, edges
def build_hetnet(q_result):
node_ids, id_to_name, self_ref, qualified, multi_step = process_query_res(q_result)
fixed_self_ref = fix_self_ref_edges(q_result, self_ref, id_to_name)
fixed_multi_step = process_multi_step_edges(q_result, qualified, multi_step)
nodes = build_hetnet_nodes(node_ids, id_to_name)
edges = build_hetnet_edges(q_result, {**fixed_multi_step, **fixed_self_ref})
# merge the genes and proteins in the nodes file
idx = nodes.query('label == "Protein"').index
nodes.loc[idx, 'label'] = 'Gene'
nodes, edges = uniquify_node_types(nodes, edges)
return nodes, edges
for date, q_result in results.items():
out_dir.joinpath(date).mkdir(exist_ok=True, parents=True)
print('DUMP DATE: {}'.format(date))
nodes, edges = build_hetnet(q_result)
wdh.gt.add_colons(nodes).to_csv(out_dir.joinpath(date, 'nodes.csv'), index=False)
wdh.gt.add_colons(edges).to_csv(out_dir.joinpath(date, 'edges.csv'), index=False)
print('\n\n')
###Output
DUMP DATE: 2018-11-12
|
toy-examples/Prediction-ST-BTMF-Gdata.ipynb | ###Markdown
 About this Notebook. Bayesian temporal matrix factorization (BTMF for short) is a type of Bayesian matrix factorization that achieves state-of-the-art results on challenging imputation and prediction problems. In the following, we will discuss: - what the proposed BTMF model is, - how to implement BTMF efficiently using Python `NumPy`, - how to develop a spatiotemporal prediction model by adapting BTMF, and - how to make predictions with real-world spatiotemporal datasets. If you want to understand BTMF and its modeling tricks in detail, our paper is for you:> Xinyu Chen, Lijun Sun (2019). **Bayesian temporal factorization for multidimensional time series prediction**.
###Code
import numpy as np
from numpy import linalg as LA
from numpy.random import multivariate_normal
from scipy.stats import wishart
def Normal_Wishart(mu_0, lamb, W, nu, seed = None):
"""Function drawing a Gaussian-Wishart random variable"""
Lambda = wishart(df = nu, scale = W, seed = seed).rvs()
cov = np.linalg.inv(lamb * Lambda)
mu = multivariate_normal(mu_0, cov)
return mu, Lambda
###Output
_____no_output_____
###Markdown
 Matrix Computation Concepts Kronecker product- **Definition**: Given two matrices $A\in\mathbb{R}^{m_1\times n_1}$ and $B\in\mathbb{R}^{m_2\times n_2}$, the **Kronecker product** between these two matrices is defined as$$A\otimes B=\left[ \begin{array}{cccc} a_{11}B & a_{12}B & \cdots & a_{1n_1}B \\ a_{21}B & a_{22}B & \cdots & a_{2n_1}B \\ \vdots & \vdots & \ddots & \vdots \\ a_{m_11}B & a_{m_12}B & \cdots & a_{m_1n_1}B \\ \end{array} \right]$$where the symbol $\otimes$ denotes the Kronecker product, and the size of the resulting $A\otimes B$ is $(m_1m_2)\times (n_1n_2)$ (i.e., $m_1m_2$ rows and $n_1n_2$ columns).- **Example**: If $A=\left[ \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right]$ and $B=\left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10 \\ \end{array} \right]$, then we have$$A\otimes B=\left[ \begin{array}{cc} 1\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] & 2\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] \\ 3\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] & 4\times \left[ \begin{array}{ccc} 5 & 6 & 7\\ 8 & 9 & 10\\ \end{array} \right] \\ \end{array} \right]$$$$=\left[ \begin{array}{cccccc} 5 & 6 & 7 & 10 & 12 & 14 \\ 8 & 9 & 10 & 16 & 18 & 20 \\ 15 & 18 & 21 & 20 & 24 & 28 \\ 24 & 27 & 30 & 32 & 36 & 40 \\ \end{array} \right]\in\mathbb{R}^{4\times 6}.$$ Khatri-Rao product (`kr_prod`)- **Definition**: Given two matrices $A=\left( \boldsymbol{a}_1,\boldsymbol{a}_2,...,\boldsymbol{a}_r \right)\in\mathbb{R}^{m\times r}$ and $B=\left( \boldsymbol{b}_1,\boldsymbol{b}_2,...,\boldsymbol{b}_r \right)\in\mathbb{R}^{n\times r}$ with the same number of columns, the **Khatri-Rao product** (or **column-wise Kronecker product**) between $A$ and $B$ is given as follows,$$A\odot B=\left( \boldsymbol{a}_1\otimes \boldsymbol{b}_1,\boldsymbol{a}_2\otimes \boldsymbol{b}_2,...,\boldsymbol{a}_r\otimes \boldsymbol{b}_r \right)\in\mathbb{R}^{(mn)\times r}$$where the symbol $\odot$ denotes the Khatri-Rao product, and $\otimes$ denotes the Kronecker product.- **Example**: If $A=\left[ \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right]=\left( \boldsymbol{a}_1,\boldsymbol{a}_2 \right) $ and $B=\left[ \begin{array}{cc} 5 & 6 \\ 7 & 8 \\ 9 & 10 \\ \end{array} \right]=\left( \boldsymbol{b}_1,\boldsymbol{b}_2 \right) $, then we have$$A\odot B=\left( \boldsymbol{a}_1\otimes \boldsymbol{b}_1,\boldsymbol{a}_2\otimes \boldsymbol{b}_2 \right) $$$$=\left[ \begin{array}{cc} \left[ \begin{array}{c} 1 \\ 3 \\ \end{array} \right]\otimes \left[ \begin{array}{c} 5 \\ 7 \\ 9 \\ \end{array} \right] & \left[ \begin{array}{c} 2 \\ 4 \\ \end{array} \right]\otimes \left[ \begin{array}{c} 6 \\ 8 \\ 10 \\ \end{array} \right] \\ \end{array} \right]$$$$=\left[ \begin{array}{cc} 5 & 12 \\ 7 & 16 \\ 9 & 20 \\ 15 & 24 \\ 21 & 32 \\ 27 & 40 \\ \end{array} \right]\in\mathbb{R}^{6\times 2}.$$
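 As a quick sanity check of the worked Kronecker example above, NumPy's built-in `np.kron` reproduces the same $4\times 6$ matrix:
###Code
# Verify the hand-computed Kronecker product example with np.kron.
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6, 7], [8, 9, 10]])
print(np.kron(A, B))        # matches the 4 x 6 matrix above
print(np.kron(A, B).shape)  # (4, 6)
###Output
_____no_output_____
###Markdown
 And here is the Khatri-Rao product as implemented for BTMF: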
###Code
def kr_prod(a, b):
return np.einsum('ir, jr -> ijr', a, b).reshape(a.shape[0] * b.shape[0], -1)
A = np.array([[1, 2], [3, 4]])
B = np.array([[5, 6], [7, 8], [9, 10]])
print(kr_prod(A, B))
def BTMF(dense_mat, sparse_mat, init, time_lags, maxiter1, maxiter2):
"""Bayesian Temporal Matrix Factorization, BTMF."""
W = init["W"]
X = init["X"]
theta = init["theta"]
d=theta.shape[0]
dim1 = sparse_mat.shape[0]
dim2 = sparse_mat.shape[1]
rank = W.shape[1]
pos = np.where((dense_mat > 0) & (sparse_mat == 0))
position = np.where(sparse_mat > 0)
binary_mat = np.zeros((dim1, dim2))
binary_mat[position] = 1
tau = 1
alpha = 1e-6
beta = 1e-6
beta0 = 1
nu0 = rank
mu0 = np.zeros((rank))
W0 = np.eye(rank)
for iter in range(maxiter1):
W_bar = np.mean(W, axis = 0)
var_mu0 = (dim1 * W_bar + beta0 * mu0)/(dim1 + beta0)
var_nu = dim1 + nu0
var_W = np.linalg.inv(np.linalg.inv(W0)
+ dim1 * np.cov(W.T) + dim1 * beta0/(dim1 + beta0)
* np.outer(W_bar - mu0, W_bar - mu0))
var_W = (var_W + var_W.T)/2
var_mu0, var_Lambda0 = Normal_Wishart(var_mu0, dim1 + beta0, var_W, var_nu, seed = None)
var1 = X.T
var2 = kr_prod(var1, var1)
var3 = tau * np.matmul(var2, binary_mat.T).reshape([rank, rank,
dim1]) + np.dstack([var_Lambda0] * dim1)
var4 = tau * np.matmul(var1, sparse_mat.T) + np.dstack([np.matmul(var_Lambda0,
var_mu0)] * dim1)[0, :, :]
for i in range(dim1):
var_Lambda1 = var3[ :, :, i]
inv_var_Lambda1 = np.linalg.inv((var_Lambda1 + var_Lambda1.T)/2)
var_mu = np.matmul(inv_var_Lambda1, var4[:, i])
W[i, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda1)
var_nu = dim2 + nu0
mat0 = X[0 : np.max(time_lags), :]
mat = np.matmul(mat0.T, mat0)
new_mat = np.zeros((dim2 - np.max(time_lags), rank))
for t in range(dim2 - np.max(time_lags)):
new_mat[t, :] = X[t + np.max(time_lags), :] - np.einsum('ij, ij -> j',
theta, X[t + np.max(time_lags)
- time_lags, :])
mat += np.matmul(new_mat.T, new_mat)
var_W = np.linalg.inv(np.linalg.inv(W0) + mat)
var_W = (var_W + var_W.T)/2
Lambda_x = wishart(df = var_nu, scale = var_W, seed = None).rvs()
var1 = W.T
var2 = kr_prod(var1, var1)
var3 = tau * np.matmul(var2, binary_mat).reshape([rank, rank,
dim2]) + np.dstack([Lambda_x] * dim2)
var4 = tau * np.matmul(var1, sparse_mat)
for t in range(dim2):
Mt = np.zeros((rank, rank))
Nt = np.zeros(rank)
if t >= 0 and t <= np.max(time_lags) - 1:
Qt = np.zeros(rank)
else:
Qt = np.matmul(Lambda_x, np.einsum('ij, ij -> j', theta, X[t - time_lags, :]))
if t >= 0 and t <= dim2 - np.min(time_lags) - 1:
if t > np.max(time_lags) - 1 and t <= dim2 - np.max(time_lags) - 1:
index = list(range(0, d))
else:
index = list(np.where((t + time_lags > np.max(time_lags) - 1)
& (t + time_lags <= dim2 - 1)))[0]
for k in index:
Ak = theta[k, :]
Mt += np.multiply(np.outer(Ak, Ak), Lambda_x)
theta0 = theta.copy()
theta0[k, :] = 0
var5 = X[t + time_lags[k], :] - np.einsum('ij, ij -> j',
theta0, X[t + time_lags[k]
- time_lags, :])
Nt += np.matmul(np.matmul(np.diag(Ak), Lambda_x), var5)
var_mu = var4[:, t] + Nt + Qt
var_Lambda = var3[:, :, t] + Mt
inv_var_Lambda = np.linalg.inv((var_Lambda + var_Lambda.T)/2)
var_mu = np.matmul(inv_var_Lambda, var_mu)
X[t, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda)
mat_hat = np.matmul(W, X.T)
rmse = np.sqrt(np.sum((dense_mat[pos] - mat_hat[pos]) ** 2)/dense_mat[pos].shape[0])
var_alpha = alpha + 0.5 * sparse_mat[position].shape[0]
error = sparse_mat - mat_hat
var_beta = beta + 0.5 * np.sum(error[position] ** 2)
tau = np.random.gamma(var_alpha, 1/var_beta)
theta_bar = np.mean(theta, axis = 0)
var_mu0 = (d * theta_bar + beta0 * mu0)/(d + beta0)
var_nu = d + nu0
var_W = np.linalg.inv(np.linalg.inv(W0)
+ d * np.cov(theta.T) + d * beta0/(d + beta0)
* np.outer(theta_bar - mu0, theta_bar - mu0))
var_W = (var_W + var_W.T)/2
mu_theta, Lambda_theta = Normal_Wishart(var_mu0, d + beta0, var_W, var_nu, seed = None)
for k in range(d):
theta0 = theta.copy()
theta0[k, :] = 0
mat0 = np.zeros((dim2 - np.max(time_lags), rank))
for L in range(d):
mat0 += np.matmul(X[np.max(time_lags) - time_lags[L] : dim2 - time_lags[L] , :],
np.diag(theta0[L, :]))
VarPi = X[np.max(time_lags) : dim2 , :] - mat0
mat1 = np.zeros((rank, rank))
mat2 = np.zeros(rank)
for t in range(np.max(time_lags), dim2):
B = X[t - time_lags[k], :]
mat1 += np.multiply(np.outer(B, B), Lambda_x)
mat2 += np.matmul(np.matmul(np.diag(B), Lambda_x), VarPi[t - np.max(time_lags), :])
var_Lambda = mat1 + Lambda_theta
inv_var_Lambda = np.linalg.inv((var_Lambda + var_Lambda.T)/2)
var_mu = np.matmul(inv_var_Lambda, mat2 + np.matmul(Lambda_theta, mu_theta))
theta[k, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda)
if (iter + 1) % 200 == 0:
print('Iter: {}'.format(iter + 1))
print('RMSE: {:.6}'.format(rmse))
print()
W_plus = np.zeros((dim1, rank))
X_plus = np.zeros((dim2, rank))
theta_plus = np.zeros((d, rank))
mat_hat_plus = np.zeros((dim1, dim2))
for iter in range(maxiter2):
W_bar = np.mean(W, axis = 0)
var_mu0 = (dim1 * W_bar + beta0 * mu0)/(dim1 + beta0)
var_nu = dim1 + nu0
var_W = np.linalg.inv(np.linalg.inv(W0)
+ dim1 * np.cov(W.T) + dim1 * beta0/(dim1 + beta0)
* np.outer(W_bar - mu0, W_bar - mu0))
var_W = (var_W + var_W.T)/2
var_mu0, var_Lambda0 = Normal_Wishart(var_mu0, dim1 + beta0, var_W, var_nu, seed = None)
var1 = X.T
var2 = kr_prod(var1, var1)
var3 = tau * np.matmul(var2, binary_mat.T).reshape([rank, rank,
dim1]) + np.dstack([var_Lambda0] * dim1)
var4 = tau * np.matmul(var1, sparse_mat.T) + np.dstack([np.matmul(var_Lambda0,
var_mu0)] * dim1)[0, :, :]
for i in range(dim1):
var_Lambda1 = var3[ :, :, i]
inv_var_Lambda1 = np.linalg.inv((var_Lambda1 + var_Lambda1.T)/2)
var_mu = np.matmul(inv_var_Lambda1, var4[:, i])
W[i, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda1)
W_plus += W
var_nu = dim2 + nu0
mat0 = X[0 : max(time_lags), :]
mat = np.matmul(mat0.T, mat0)
new_mat = np.zeros((dim2 - max(time_lags), rank))
for t in range(dim2 - np.max(time_lags)):
new_mat[t, :] = X[t + np.max(time_lags), :] - np.einsum('ij, ij -> j',
theta, X[t + np.max(time_lags)
- time_lags, :])
mat += np.matmul(new_mat.T, new_mat)
var_W = np.linalg.inv(np.linalg.inv(W0) + mat)
var_W = (var_W + var_W.T)/2
Lambda_x = wishart(df = var_nu, scale = var_W, seed = None).rvs()
var1 = W.T
var2 = kr_prod(var1,var1)
var3 = tau * np.matmul(var2, binary_mat).reshape([rank, rank,
dim2]) + np.dstack([Lambda_x] * dim2)
var4 = tau * np.matmul(var1, sparse_mat)
for t in range(dim2):
Mt = np.zeros((rank, rank))
Nt = np.zeros(rank)
if t >= 0 and t <= np.max(time_lags) - 1:
Qt = np.zeros(rank)
else:
Qt = np.matmul(Lambda_x, np.einsum('ij, ij -> j', theta, X[t - time_lags, :]))
if t >= 0 and t <= dim2 - np.min(time_lags) - 1:
if t > np.max(time_lags) - 1 and t <= dim2 - np.max(time_lags) - 1:
index = list(range(0, d))
else:
index = list(np.where((t + time_lags > np.max(time_lags) - 1)
& (t + time_lags <= dim2 - 1)))[0]
for k in index:
Ak = theta[k, :]
Mt += np.multiply(np.outer(Ak, Ak), Lambda_x)
theta0 = theta.copy()
theta0[k, :] = 0
var5 = X[t + time_lags[k], :] - np.einsum('ij, ij -> j',
theta0, X[t + time_lags[k]
- time_lags, :])
Nt += np.matmul(np.matmul(np.diag(Ak), Lambda_x), var5)
var_mu = var4[:, t] + Nt + Qt
var_Lambda = var3[:, :, t] + Mt
inv_var_Lambda = np.linalg.inv((var_Lambda + var_Lambda.T)/2)
var_mu = np.matmul(inv_var_Lambda, var_mu)
X[t, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda)
X_plus += X
mat_hat = np.matmul(W, X.T)
mat_hat_plus += mat_hat
var_alpha = alpha + 0.5 * sparse_mat[position].shape[0]
error = sparse_mat - mat_hat
var_beta = beta + 0.5 * np.sum(error[position] ** 2)
tau = np.random.gamma(var_alpha, 1/var_beta)
theta_bar = np.mean(theta, axis = 0)
var_mu0 = (d * theta_bar + beta0 * mu0)/(d + beta0)
var_nu = d + nu0
var_W = np.linalg.inv(np.linalg.inv(W0)
+ d * np.cov(theta.T) + d * beta0/(d + beta0)
* np.outer(theta_bar - mu0, theta_bar - mu0))
var_W = (var_W + var_W.T)/2
mu_theta, Lambda_theta = Normal_Wishart(var_mu0, d + beta0, var_W, var_nu, seed = None)
for k in range(d):
theta0 = theta.copy()
theta0[k, :] = 0
mat0 = np.zeros((dim2 - np.max(time_lags), rank))
for L in range(d):
mat0 += np.matmul(X[np.max(time_lags) - time_lags[L] : dim2 - time_lags[L] , :],
np.diag(theta0[L, :]))
VarPi = X[np.max(time_lags) : dim2 , :] - mat0
mat1 = np.zeros((rank, rank))
mat2 = np.zeros(rank)
for t in range(np.max(time_lags), dim2):
B = X[t - time_lags[k], :]
mat1 += np.multiply(np.outer(B, B), Lambda_x)
mat2 += np.matmul(np.matmul(np.diag(B), Lambda_x), VarPi[t - max(time_lags), :])
var_Lambda = mat1 + Lambda_theta
inv_var_Lambda = np.linalg.inv((var_Lambda + var_Lambda.T)/2)
var_mu = np.matmul(inv_var_Lambda, mat2 + np.matmul(Lambda_theta, mu_theta))
theta[k, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda)
theta_plus += theta
W = W_plus/maxiter2
X = X_plus/maxiter2
theta = theta_plus/maxiter2
mat_hat = mat_hat_plus/maxiter2
final_mape = np.sum(np.abs(dense_mat[pos] -
mat_hat[pos])/dense_mat[pos])/dense_mat[pos].shape[0]
final_rmse = np.sqrt(np.sum((dense_mat[pos] -
mat_hat[pos])**2)/dense_mat[pos].shape[0])
return W, X, theta
###Output
_____no_output_____
###Markdown
 Data Organization Part 1: Matrix Structure. We consider a dataset of $m$ discrete time series $\boldsymbol{y}_{i}\in\mathbb{R}^{f},i\in\left\{1,2,...,m\right\}$. The time series may have missing elements. We express the spatio-temporal dataset as a matrix $Y\in\mathbb{R}^{m\times f}$ with $m$ rows (e.g., locations) and $f$ columns (e.g., discrete time intervals),$$Y=\left[ \begin{array}{cccc} y_{11} & y_{12} & \cdots & y_{1f} \\ y_{21} & y_{22} & \cdots & y_{2f} \\ \vdots & \vdots & \ddots & \vdots \\ y_{m1} & y_{m2} & \cdots & y_{mf} \\ \end{array} \right]\in\mathbb{R}^{m\times f}.$$ Part 2: Tensor Structure. We consider a dataset of $m$ discrete time series $\boldsymbol{y}_{i}\in\mathbb{R}^{nf},i\in\left\{1,2,...,m\right\}$. The time series may have missing elements. We partition each time series into intervals of predefined length $f$. We express each partitioned time series as a matrix $Y_{i}$ with $n$ rows (e.g., days) and $f$ columns (e.g., discrete time intervals per day),$$Y_{i}=\left[ \begin{array}{cccc} y_{11} & y_{12} & \cdots & y_{1f} \\ y_{21} & y_{22} & \cdots & y_{2f} \\ \vdots & \vdots & \ddots & \vdots \\ y_{n1} & y_{n2} & \cdots & y_{nf} \\ \end{array} \right]\in\mathbb{R}^{n\times f},i=1,2,...,m,$$therefore, the resulting structure is a tensor $\mathcal{Y}\in\mathbb{R}^{m\times n\times f}$. **How to transform a data set into something we can use for time series prediction?** Now we have the data in an easy-to-use form by parsing [**Urban Traffic Speed Dataset of Guangzhou, China**](http://doi.org/10.5281/zenodo.1205229).
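 Before loading the real data, here is a tiny synthetic illustration (made-up numbers) of the flattening used below, from an $m\times n\times f$ tensor to the $m\times (nf)$ matrix form that BTMF consumes:
###Code
# Hedged illustration: m = 2 locations, n = 3 days, f = 4 intervals per day.
demo_tensor = np.arange(2 * 3 * 4).reshape([2, 3, 4])
demo_mat = demo_tensor.reshape([demo_tensor.shape[0], demo_tensor.shape[1] * demo_tensor.shape[2]])
print(demo_mat.shape)  # (2, 12): each row is one location's full time series
###Output
_____no_output_____
###Markdown
 Now let's load the Guangzhou dataset: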
###Code
import scipy.io
tensor = scipy.io.loadmat('Guangzhou-data-set/tensor.mat')
tensor = tensor['tensor']
random_matrix = scipy.io.loadmat('Guangzhou-data-set/random_matrix.mat')
random_matrix = random_matrix['random_matrix']
random_tensor = scipy.io.loadmat('Guangzhou-data-set/random_tensor.mat')
random_tensor = random_tensor['random_tensor']
dense_mat = tensor.reshape([tensor.shape[0], tensor.shape[1] * tensor.shape[2]])
missing_rate = 0.2
# =============================================================================
### Random missing (RM) scenario:
### ------------------------------
### missing rate | 0.2 | 0.4 |
### rank | 80 | 80 |
### ------------------------------
### Set the RM scenario by:
# binary_mat = np.round(random_tensor + 0.5 - missing_rate).reshape([random_tensor.shape[0],
# random_tensor.shape[1]
# * random_tensor.shape[2]])
# =============================================================================
# =============================================================================
### Non-random missing (NM) scenario:
### ------------------------------
### missing rate | 0.2 | 0.4 |
### rank | 10 | 10 |
### ------------------------------
### Set the NM scenario by:
binary_tensor = np.zeros(tensor.shape)
for i1 in range(tensor.shape[0]):
for i2 in range(tensor.shape[1]):
binary_tensor[i1,i2,:] = np.round(random_matrix[i1,i2] + 0.5 - missing_rate)
binary_mat = binary_tensor.reshape([binary_tensor.shape[0], binary_tensor.shape[1]
* binary_tensor.shape[2]])
# =============================================================================
sparse_mat = np.multiply(dense_mat, binary_mat)
###Output
_____no_output_____
###Markdown
 Rolling Spatiotemporal Prediction**In plain terms**: If we have a partially observed matrix $Y\in\mathbb{R}^{m\times T}$, how do we do a single-step rolling prediction starting at the time interval $f+1$ and ending at the time interval $T$? The mechanism is: 1. First learn spatial factors $W\in\mathbb{R}^{m\times r}$, temporal factors $X\in\mathbb{R}^{f\times r}$, and AR coefficients $\boldsymbol{\theta}_{s}\in\mathbb{R}^{d},s=1,2,...,r$ from the partially observed matrix $Y\in\mathbb{R}^{m\times f}$. 2. Predict $\boldsymbol{x}_{f+1}$ by$$\hat{\boldsymbol{x}}_{f+1}=\sum_{k=1}^{d}\boldsymbol{\theta}_{k}\circledast\boldsymbol{x}_{f+1-h_k}.$$3. Load the partially observed matrix $Y_{f}\in\mathbb{R}^{m\times b}$ ($b$ is the number of back steps), fix the spatial factors $W\in\mathbb{R}^{m\times r}$ and AR coefficients $\boldsymbol{\theta}_{s}\in\mathbb{R}^{d},s=1,2,...,r$, then learn temporal factors $X\in\mathbb{R}^{b\times r}$. 4. Re-estimate the AR coefficients $\boldsymbol{\theta}_{s}\in\mathbb{R}^{d},s=1,2,...,r$ with the least-squares formula given below under "How to estimate AR coefficients?". 5. Predict $\boldsymbol{x}_{f+2}$ by$$\hat{\boldsymbol{x}}_{f+2}=\sum_{k=1}^{d}\boldsymbol{\theta}_{k}\circledast\boldsymbol{x}_{b+1-h_k}.$$6. Make predictions iteratively until the time step $T$. How to estimate AR coefficients?$$\hat{\boldsymbol{\theta}}=\left(Q^\top\Sigma_{\eta}^{-1}Q+\Sigma_{\theta}^{-1}\right)^{-1}Q^\top\Sigma_{\eta}^{-1}P$$where$$Q=[\tilde{\boldsymbol{x}}_{h_d+1},\cdots,\tilde{\boldsymbol{x}}_{T}]^{\top}\in\mathbb{R}^{T'\times d}$$and$$P=[x_{h_d+1},\cdots,x_{T}]^{\top}.$$
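 Here is a small self-contained sketch (random numbers) of the one-step AR forecast used in steps 2 and 5: each latent temporal dimension is predicted from its lagged values via `np.einsum`, exactly the pattern used in the code below.
###Code
# Hedged sketch of x_hat_t = sum_k theta_k * x_{t - h_k} (element-wise per latent factor).
demo_rank = 3
demo_time_lags = np.array([1, 2, 4])
demo_X = np.random.rand(10, demo_rank)                  # 10 previously learned temporal factors
demo_theta = np.random.rand(demo_time_lags.shape[0], demo_rank)
demo_x_next = np.einsum('ij, ij -> j', demo_theta, demo_X[10 - demo_time_lags, :])
print(demo_x_next.shape)                                # (3,): one forecast value per latent factor
###Output
_____no_output_____
###Markdown
 The offline routine below re-estimates the temporal factors and AR coefficients at each rolling step: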
###Code
def OfflineBTMF(sparse_mat, init, time_lags, maxiter1, maxiter2):
"""Offline Bayesain Temporal Matrix Factorization"""
W = init["W"]
X = init["X"]
theta = init["theta"]
d=theta.shape[0]
dim1 = sparse_mat.shape[0]
dim2 = sparse_mat.shape[1]
rank = W.shape[1]
position = np.where(sparse_mat > 0)
binary_mat = np.zeros((dim1, dim2))
binary_mat[position] = 1
tau = 1
alpha = 1e-6
beta = 1e-6
beta0 = 1
nu0 = rank
mu0 = np.zeros((rank))
W0 = np.eye(rank)
# X_new = np.zeros((dim2, rank))
# X_new[0 : dim2 - 1, :] = X
# X_new[dim2 - 1, :] = np.einsum('ij, ij -> j', theta, X_new[dim2 - 1 - time_lags, :])
for iter in range(maxiter1):
var_nu = dim2 + nu0
mat0 = X[0 : np.max(time_lags), :]
mat = np.matmul(mat0.T, mat0)
new_mat = np.zeros((dim2 - np.max(time_lags), rank))
for t in range(dim2 - np.max(time_lags)):
new_mat[t, :] = X[t + np.max(time_lags), :] - np.einsum('ij, ij -> j',
theta, X[t + np.max(time_lags)
- time_lags, :])
mat += np.matmul(new_mat.T, new_mat)
var_W = np.linalg.inv(np.linalg.inv(W0) + mat)
var_W = (var_W + var_W.T)/2
Lambda_x = wishart(df = var_nu, scale = var_W, seed = None).rvs()
var1 = W.T
var2 = kr_prod(var1, var1)
var3 = tau * np.matmul(var2, binary_mat).reshape([rank, rank,
dim2]) + np.dstack([Lambda_x] * dim2)
var4 = tau * np.matmul(var1, sparse_mat)
for t in range(dim2):
Mt = np.zeros((rank, rank))
Nt = np.zeros(rank)
if t >= 0 and t <= np.max(time_lags) - 1:
Qt = np.zeros(rank)
else:
Qt = np.matmul(Lambda_x, np.einsum('ij, ij -> j', theta, X[t - time_lags, :]))
if t >= 0 and t <= dim2 - np.min(time_lags) - 1:
if t > np.max(time_lags) - 1 and t <= dim2 - np.max(time_lags) - 1:
index = list(range(0, d))
else:
index = list(np.where((t + time_lags > np.max(time_lags) - 1)
& (t + time_lags <= dim2 - 1)))[0]
for k in index:
Ak = theta[k, :]
Mt += np.multiply(np.outer(Ak, Ak), Lambda_x)
theta0 = theta.copy()
theta0[k, :] = 0
var5 = X[t + time_lags[k], :] - np.einsum('ij, ij -> j',
theta0, X[t + time_lags[k]
- time_lags, :])
Nt += np.matmul(np.matmul(np.diag(Ak), Lambda_x), var5)
var_mu = var4[:, t] + Nt + Qt
var_Lambda = var3[:, :, t] + Mt
inv_var_Lambda = np.linalg.inv((var_Lambda + var_Lambda.T)/2)
var_mu = np.matmul(inv_var_Lambda, var_mu)
X[t, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda)
mat_hat = np.matmul(W, X.T)
var_alpha = alpha + 0.5 * sparse_mat[position].shape[0]
error = sparse_mat - mat_hat
var_beta = beta + 0.5 * np.sum(error[position] ** 2)
tau = np.random.gamma(var_alpha, 1/var_beta)
X_plus = np.zeros((dim2, rank))
for iter in range(maxiter2):
var_nu = dim2 + nu0
mat0 = X[0 : np.max(time_lags), :]
mat = np.matmul(mat0.T, mat0)
new_mat = np.zeros((dim2 - np.max(time_lags), rank))
for t in range(dim2 - np.max(time_lags)):
new_mat[t, :] = X[t + np.max(time_lags), :] - np.einsum('ij, ij -> j',
theta, X[t + np.max(time_lags)
- time_lags, :])
mat += np.matmul(new_mat.T, new_mat)
var_W = np.linalg.inv(np.linalg.inv(W0) + mat)
var_W = (var_W + var_W.T)/2
Lambda_x = wishart(df = var_nu, scale = var_W, seed = None).rvs()
var1 = W.T
var2 = kr_prod(var1, var1)
var3 = tau * np.matmul(var2, binary_mat).reshape([rank, rank,
dim2]) + np.dstack([Lambda_x] * dim2)
var4 = tau * np.matmul(var1, sparse_mat)
for t in range(dim2):
Mt = np.zeros((rank, rank))
Nt = np.zeros(rank)
if t >= 0 and t <= np.max(time_lags) - 1:
Qt = np.zeros(rank)
else:
Qt = np.matmul(Lambda_x, np.einsum('ij, ij -> j', theta, X[t - time_lags, :]))
if t >= 0 and t <= dim2 - np.min(time_lags) - 1:
if t > np.max(time_lags) - 1 and t <= dim2 - np.max(time_lags) - 1:
index = list(range(0, d))
else:
index = list(np.where((t + time_lags > np.max(time_lags) - 1)
& (t + time_lags <= dim2 - 1)))[0]
for k in index:
Ak = theta[k, :]
Mt += np.multiply(np.outer(Ak, Ak), Lambda_x)
theta0 = theta.copy()
theta0[k, :] = 0
var5 = X[t + time_lags[k], :] - np.einsum('ij, ij -> j',
theta0, X[t + time_lags[k]
- time_lags, :])
Nt += np.matmul(np.matmul(np.diag(Ak), Lambda_x), var5)
var_mu = var4[:, t] + Nt + Qt
var_Lambda = var3[:, :, t] + Mt
inv_var_Lambda = np.linalg.inv((var_Lambda + var_Lambda.T)/2)
var_mu = np.matmul(inv_var_Lambda, var_mu)
X[t, :] = np.random.multivariate_normal(var_mu, inv_var_Lambda)
X_plus += X
mat_hat = np.matmul(W, X.T)
var_alpha = alpha + 0.5 * sparse_mat[position].shape[0]
error = sparse_mat - mat_hat
var_beta = beta + 0.5 * np.sum(error[position] ** 2)
tau = np.random.gamma(var_alpha, 1/var_beta)
X = X_plus/maxiter2
Sigma_eta = np.eye(dim2 - np.max(time_lags))
Sigma_theta = np.eye(theta.shape[0])
for s in range(rank):
P = X[np.max(time_lags) : dim2, s]
Q = np.zeros((dim2 - np.max(time_lags), theta.shape[0]))
for t in range(np.max(time_lags), dim2):
Q[t - np.max(time_lags), :] = X[t - time_lags, s]
theta[:, s] = np.matmul(np.matmul(np.matmul(np.linalg.inv(np.matmul(np.matmul(Q.T, Sigma_eta), Q)
+ np.linalg.inv(Sigma_theta)),
Q.T), np.linalg.inv(Sigma_eta)), P)
return X, theta
def st_prediction(dense_mat, sparse_mat, pred_time_steps, back_steps, rank, time_lags, maxiter):
start_time = dense_mat.shape[1] - pred_time_steps
dense_mat0 = dense_mat[:, 0 : start_time]
sparse_mat0 = sparse_mat[:, 0 : start_time]
dim1 = sparse_mat0.shape[0]
dim2 = sparse_mat0.shape[1]
mat_hat = np.zeros((dim1, pred_time_steps))
init = {"W": np.random.rand(dim1, rank),
"X": np.random.rand(dim2, rank),
"theta": np.random.rand(time_lags.shape[0], rank)}
W, X, theta = BTMF(dense_mat0, sparse_mat0, init, time_lags, maxiter[0], maxiter[1])
init["W"] = W.copy()
init["theta"] = theta.copy()
X0 = np.zeros((dim2 + 1, rank))
X0[0 : dim2, :] = X.copy()
X0[dim2, :] = np.einsum('ij, ij -> j', theta, X0[dim2 - time_lags, :])
init["X"] = X0[X0.shape[0] - back_steps : X0.shape[0], :]
mat_hat[:, 0] = np.matmul(W, X0[dim2, :])
for t in range(1, pred_time_steps):
dense_mat1 = dense_mat[:, start_time - back_steps + t : start_time + t]
sparse_mat1 = sparse_mat[:, start_time - back_steps + t : start_time + t]
X, theta = OfflineBTMF(sparse_mat1, init, time_lags, maxiter[2], maxiter[3])
init["theta"] = theta.copy()
X0 = np.zeros((back_steps + 1, rank))
X0[0 : back_steps, :] = X.copy()
X0[back_steps, :] = np.einsum('ij, ij -> j', theta, X0[back_steps - time_lags, :])
init["X"] = X0[1: back_steps + 1, :]
mat_hat[:, t] = np.matmul(W, X0[back_steps, :])
if (t + 1) % 40 == 0:
print('Time step: {}'.format(t + 1))
small_dense_mat = dense_mat[:, start_time : dense_mat.shape[1]]
pos = np.where(small_dense_mat > 0)
final_mape = np.sum(np.abs(small_dense_mat[pos] -
mat_hat[pos])/small_dense_mat[pos])/small_dense_mat[pos].shape[0]
final_rmse = np.sqrt(np.sum((small_dense_mat[pos] -
mat_hat[pos]) ** 2)/small_dense_mat[pos].shape[0])
print('Final MAPE: {:.6}'.format(final_mape))
print('Final RMSE: {:.6}'.format(final_rmse))
print()
return mat_hat
###Output
_____no_output_____
###Markdown
The main influential factors for such prediction are:- The number of back steps $b$ (`back_steps`).- `rank`.- `maxiter`.- `time_lags`.
###Code
import time
start = time.time()
pred_time_steps = 144 * 5
back_steps = 144 * 7 * 1
rank = 30
time_lags = np.array([1, 2, 144])
maxiter = np.array([100, 100, 10, 20])
small_dense_mat = dense_mat[:, dense_mat.shape[1] - pred_time_steps : dense_mat.shape[1]]
mat_hat = st_prediction(dense_mat, sparse_mat, pred_time_steps, back_steps, rank, time_lags, maxiter)
end = time.time()
print('Running time: %d seconds'%(end - start))
###Output
Time step: 40
Time step: 80
Time step: 120
Time step: 160
Time step: 200
Time step: 240
Time step: 280
Time step: 320
Time step: 360
Time step: 400
Time step: 440
Time step: 480
Time step: 520
Time step: 560
Time step: 600
Time step: 640
Time step: 680
Time step: 720
Final MAPE: 0.110823
Final RMSE: 4.4694
Running time: 13744 seconds
|
tests/manual/reference implementation visualisation update.ipynb | ###Markdown
 Done- created step plot functionality- created artificial endpoint generation- separated legend label arguments from hover label arguments- improved legend generation- added incomplete target functionality- created reference implementation for: (1) target distribution monitoring (2) realised performance monitoring (3) correct naming of plotting elements Todo- order function arguments by importance and add typing information- clean up the code
###Code
# add incomplete target data functionality
# clean up all arguments
%load_ext autoreload
%autoreload 2
import pandas as pd
import nannyml as nml
from nannyml.plots._step_plot import _step_plot
reference, analysis, analysis_target = nml.load_synthetic_binary_classification_dataset()
chunk_size = 5000
reference_and_analysis = pd.concat([reference, analysis], ignore_index=True)
metadata = nml.extract_metadata(data = reference, model_name='wfh_predictor')
metadata.target_column_name = 'work_home_actual'
CHUNK_KEY_COLUMN_NAME = 'key'
###Output
_____no_output_____
###Markdown
Confidence based performance estimation
###Code
estimator = nml.CBPE(model_metadata=metadata, chunk_size=chunk_size)
estimator.fit(reference)
estimated_performance = estimator.estimate(data=reference_and_analysis)
estimation_results = estimated_performance.data
###Output
_____no_output_____
###Markdown
 Still a bug here: roc_auc in the reference period should not be estimated. Suggestion: add a realised_roc_auc column to the output ==> then that column can be combined with the estimated roc_auc column to create the metric column, and `estimated` can be set to False in the reference period. A rough sketch of that combination is below.
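 The sketch assumes a hypothetical `realised_roc_auc` column that does not exist in the current output schema, so it is kept commented out:
###Code
# Hedged sketch: prefer a (hypothetical) realised column and fall back to the estimate,
# flagging a row as estimated only where no realised value exists.
# fixed = estimation_results.copy()
# fixed['roc_auc'] = fixed['realised_roc_auc'].combine_first(fixed['estimated_roc_auc'])
# fixed['estimated'] = fixed['realised_roc_auc'].isna()
###Output
_____no_output_____
###Markdown
 For now, we keep the current behaviour and plot the estimated metric: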
###Code
estimation_results['thresholds'] = list(zip(estimation_results.lower_threshold, estimation_results.upper_threshold))
estimation_results['estimated'] = True
plot_partition_separator = len(estimation_results.value_counts()) > 1
fig = _step_plot(
table=estimation_results,
metric_column_name='estimated_roc_auc',
chunk_column_name=CHUNK_KEY_COLUMN_NAME,
drift_column_name='alert',
threshold_column_name='thresholds',
threshold_legend_label='Performance threshold',
title='ROC AUC over time (realised and estimated)',
y_axis_title='ROC AUC',
v_line_separating_analysis_period=plot_partition_separator,
estimated_column_name='estimated',
confidence_column_name='confidence',
hover_labels=['Chunk', 'ROC AUC'],
# hover_marker_labels=['Reference', 'No performance drop', 'Probable performance drop'],
hover_marker_labels=['', '', ''],
chunk_legend_labels=['Reference period (realised performance)', 'Analysis period (estimated performance, using CBPE)'],
confidence_legend_label='Estimated performance confidence band',
drift_legend_label='Probable drop in performance',
)
fig.show()
import plotly.graph_objects as go
import numpy as np
# data = pd.DataFrame(
# {'end_date': pd.date_range('2021-01-01', '2021-08-30', freq='M'),
# 'chunk_type': ['p1'] * 3 + ['p2'] * 4,
# 'metric':np.arange(7)}
# )
# data.insert(0, 'start_date', data['end_date'] - pd.offsets.MonthBegin())
# data['mid_point_date'] = data['start_date'] + (data['end_date'] - data['start_date']) / 2
# def _create_artificial_end_point(data):
# data_point_hack = data.tail(1).copy()
# data_point_hack['start_date'] = data_point_hack['end_date']
# data_point_hack['end_date'] = pd.NaT
# data_point_hack['mid_point_date'] = pd.NaT
# data_point_hack.index = data_point_hack.index + 1
# return pd.concat([data, data_point_hack], axis=0)
# fig = go.Figure()
# colors = ['blue', 'red']
# chunk_types = data['chunk_type'].unique()
# for i, chunk_type in enumerate(chunk_types):
# data_subset = create_artificial_end_point(data.loc[(data['chunk_type'] == chunk_type)])
# display(data_subset)
# fig.add_trace(
# go.Scatter(
# mode='lines',
# line=dict(shape='hv', color=colors[i]),
# x=data_subset['start_date'],
# y=data_subset['metric'],
# hoverinfo='skip'
# )
# )
# fig.add_trace(
# go.Scatter(
# mode='markers',
# marker=dict(color=colors[i]),
# x=data_subset['mid_point_date'],
# y=data_subset['metric']
# )
# )
# fig
###Output
_____no_output_____
###Markdown
Reconstruction error
###Code
rcerror_calculator = nml.DataReconstructionDriftCalculator(model_metadata=metadata, chunk_size=chunk_size)
rcerror_calculator.fit(reference_data=reference)
rcerror_results = rcerror_calculator.calculate(data=reference_and_analysis)
data = rcerror_results.data
plot_partition_separator = len(data.value_counts()) > 1
data['thresholds'] = list(zip(data.lower_threshold, data.upper_threshold))
fig = _step_plot(
table=data,
metric_column_name='reconstruction_error',
chunk_column_name=CHUNK_KEY_COLUMN_NAME,
drift_column_name='alert',
threshold_column_name='thresholds',
title='Reconstruction error over time',
y_axis_title='Reconstruction error',
v_line_separating_analysis_period=plot_partition_separator,
hover_labels=['Chunk', 'RC Error'],
hover_marker_labels=['', '', '']
)
fig.show()
###Output
_____no_output_____
###Markdown
 Continuous/categorical univariate data drift
###Code
univariate_calculator = nml.UnivariateStatisticalDriftCalculator(model_metadata=metadata, chunk_size=chunk_size)
univariate_calculator.fit(reference_data=reference)
univariate_results = univariate_calculator.calculate(data=reference_and_analysis)
data = univariate_results.data
metric_column_name, _, drift_column_name, _ = [c for c in univariate_results.data.columns if c.startswith('tenure')]
metric_label = metric_column_name.split('_')[0]
plot_partition_separator = len(data.value_counts()) > 1
fig = _step_plot(
table=data,
metric_column_name=metric_column_name,
chunk_column_name=CHUNK_KEY_COLUMN_NAME,
drift_column_name=drift_column_name,
# threshold_column_name=threshold_column_name,
title='KS D-statistic of {} over time'.format(metric_label),
y_axis_title='D-statistic',
v_line_separating_analysis_period=plot_partition_separator,
statistically_significant_column_name=drift_column_name,
hover_labels=['Chunk', 'D-statistic'],
hover_marker_labels=['', '', '']
)
fig.show()
###Output
_____no_output_____
###Markdown
Realised performance monitoring
###Code
rperf_results = estimation_results.copy()
rperf_results = rperf_results.rename(columns={'estimated_roc_auc':'roc_auc'})
rperf_results = rperf_results.drop(columns=['confidence', 'estimated'])
rperf_results['realised_target_percentage'] = 1
rperf_results.iloc[-8:, -1] = [0.33] * 8
###Output
_____no_output_____
###Markdown
 When the realised target percentage < 0.25, rows should be removed! Use with caution, as it is assumed the incompletion rate is monotonically decreasing from the moment it happens. See the example in target distribution monitoring below; a sketch of the filter follows.
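 A one-line sketch of that filter for the realised-performance table, left commented out here so the full series still shows up in the plot:
###Code
# Hedged sketch: drop chunks whose realised target data is too incomplete to trust.
# rperf_results = rperf_results.loc[rperf_results['realised_target_percentage'] > 0.25, ]
###Output
_____no_output_____
###Markdown
 Plotting the realised performance: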
###Code
plot_partition_separator = len(estimation_results.value_counts()) > 1
fig = _step_plot(
table=rperf_results,
metric_column_name='roc_auc',
chunk_column_name=CHUNK_KEY_COLUMN_NAME,
drift_column_name='alert',
threshold_column_name='thresholds',
threshold_legend_label='Performance threshold',
title='ROC AUC over time (realised)',
y_axis_title='ROC AUC',
v_line_separating_analysis_period=plot_partition_separator,
hover_labels=['Chunk', 'ROC AUC', 'Target data'],
# hover_marker_labels=['Reference', 'No performance change', 'Performance change'],
hover_marker_labels=['', '', ''],
drift_legend_label='Change in performance',
partial_target_column_name='realised_target_percentage'
)
fig.show()
###Output
_____no_output_____
###Markdown
Target distribution monitoring
###Code
tdist_results = estimation_results.copy()
tdist_results = tdist_results.drop(columns=['confidence', 'estimated'])
tdist_results = tdist_results.rename(columns={'estimated_roc_auc':'churn_rate'})
tdist_results['churn_rate'] = tdist_results['churn_rate'] - 0.9
tdist_results.lower_threshold = tdist_results.lower_threshold - 0.9
tdist_results.upper_threshold = tdist_results.upper_threshold - 0.9
tdist_results['thresholds'] = list(zip(tdist_results.lower_threshold, tdist_results.upper_threshold))
tdist_results['realised_target_percentage'] = 1
tdist_results.iloc[-3:, -1] = [0.75, 0.33, 0.1]
###Output
_____no_output_____
###Markdown
 When the realised target percentage < 0.25, rows should be removed! Use with caution, as it is assumed the incompletion rate is monotonically decreasing from the moment it happens.
###Code
tdist_results = tdist_results.loc[tdist_results['realised_target_percentage'] > 0.25, ]
plot_partition_separator = len(estimation_results.value_counts()) > 1
fig = _step_plot(
table=tdist_results,
metric_column_name='churn_rate',
chunk_column_name=CHUNK_KEY_COLUMN_NAME,
drift_column_name='alert',
statistically_significant_column_name='alert',
drift_legend_label='Target drift',
threshold_column_name='thresholds',
threshold_legend_label='Target threshold',
title='Churn rate over time',
y_axis_title='Churn rate',
v_line_separating_analysis_period=plot_partition_separator,
hover_labels=['Chunk', 'Churn rate', 'Target data'],
# hover_marker_labels=['Reference', 'No target drift', 'Target drift'],
hover_marker_labels=['', '', ''],
partial_target_column_name='realised_target_percentage'
)
fig.show()
###Output
_____no_output_____ |
Notebooks/Modul-2 (NumPy)/Modul-2(Numpy)/#0_numpy_intro.ipynb | ###Markdown
 Data Science va Sun'iy Intellekt Praktikum Ma'lumotlar tahlili. (NumPy kutubxonasi) NumPy kutubxonasini chaqirib olish
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
 Let's look at the difference in computation speed between a plain Python list and a NumPy array (vectorized computation).
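 First, a quick look at what "vectorized" means in practice: the arithmetic is applied element-wise to the whole array at once, without an explicit Python loop.
###Code
# A small element-wise example before the timing comparison below.
a = np.arange(5)   # array([0, 1, 2, 3, 4])
print(a * 2)       # array([0, 2, 4, 6, 8]) -- no Python-level loop needed
print(a + a)       # array([0, 2, 4, 6, 8])
###Output
_____no_output_____
###Markdown
 Now the timing comparison: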
###Code
my_list = list(range(100000))       # Python list, 0~99999 --> plain (loop-based)
my_array = np.array(range(100000))  # NumPy array, 0~99999 --> vectorized
%time for _ in range(10): [x*2 for x in my_list]  # plain Python loop
%time for _ in range(10): my_array*2              # vectorized
105/3.14
###Output
_____no_output_____ |
jupyter/training/french/Train-Perceptron-French.ipynb | ###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/training/french/Train-Perceptron-French.ipynb) 0. Colab Setup
###Code
import os
# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
# Install pyspark
! pip install --ignore-installed pyspark==2.4.4
# Install Spark NLP
! pip install --ignore-installed spark-nlp
###Output
openjdk version "1.8.0_252"
OpenJDK Runtime Environment (build 1.8.0_252-8u252-b09-1~18.04-b09)
OpenJDK 64-Bit Server VM (build 25.252-b09, mixed mode)
[K |████████████████████████████████| 215.7MB 61kB/s
[K |████████████████████████████████| 204kB 44.1MB/s
[?25h Building wheel for pyspark (setup.py) ... [?25l[?25hdone
[K |████████████████████████████████| 122kB 2.7MB/s
[?25h
###Markdown
Train POS Tagger in French by Spark NLP Based on Universal Dependency `UD_French-GSD` version 2.3
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
###Output
_____no_output_____
###Markdown
Let's create a Spark Session for our app
###Code
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
###Output
Spark NLP version: 2.5.0
Apache Spark version: 2.4.4
###Markdown
Let's prepare our training datasets containing `token_posTag` like `de_DET`. You can download this data set from Amazon S3:```wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp```
###Code
! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp
from sparknlp.training import POS
training_data = POS().readDataset(
spark=spark,
path="/tmp/UD_French-GSD_2.3.txt",
delimiter="_",
outputPosCol="tags",
outputDocumentCol="document",
outputTextCol="text"
)
training_data.show()
document_assembler = DocumentAssembler() \
.setInputCol("text")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")\
.setPrefixPattern("\\A([^\\s\\p{L}\\d\\$\\.#]*)")\
.setSuffixPattern("([^\\s\\p{L}\\d]?)([^\\s\\p{L}\\d]*)\\z")\
.setInfixPatterns([
"([\\p{L}\\w]+'{1})",
"([\\$#]?\\d+(?:[^\\s\\d]{1}\\d+)*)",
"((?:\\p{L}\\.)+)",
"((?:\\p{L}+[^\\s\\p{L}]{1})+\\p{L}+)",
"([\\p{L}\\w]+)"
])
posTagger = PerceptronApproach() \
.setNIterations(6) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("pos") \
.setPosCol("tags")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
posTagger
])
%%time
# Let's train our Pipeline by using our training dataset
model = pipeline.fit(training_data)
###Output
CPU times: user 68.2 ms, sys: 17.6 ms, total: 85.8 ms
Wall time: 2min 40s
###Markdown
This is our testing DataFrame where we get some sentences in French. We are going to use our trained Pipeline to transform these sentences and predict each token's `Part Of Speech`.
###Code
dfTest = spark.createDataFrame([
"Je sens qu'entre ça et les films de médecins et scientifiques fous que nous avons déjà vus, nous pourrions emprunter un autre chemin pour l'origine.",
"On pourra toujours parler à propos d'Averroès de décentrement du Sujet."
], StringType()).toDF("text")
predict = model.transform(dfTest)
predict.select("token.result", "pos.result").show()
###Output
+--------------------+--------------------+
| result| result|
+--------------------+--------------------+
|[Je, sens, qu'ent...|[PRON, NOUN, VERB...|
|[On, pourra, touj...|[PRON, VERB, ADV,...|
+--------------------+--------------------+
###Markdown
 Train POS Tagger in French by Spark NLP Based on Universal Dependency `UD_French-GSD` version 2.3
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
###Output
_____no_output_____
###Markdown
Let's create a Spark Session for our app
###Code
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
###Output
Spark NLP version: 2.3.4
Apache Spark version: 2.4.3
###Markdown
Let's prepare our training datasets containing `token_posTag` like `de_DET`. You can download this data set from Amazon S3:```wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp```
###Code
! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp
from sparknlp.training import POS
training_data = POS().readDataset(
spark=spark,
path="/tmp/UD_French-GSD_2.3.txt",
delimiter="_",
outputPosCol="tags",
outputDocumentCol="document",
outputTextCol="text"
)
training_data.show()
document_assembler = DocumentAssembler() \
.setInputCol("text")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")\
.setPrefixPattern("\\A([^\\s\\p{L}\\d\\$\\.#]*)")\
.setSuffixPattern("([^\\s\\p{L}\\d]?)([^\\s\\p{L}\\d]*)\\z")\
.setInfixPatterns([
"([\\p{L}\\w]+'{1})",
"([\\$#]?\\d+(?:[^\\s\\d]{1}\\d+)*)",
"((?:\\p{L}\\.)+)",
"((?:\\p{L}+[^\\s\\p{L}]{1})+\\p{L}+)",
"([\\p{L}\\w]+)"
])
posTagger = PerceptronApproach() \
.setNIterations(6) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("pos") \
.setPosCol("tags")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
posTagger
])
%%time
# Let's train our Pipeline by using our training dataset
model = pipeline.fit(training_data)
###Output
CPU times: user 24.1 ms, sys: 9.9 ms, total: 34 ms
Wall time: 1min 9s
###Markdown
This is our testing DataFrame where we get some sentences in French. We are going to use our trained Pipeline to transform these sentences and predict each token's `Part Of Speech`.
###Code
dfTest = spark.createDataFrame([
"Je sens qu'entre ça et les films de médecins et scientifiques fous que nous avons déjà vus, nous pourrions emprunter un autre chemin pour l'origine.",
"On pourra toujours parler à propos d'Averroès de décentrement du Sujet."
], StringType()).toDF("text")
predict = model.transform(dfTest)
predict.select("token.result", "pos.result").show()
###Output
+--------------------+--------------------+
| result| result|
+--------------------+--------------------+
|[Je, sens, qu'ent...|[PRON, NOUN, ADJ,...|
|[On, pourra, touj...|[PRON, VERB, ADV,...|
+--------------------+--------------------+
###Markdown
 Train POS Tagger in French by Spark NLP Based on Universal Dependency `UD_French-GSD` version 2.3
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
###Output
_____no_output_____
###Markdown
Let's create a Spark Session for our app
###Code
spark = sparknlp.start()
print("Spark NLP version")
sparknlp.version()
print("Apache Spark version")
spark.version
###Output
Spark NLP version
2.2.1
Apache Spark version
###Markdown
Let's prepare our training datasets containing `token_posTag` like `de_DET`. You can download this data set from Amazon S3:```wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp```
###Code
! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp
from sparknlp.training import POS
training_data = POS().readDataset(
spark=spark,
path="/tmp/UD_French-GSD_2.3.txt",
delimiter="_",
outputPosCol="tags",
outputDocumentCol="document",
outputTextCol="text"
)
training_data.show()
document_assembler = DocumentAssembler() \
.setInputCol("text")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")\
.setPrefixPattern("\\A([^\\s\\p{L}\\d\\$\\.#]*)")\
.setSuffixPattern("([^\\s\\p{L}\\d]?)([^\\s\\p{L}\\d]*)\\z")\
.setInfixPatterns([
"([\\p{L}\\w]+'{1})",
"([\\$#]?\\d+(?:[^\\s\\d]{1}\\d+)*)",
"((?:\\p{L}\\.)+)",
"((?:\\p{L}+[^\\s\\p{L}]{1})+\\p{L}+)",
"([\\p{L}\\w]+)"
])
posTagger = PerceptronApproach() \
.setNIterations(6) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("pos") \
.setPosCol("tags")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
posTagger
])
%%time
# Let's train our Pipeline by using our training dataset
model = pipeline.fit(training_data)
###Output
CPU times: user 24.1 ms, sys: 9.9 ms, total: 34 ms
Wall time: 1min 9s
###Markdown
This is our testing DataFrame where we get some sentences in French. We are going to use our trained Pipeline to transform these sentences and predict each token's `Part Of Speech`.
###Code
dfTest = spark.createDataFrame([
"Je sens qu'entre ça et les films de médecins et scientifiques fous que nous avons déjà vus, nous pourrions emprunter un autre chemin pour l'origine.",
"On pourra toujours parler à propos d'Averroès de décentrement du Sujet."
], StringType()).toDF("text")
predict = model.transform(dfTest)
predict.select("token.result", "pos.result").show()
###Output
+--------------------+--------------------+
| result| result|
+--------------------+--------------------+
|[Je, sens, qu'ent...|[PRON, NOUN, ADJ,...|
|[On, pourra, touj...|[PRON, VERB, ADV,...|
+--------------------+--------------------+
###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/training/french/Train-Perceptron-French.ipynb) 0. Colab Setup
###Code
# This is only to setup PySpark and Spark NLP on Colab
!wget http://setup.johnsnowlabs.com/colab.sh -O - | bash
###Output
openjdk version "1.8.0_252"
OpenJDK Runtime Environment (build 1.8.0_252-8u252-b09-1~18.04-b09)
OpenJDK 64-Bit Server VM (build 25.252-b09, mixed mode)
[K |████████████████████████████████| 215.7MB 61kB/s
[K |████████████████████████████████| 204kB 44.1MB/s
[?25h Building wheel for pyspark (setup.py) ... [?25l[?25hdone
[K |████████████████████████████████| 122kB 2.7MB/s
[?25h
###Markdown
Train POS Tagger in French by Spark NLP Based on Universal Dependency `UD_French-GSD` version 2.3
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
###Output
_____no_output_____
###Markdown
Let's create a Spark Session for our app
###Code
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
###Output
Spark NLP version: 2.5.0
Apache Spark version: 2.4.4
###Markdown
Let's prepare our training datasets containing `token_posTag` like `de_DET`. You can download this data set from Amazon S3:```wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp```
###Code
! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp
from sparknlp.training import POS
training_data = POS().readDataset(
spark=spark,
path="/tmp/UD_French-GSD_2.3.txt",
delimiter="_",
outputPosCol="tags",
outputDocumentCol="document",
outputTextCol="text"
)
training_data.show()
document_assembler = DocumentAssembler() \
.setInputCol("text")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")\
.setPrefixPattern("\\A([^\\s\\p{L}\\d\\$\\.#]*)")\
.setSuffixPattern("([^\\s\\p{L}\\d]?)([^\\s\\p{L}\\d]*)\\z")\
.setInfixPatterns([
"([\\p{L}\\w]+'{1})",
"([\\$#]?\\d+(?:[^\\s\\d]{1}\\d+)*)",
"((?:\\p{L}\\.)+)",
"((?:\\p{L}+[^\\s\\p{L}]{1})+\\p{L}+)",
"([\\p{L}\\w]+)"
])
posTagger = PerceptronApproach() \
.setNIterations(6) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("pos") \
.setPosCol("tags")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
posTagger
])
%%time
# Let's train our Pipeline by using our training dataset
model = pipeline.fit(training_data)
###Output
CPU times: user 68.2 ms, sys: 17.6 ms, total: 85.8 ms
Wall time: 2min 40s
###Markdown
This is our testing DataFrame where we get some sentences in French. We are going to use our trained Pipeline to transform these sentences and predict each token's `Part Of Speech`.
###Code
dfTest = spark.createDataFrame([
"Je sens qu'entre ça et les films de médecins et scientifiques fous que nous avons déjà vus, nous pourrions emprunter un autre chemin pour l'origine.",
"On pourra toujours parler à propos d'Averroès de décentrement du Sujet."
], StringType()).toDF("text")
predict = model.transform(dfTest)
predict.select("token.result", "pos.result").show()
###Output
+--------------------+--------------------+
| result| result|
+--------------------+--------------------+
|[Je, sens, qu'ent...|[PRON, NOUN, VERB...|
|[On, pourra, touj...|[PRON, VERB, ADV,...|
+--------------------+--------------------+
###Markdown
[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/jupyter/training/french/Train-Perceptron-French.ipynb) 0. Colab Setup
###Code
import os
# Install java
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
# Install pyspark
! pip install --ignore-installed -q pyspark==2.4.4
# Install Spark NLP
! pip install --ignore-installed -q spark-nlp==2.5
###Output
openjdk version "1.8.0_252"
OpenJDK Runtime Environment (build 1.8.0_252-8u252-b09-1~18.04-b09)
OpenJDK 64-Bit Server VM (build 25.252-b09, mixed mode)
[K |████████████████████████████████| 215.7MB 61kB/s
[K |████████████████████████████████| 204kB 44.1MB/s
[?25h Building wheel for pyspark (setup.py) ... [?25l[?25hdone
[K |████████████████████████████████| 122kB 2.7MB/s
[?25h
###Markdown
Train POS Tagger in French by Spark NLP Based on Universal Dependency `UD_French-GSD` version 2.3
###Code
import sys
import time
#Spark ML and SQL
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.functions import array_contains
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
#Spark NLP
import sparknlp
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
###Output
_____no_output_____
###Markdown
Let's create a Spark Session for our app
###Code
spark = sparknlp.start()
print("Spark NLP version: ", sparknlp.version())
print("Apache Spark version: ", spark.version)
###Output
Spark NLP version: 2.5.0
Apache Spark version: 2.4.4
###Markdown
Let's prepare our training datasets containing `token_posTag` like `de_DET`. You can download this data set from Amazon S3:```wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp```
###Code
! wget -N https://s3.amazonaws.com/auxdata.johnsnowlabs.com/public/resources/fr/pos/UD_French/UD_French-GSD_2.3.txt -P /tmp
from sparknlp.training import POS
training_data = POS().readDataset(
spark=spark,
path="/tmp/UD_French-GSD_2.3.txt",
delimiter="_",
outputPosCol="tags",
outputDocumentCol="document",
outputTextCol="text"
)
training_data.show()
document_assembler = DocumentAssembler() \
.setInputCol("text")
sentence_detector = SentenceDetector() \
.setInputCols(["document"]) \
.setOutputCol("sentence")
tokenizer = Tokenizer() \
.setInputCols(["sentence"]) \
.setOutputCol("token")\
.setPrefixPattern("\\A([^\\s\\p{L}\\d\\$\\.#]*)")\
.setSuffixPattern("([^\\s\\p{L}\\d]?)([^\\s\\p{L}\\d]*)\\z")\
.setInfixPatterns([
"([\\p{L}\\w]+'{1})",
"([\\$#]?\\d+(?:[^\\s\\d]{1}\\d+)*)",
"((?:\\p{L}\\.)+)",
"((?:\\p{L}+[^\\s\\p{L}]{1})+\\p{L}+)",
"([\\p{L}\\w]+)"
])
posTagger = PerceptronApproach() \
.setNIterations(6) \
.setInputCols(["sentence", "token"]) \
.setOutputCol("pos") \
.setPosCol("tags")
pipeline = Pipeline(stages=[
document_assembler,
sentence_detector,
tokenizer,
posTagger
])
%%time
# Let's train our Pipeline by using our training dataset
model = pipeline.fit(training_data)
###Output
CPU times: user 68.2 ms, sys: 17.6 ms, total: 85.8 ms
Wall time: 2min 40s
###Markdown
This is our testing DataFrame where we get some sentences in French. We are going to use our trained Pipeline to transform these sentences and predict each token's `Part Of Speech`.
###Code
dfTest = spark.createDataFrame([
"Je sens qu'entre ça et les films de médecins et scientifiques fous que nous avons déjà vus, nous pourrions emprunter un autre chemin pour l'origine.",
"On pourra toujours parler à propos d'Averroès de décentrement du Sujet."
], StringType()).toDF("text")
predict = model.transform(dfTest)
predict.select("token.result", "pos.result").show()
###Output
_____no_output_____ |
proj2/code/Project2.ipynb | ###Markdown
---
###Code
import math
import torch
from torch import nn, optim, FloatTensor
from torch.nn import functional as F
from torch.autograd import Variable
# note: reshapeLabel(), used below to reshape the binary labels, is assumed to be defined earlier in the project
X_train = FloatTensor(1000, 2).uniform_(0, 1)
y_label_train = X_train.sub(0.5).pow(2).sum(1).lt(1./2./math.pi).float()
y_train = reshapeLabel(y_label_train)
X_test = FloatTensor(1000, 2).uniform_(0, 1)
y_label_test = X_test.sub(0.5).pow(2).sum(1).lt(1./2./math.pi).float()
y_test = reshapeLabel(y_label_test)
# convert to Variable
X_train, y_train = Variable(X_train), Variable(y_train)
X_test, y_test = Variable(X_test), Variable(y_test)
mu, std = X_train.mean(), X_train.std()
X_train.sub_(mu).div_(std)
X_test.sub_(mu).div_(std);
y_train = y_train.float()
y_test = y_test.float()
train_output = model.forward(X_train)
print("Before Train Error: {:.2%}".format(float(train_output.max(1)[1].ne(y_train.max(1)[1]).sum())/train_output.size(0)))
model = nn.Sequential(
nn.Linear(2,25),
nn.ReLU(),
nn.Linear(25,25),
nn.ReLU(),
nn.Linear(25,25),
nn.ReLU(),
nn.Linear(25,2),
nn.Tanh(),
)
%%time
lr = 0.01
batch_size = 50
nb_epochs = 10
criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=lr)
for k in range(nb_epochs):
print("******** Begin Epoch {} ********\n".format(k))
for b in range(0, X_train.size(0), batch_size):
output = model.forward(X_train.narrow(0, b, batch_size))
loss = criterion(output , y_train.narrow(0, b, batch_size))
error = float(output.max(1)[1].ne(y_train.narrow(0, b, batch_size).max(1)[1]).sum())/output.size(0)
print("Epoch {} Batch {:2.0f}: {:4.2f}, {:6.2%}".format(k, b/batch_size, loss.data[0], error))
model.zero_grad()
loss.backward()
optimizer.step()
print("\n******** After Epoch {} ********\n".format(k))
output = model.forward(X_train)
loss = criterion(output, y_train.float())
error = float(output.max(1)[1].ne(y_train.max(1)[1]).sum())/output.size(0)
print("Loss: {:4.2f}, Error: {:6.2%}\n".format(loss.data[0], error))
train_output = model.forward(X_train)
print("Train Error: {:.2%}".format(float(train_output.max(1)[1].ne(y_train.max(1)[1]).sum())/train_output.size(0)))
test_output = model.forward(X_test)
print("Test Error: {:.2%}".format(float(test_output.max(1)[1].ne(y_test.max(1)[1]).sum())/test_output.size(0)))
###Output
Train Error: 3.60%
Test Error: 8.50%
|
1.Study/2. with computer/4.Programming/1.Algorithms/0.Review/1.array,stack,queue,Linkedlist,DoubleLinkedlist.ipynb | ###Markdown
1. 배열
###Code
# 2차원 배열: 리스트로 구현시
data_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
data_list
###Output
_____no_output_____
###Markdown
연습1: 위의 2차원 배열에서 9, 8, 7 을 순서대로 출력해보기
###Code
dataset = ['Braund, Mr. Owen Harris',
'Cumings, Mrs. John Bradley (Florence Briggs Thayer)',
'Heikkinen, Miss. Laina',
'Futrelle, Mrs. Jacques Heath (Lily May Peel)',
'Allen, Mr. William Henry',
'Moran, Mr. James',
'McCarthy, Mr. Timothy J',
'Palsson, Master. Gosta Leonard',
'Johnson, Mrs. Oscar W (Elisabeth Vilhelmina Berg)',
'Nasser, Mrs. Nicholas (Adele Achem)',
'Sandstrom, Miss. Marguerite Rut',
'Bonnell, Miss. Elizabeth',
'Saundercock, Mr. William Henry',
'Andersson, Mr. Anders Johan',
'Vestrom, Miss. Hulda Amanda Adolfina',
'Hewlett, Mrs. (Mary D Kingcome) ',
'Rice, Master. Eugene',
'Williams, Mr. Charles Eugene',
'Vander Planke, Mrs. Julius (Emelia Maria Vandemoortele)',
'Masselmani, Mrs. Fatima',
'Fynney, Mr. Joseph J',
'Beesley, Mr. Lawrence',
'McGowan, Miss. Anna "Annie"',
'Sloper, Mr. William Thompson',
'Palsson, Miss. Torborg Danira',
'Asplund, Mrs. Carl Oscar (Selma Augusta Emilia Johansson)',
'Emir, Mr. Farred Chehab',
'Fortune, Mr. Charles Alexander',
'Dwyer, Miss. Ellen "Nellie"',
'Todoroff, Mr. Lalio']
###Output
_____no_output_____ |
docs/productive/testing/doctest.ipynb | ###Markdown
Doctests
###Code
import doctest
doctest.testmod()  # runs every >>> example found in the docstrings of this module
def add(a, b):
    '''
    This is a test:
    >>> add(2, 2)
    5
    '''
    return a + b  # returns 4, so the doctest above (which expects 5) is reported as a failure
###Output
_____no_output_____ |
Simple_imageClassifier.ipynb | ###Markdown
###Code
!git clone https://github.com/domkade/kill_me_learning.git
import os
os.chdir("kill_me_learning")
!python kml_train.py
!python kml_test.py --model ./*.model
###Output
_____no_output_____ |
courses/udacity_intro_to_tensorflow_for_deep_learning/l09c05_nlp_tweaking_the_model.ipynb | ###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Tweaking the Model Run in Google Colab View source on GitHub In this colab, you'll investigate how various tweaks to data processing and the model itself can impact results. At the end, you'll once again be able to visualize how the network sees the related sentiment of each word in the dataset. Import TensorFlow and related functions
###Code
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
###Output
_____no_output_____
###Markdown
Get the datasetWe'll once again use the dataset containing Amazon and Yelp reviews. This dataset was originally extracted from [here](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set).
###Code
!wget --no-check-certificate \
https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P \
-O /tmp/sentiment.csv
import numpy as np
import pandas as pd
dataset = pd.read_csv('/tmp/sentiment.csv')
sentences = dataset['text'].tolist()
labels = dataset['sentiment'].tolist()
# Separate out the sentences and labels into training and test sets
training_size = int(len(sentences) * 0.8)
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
# Make labels into numpy arrays for use with the network later
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
###Output
_____no_output_____
###Markdown
Tokenize the dataset (with tweaks!)Now, we'll tokenize the dataset, but we can make some changes to this from before. Previously, we used: ```vocab_size = 1000embedding_dim = 16max_length = 100trunc_type='post'padding_type='post'```How might changing the `vocab_size`, `embedding_dim` or `max_length` affect how the model performs?
###Code
vocab_size = 500
embedding_dim = 16
max_length = 50
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
###Output
_____no_output_____
###Markdown
Train a Sentiment Model (with tweaks!)We'll use a slightly different model here, using `GlobalAveragePooling1D` instead of `Flatten()`.
###Code
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(6, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
num_epochs = 30
history = model.fit(training_padded, training_labels_final, epochs=num_epochs, validation_data=(testing_padded, testing_labels_final))
###Output
_____no_output_____
###Markdown
Visualize the training graphYou can use the code below to visualize the training and validation accuracy while you try out different tweaks to the hyperparameters and model.
###Code
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
###Output
_____no_output_____
###Markdown
Get files for visualizing the networkThe code below will download two files for visualizing how your network "sees" the sentiment related to each word. Head to http://projector.tensorflow.org/ and load these files, then click the checkbox to "sphereize" the data.Note: You may run into errors with the projection if your `vocab_size` earlier was larger than the actual number of words in the vocabulary, in which case you'll need to decrease this variable and re-train in order to visualize.
###Code
# First get the weights of the embedding layer
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
import io
# Create the reverse word index
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# Write out the embedding vectors and metadata
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()
# Download the files
try:
from google.colab import files
except ImportError:
pass
else:
files.download('vecs.tsv')
files.download('meta.tsv')
###Output
_____no_output_____
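###Markdown
If the projector rejects the files, a quick sanity check (a minimal sketch using the `word_index` and `vocab_size` variables defined above) is to compare the requested vocabulary size with the number of words the tokenizer actually saw:
###Code
actual_vocab = len(word_index) + 1 # +1 accounts for the reserved padding index 0
print("requested vocab_size:", vocab_size, "| words actually seen:", actual_vocab)
# if vocab_size exceeds actual_vocab, lower it and re-train before exporting the vectors
###Output
_____no_output_____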
###Markdown
Predicting Sentiment in New ReviewsBelow, we've again included some example new reviews you can test your results on.
###Code
# Use the model to predict a review
fake_reviews = ['I love this phone', 'I hate spaghetti',
'Everything was cold',
'Everything was hot exactly as I wanted',
'Everything was green',
'the host seated us immediately',
'they gave us free chocolate cake',
'not sure about the wilted flowers on the table',
'only works when I stand on tippy toes',
'does not work when I stand on my head']
print(fake_reviews)
# Create the sequences
padding_type='post'
sample_sequences = tokenizer.texts_to_sequences(fake_reviews)
fakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length)
print('\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\n')
classes = model.predict(fakes_padded)
# The closer the class is to 1, the more positive the review is deemed to be
for x in range(len(fake_reviews)):
print(fake_reviews[x])
print(classes[x])
print('\n')
# Try adding reviews of your own
# Add some negative words (such as "not") to the good reviews and see what happens
# For example:
# they gave us free chocolate cake and did not charge us
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Tweaking the Model Run in Google Colab View source on GitHub In this colab, you'll investigate how various tweaks to data processing and the model itself can impact results. At the end, you'll once again be able to visualize how the network sees the related sentiment of each word in the dataset. Import TensorFlow and related functions
###Code
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
###Output
_____no_output_____
###Markdown
Get the datasetWe'll once again use the dataset containing Amazon and Yelp reviews. This dataset was originally extracted from [here](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set).
###Code
!wget --no-check-certificate \
https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P \
-O /tmp/sentiment.csv
import numpy as np
import pandas as pd
dataset = pd.read_csv('/tmp/sentiment.csv')
sentences = dataset['text'].tolist()
labels = dataset['sentiment'].tolist()
# Separate out the sentences and labels into training and test sets
training_size = int(len(sentences) * 0.8)
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
# Make labels into numpy arrays for use with the network later
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
###Output
_____no_output_____
###Markdown
Tokenize the dataset (with tweaks!)Now, we'll tokenize the dataset, but we can make some changes to this from before. Previously, we used: ```vocab_size = 1000embedding_dim = 16max_length = 100trunc_type='post'padding_type='post'```How might changing the `vocab_size`, `embedding_dim` or `max_length` affect how the model performs?
###Code
vocab_size = 500
embedding_dim = 16
max_length = 50
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
###Output
_____no_output_____
###Markdown
Train a Sentiment Model (with tweaks!)We'll use a slightly different model here, using `GlobalAveragePooling1D` instead of `Flatten()`.
###Code
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(6, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
num_epochs = 30
history = model.fit(training_padded, training_labels_final, epochs=num_epochs, validation_data=(testing_padded, testing_labels_final))
###Output
_____no_output_____
###Markdown
Visualize the training graphYou can use the code below to visualize the training and validation accuracy while you try out different tweaks to the hyperparameters and model.
###Code
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
###Output
_____no_output_____
###Markdown
Get files for visualizing the networkThe code below will download two files for visualizing how your network "sees" the sentiment related to each word. Head to http://projector.tensorflow.org/ and load these files, then click the checkbox to "sphereize" the data.Note: You may run into errors with the projection if your `vocab_size` earlier was larger than the actual number of words in the vocabulary, in which case you'll need to decrease this variable and re-train in order to visualize.
###Code
# First get the weights of the embedding layer
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
import io
# Create the reverse word index
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# Write out the embedding vectors and metadata
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()
# Download the files
try:
from google.colab import files
except ImportError:
pass
else:
files.download('vecs.tsv')
files.download('meta.tsv')
###Output
_____no_output_____
###Markdown
Predicting Sentiment in New ReviewsBelow, we've again included some example new reviews you can test your results on.
###Code
# Use the model to predict a review
fake_reviews = ['I love this phone', 'I hate spaghetti',
'Everything was cold',
'Everything was hot exactly as I wanted',
'Everything was green',
'the host seated us immediately',
'they gave us free chocolate cake',
'not sure about the wilted flowers on the table',
'only works when I stand on tippy toes',
'does not work when I stand on my head']
print(fake_reviews)
# Create the sequences
padding_type='post'
sample_sequences = tokenizer.texts_to_sequences(fake_reviews)
fakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length)
print('\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\n')
classes = model.predict(fakes_padded)
# The closer the class is to 1, the more positive the review is deemed to be
for x in range(len(fake_reviews)):
print(fake_reviews[x])
print(classes[x])
print('\n')
# Try adding reviews of your own
# Add some negative words (such as "not") to the good reviews and see what happens
# For example:
# they gave us free chocolate cake and did not charge us
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Tweaking the Model Run in Google Colab View source on GitHub In this colab, you'll investigate how various tweaks to data processing and the model itself can impact results. At the end, you'll once again be able to visualize how the network sees the related sentiment of each word in the dataset. Import TensorFlow and related functions
###Code
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
###Output
_____no_output_____
###Markdown
Get the datasetWe'll once again use the dataset containing Amazon and Yelp reviews. This dataset was originally extracted from [here](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set).
###Code
!wget --no-check-certificate \
https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P \
-O /tmp/sentiment.csv
import numpy as np
import pandas as pd
dataset = pd.read_csv('/tmp/sentiment.csv')
sentences = dataset['text'].tolist()
labels = dataset['sentiment'].tolist()
# Separate out the sentences and labels into training and test sets
training_size = int(len(sentences) * 0.8)
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
# Make labels into numpy arrays for use with the network later
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
###Output
_____no_output_____
###Markdown
Tokenize the dataset (with tweaks!)Now, we'll tokenize the dataset, but we can make some changes to this from before. Previously, we used: ```vocab_size = 1000embedding_dim = 16max_length = 100trunc_type='post'padding_type='post'```How might changing the `vocab_size`, `embedding_dim` or `max_length` affect how the model performs?
###Code
vocab_size = 500
embedding_dim = 16
max_length = 50
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
###Output
_____no_output_____
###Markdown
Train a Sentiment Model (with tweaks!)We'll use a slightly different model here, using `GlobalAveragePooling1D` instead of `Flatten()`.
###Code
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(6, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
num_epochs = 30
history = model.fit(training_padded, training_labels_final, epochs=num_epochs, validation_data=(testing_padded, testing_labels_final))
###Output
Epoch 1/30
50/50 [==============================] - 3s 10ms/step - loss: 0.6922 - accuracy: 0.5339 - val_loss: 0.6992 - val_accuracy: 0.4110
Epoch 2/30
50/50 [==============================] - 0s 4ms/step - loss: 0.6910 - accuracy: 0.5116 - val_loss: 0.6985 - val_accuracy: 0.4110
Epoch 3/30
50/50 [==============================] - 0s 4ms/step - loss: 0.6872 - accuracy: 0.5320 - val_loss: 0.6932 - val_accuracy: 0.4662
Epoch 4/30
50/50 [==============================] - 0s 4ms/step - loss: 0.6853 - accuracy: 0.5511 - val_loss: 0.6898 - val_accuracy: 0.4912
Epoch 5/30
50/50 [==============================] - 0s 4ms/step - loss: 0.6771 - accuracy: 0.5852 - val_loss: 0.6831 - val_accuracy: 0.5313
Epoch 6/30
50/50 [==============================] - 0s 4ms/step - loss: 0.6688 - accuracy: 0.6718 - val_loss: 0.6742 - val_accuracy: 0.5614
Epoch 7/30
50/50 [==============================] - 0s 4ms/step - loss: 0.6541 - accuracy: 0.6685 - val_loss: 0.6613 - val_accuracy: 0.6065
Epoch 8/30
50/50 [==============================] - 0s 4ms/step - loss: 0.6326 - accuracy: 0.7275 - val_loss: 0.6442 - val_accuracy: 0.6441
Epoch 9/30
50/50 [==============================] - 0s 4ms/step - loss: 0.6110 - accuracy: 0.7501 - val_loss: 0.6186 - val_accuracy: 0.7544
Epoch 10/30
50/50 [==============================] - 0s 4ms/step - loss: 0.5748 - accuracy: 0.8129 - val_loss: 0.5934 - val_accuracy: 0.7694
Epoch 11/30
50/50 [==============================] - 0s 4ms/step - loss: 0.5462 - accuracy: 0.8148 - val_loss: 0.5824 - val_accuracy: 0.7268
Epoch 12/30
50/50 [==============================] - 0s 4ms/step - loss: 0.5195 - accuracy: 0.8361 - val_loss: 0.5552 - val_accuracy: 0.7794
Epoch 13/30
50/50 [==============================] - 0s 4ms/step - loss: 0.4830 - accuracy: 0.8329 - val_loss: 0.5459 - val_accuracy: 0.7544
Epoch 14/30
50/50 [==============================] - 0s 4ms/step - loss: 0.4483 - accuracy: 0.8496 - val_loss: 0.5234 - val_accuracy: 0.7870
Epoch 15/30
50/50 [==============================] - 0s 4ms/step - loss: 0.4354 - accuracy: 0.8453 - val_loss: 0.5262 - val_accuracy: 0.7444
Epoch 16/30
50/50 [==============================] - 0s 4ms/step - loss: 0.4031 - accuracy: 0.8765 - val_loss: 0.5047 - val_accuracy: 0.7870
Epoch 17/30
50/50 [==============================] - 0s 4ms/step - loss: 0.3767 - accuracy: 0.8804 - val_loss: 0.4984 - val_accuracy: 0.7719
Epoch 18/30
50/50 [==============================] - 0s 4ms/step - loss: 0.3687 - accuracy: 0.8722 - val_loss: 0.4966 - val_accuracy: 0.7694
Epoch 19/30
50/50 [==============================] - 0s 4ms/step - loss: 0.3407 - accuracy: 0.8916 - val_loss: 0.5145 - val_accuracy: 0.7343
Epoch 20/30
50/50 [==============================] - 0s 4ms/step - loss: 0.3375 - accuracy: 0.8635 - val_loss: 0.5006 - val_accuracy: 0.7444
Epoch 21/30
50/50 [==============================] - 0s 4ms/step - loss: 0.3223 - accuracy: 0.8904 - val_loss: 0.4949 - val_accuracy: 0.7519
Epoch 22/30
50/50 [==============================] - 0s 6ms/step - loss: 0.3216 - accuracy: 0.8691 - val_loss: 0.4832 - val_accuracy: 0.7694
Epoch 23/30
50/50 [==============================] - 0s 4ms/step - loss: 0.3053 - accuracy: 0.8807 - val_loss: 0.4941 - val_accuracy: 0.7544
Epoch 24/30
50/50 [==============================] - 0s 4ms/step - loss: 0.2959 - accuracy: 0.8909 - val_loss: 0.5162 - val_accuracy: 0.7218
Epoch 25/30
50/50 [==============================] - 0s 4ms/step - loss: 0.2884 - accuracy: 0.8921 - val_loss: 0.4939 - val_accuracy: 0.7469
Epoch 26/30
50/50 [==============================] - 0s 4ms/step - loss: 0.2820 - accuracy: 0.8974 - val_loss: 0.4843 - val_accuracy: 0.7594
Epoch 27/30
50/50 [==============================] - 0s 4ms/step - loss: 0.2779 - accuracy: 0.8962 - val_loss: 0.4986 - val_accuracy: 0.7419
Epoch 28/30
50/50 [==============================] - 0s 4ms/step - loss: 0.2504 - accuracy: 0.8971 - val_loss: 0.5141 - val_accuracy: 0.7343
Epoch 29/30
50/50 [==============================] - 0s 4ms/step - loss: 0.2615 - accuracy: 0.9008 - val_loss: 0.5063 - val_accuracy: 0.7368
Epoch 30/30
50/50 [==============================] - 0s 4ms/step - loss: 0.2629 - accuracy: 0.8935 - val_loss: 0.5173 - val_accuracy: 0.7318
###Markdown
Visualize the training graphYou can use the code below to visualize the training and validation accuracy while you try out different tweaks to the hyperparameters and model.
###Code
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
###Output
_____no_output_____
###Markdown
Get files for visualizing the networkThe code below will download two files for visualizing how your network "sees" the sentiment related to each word. Head to http://projector.tensorflow.org/ and load these files, then click the checkbox to "sphereize" the data.Note: You may run into errors with the projection if your `vocab_size` earlier was larger than the actual number of words in the vocabulary, in which case you'll need to decrease this variable and re-train in order to visualize.
###Code
# First get the weights of the embedding layer
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
import io
# Create the reverse word index
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# Write out the embedding vectors and metadata
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()
import os
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorboard.plugins import projector
log_dir='/logs/imdb-example/'
os.makedirs(log_dir, exist_ok=True) # make sure the log directory exists before copying into it
path = os.path.join(log_dir, 'metadata.tsv')
%cp 'meta.tsv' $path
# Save the weights we want to analyse as a variable. Note that the first
# value represents any unknown word, which is not in the metadata, so
# we will remove that value.
weights = tf.Variable(model.layers[0].get_weights()[0][1:])
# Create a checkpoint from embedding, the filename and key are
# name of the tensor.
checkpoint = tf.train.Checkpoint(embedding=weights)
checkpoint.save(os.path.join(log_dir, "embedding.ckpt"))
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
# The name of the tensor will be suffixed by `/.ATTRIBUTES/VARIABLE_VALUE`
embedding.tensor_name = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
embedding.metadata_path = 'metadata.tsv'
projector.visualize_embeddings(log_dir, config)
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
%load_ext tensorboard
%tensorboard --logdir /logs/imdb-example/
# Download the files
try:
from google.colab import files
except ImportError:
pass
else:
files.download('vecs.tsv')
files.download('meta.tsv')
###Output
_____no_output_____
###Markdown
Predicting Sentiment in New ReviewsBelow, we've again included some example new reviews you can test your results on.
###Code
# Use the model to predict a review
fake_reviews = ['I love this phone', 'I hate spaghetti',
'Everything was cold',
'Everything was hot exactly as I wanted',
'Everything was green',
'the host seated us immediately',
'they gave us free chocolate cake',
'not sure about the wilted flowers on the table',
'only works when I stand on tippy toes',
'does not work when I stand on my head']
print(fake_reviews)
# Create the sequences
padding_type='post'
sample_sequences = tokenizer.texts_to_sequences(fake_reviews)
fakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length)
print('\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\n')
classes = model.predict(fakes_padded)
# The closer the class is to 1, the more positive the review is deemed to be
for x in range(len(fake_reviews)):
print(fake_reviews[x])
print(classes[x])
print('\n')
# Try adding reviews of your own
# Add some negative words (such as "not") to the good reviews and see what happens
# For example:
# they gave us free chocolate cake and did not charge us
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Tweaking the Model Run in Google Colab View source on GitHub In this colab, you'll investigate how various tweaks to data processing and the model itself can impact results. At the end, you'll once again be able to visualize how the network sees the related sentiment of each word in the dataset. Import TensorFlow and related functions
###Code
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
###Output
_____no_output_____
###Markdown
Get the datasetWe'll once again use the dataset containing Amazon and Yelp reviews. This dataset was originally extracted from [here](https://www.kaggle.com/marklvl/sentiment-labelled-sentences-data-set).
###Code
!wget --no-check-certificate \
https://drive.google.com/uc?id=13ySLC_ue6Umt9RJYSeM2t-V0kCv-4C-P \
-O /tmp/sentiment.csv
import numpy as np
import pandas as pd
dataset = pd.read_csv('/tmp/sentiment.csv')
sentences = dataset['text'].tolist()
labels = dataset['sentiment'].tolist()
# Separate out the sentences and labels into training and test sets
training_size = int(len(sentences) * 0.8)
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
# Make labels into numpy arrays for use with the network later
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)
###Output
_____no_output_____
###Markdown
Tokenize the dataset (with tweaks!)Now, we'll tokenize the dataset, but we can make some changes to this from before. Previously, we used: ```vocab_size = 1000embedding_dim = 16max_length = 100trunc_type='post'padding_type='post'```How might changing the `vocab_size`, `embedding_dim` or `max_length` affect how the model performs?
###Code
vocab_size = 500
embedding_dim = 16
max_length = 50
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
tokenizer = Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(training_sentences)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(training_sentences)
training_padded = pad_sequences(training_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
###Output
_____no_output_____
###Markdown
Train a Sentiment Model (with tweaks!)We'll use a slightly different model here, using `GlobalAveragePooling1D` instead of `Flatten()`.
###Code
model = tf.keras.Sequential([
tf.keras.layers.Embedding(vocab_size, embedding_dim, input_length=max_length),
tf.keras.layers.GlobalAveragePooling1D(),
tf.keras.layers.Dense(6, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
num_epochs = 30
history = model.fit(training_padded, training_labels_final, epochs=num_epochs, validation_data=(testing_padded, testing_labels_final))
###Output
_____no_output_____
###Markdown
Visualize the training graphYou can use the code below to visualize the training and validation accuracy while you try out different tweaks to the hyperparameters and model.
###Code
import matplotlib.pyplot as plt
def plot_graphs(history, string):
plt.plot(history.history[string])
plt.plot(history.history['val_'+string])
plt.xlabel("Epochs")
plt.ylabel(string)
plt.legend([string, 'val_'+string])
plt.show()
plot_graphs(history, "accuracy")
plot_graphs(history, "loss")
###Output
_____no_output_____
###Markdown
Get files for visualizing the networkThe code below will download two files for visualizing how your network "sees" the sentiment related to each word. Head to http://projector.tensorflow.org/ and load these files, then click the checkbox to "sphereize" the data.Note: You may run into errors with the projection if your `vocab_size` earlier was larger than the actual number of words in the vocabulary, in which case you'll need to decrease this variable and re-train in order to visualize.
###Code
# First get the weights of the embedding layer
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape) # shape: (vocab_size, embedding_dim)
import io
# Create the reverse word index
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
# Write out the embedding vectors and metadata
out_v = io.open('vecs.tsv', 'w', encoding='utf-8')
out_m = io.open('meta.tsv', 'w', encoding='utf-8')
for word_num in range(1, vocab_size):
word = reverse_word_index[word_num]
embeddings = weights[word_num]
out_m.write(word + "\n")
out_v.write('\t'.join([str(x) for x in embeddings]) + "\n")
out_v.close()
out_m.close()
# Download the files
try:
from google.colab import files
except ImportError:
pass
else:
files.download('vecs.tsv')
files.download('meta.tsv')
###Output
_____no_output_____
###Markdown
Predicting Sentiment in New ReviewsBelow, we've again included some example new reviews you can test your results on.
###Code
# Use the model to predict a review
fake_reviews = ['I love this phone', 'I hate spaghetti',
'Everything was cold',
'Everything was hot exactly as I wanted',
'Everything was green',
'the host seated us immediately',
'they gave us free chocolate cake',
'not sure about the wilted flowers on the table',
'only works when I stand on tippy toes',
'does not work when I stand on my head']
print(fake_reviews)
# Create the sequences
padding_type='post'
sample_sequences = tokenizer.texts_to_sequences(fake_reviews)
fakes_padded = pad_sequences(sample_sequences, padding=padding_type, maxlen=max_length)
print('\nHOT OFF THE PRESS! HERE ARE SOME NEWLY MINTED, ABSOLUTELY GENUINE REVIEWS!\n')
classes = model.predict(fakes_padded)
# The closer the class is to 1, the more positive the review is deemed to be
for x in range(len(fake_reviews)):
print(fake_reviews[x])
print(classes[x])
print('\n')
# Try adding reviews of your own
# Add some negative words (such as "not") to the good reviews and see what happens
# For example:
# they gave us free chocolate cake and did not charge us
###Output
_____no_output_____ |
00.Python/[udemy]numpy/docs_numpy_basic_operations.ipynb | ###Markdown
Unlike in many matrix languages, the product operator * operates elementwise on arrays.
###Code
A = np.array([[1,1],[0,1]])
B = np.array([[2,0], [3,4]])
# element-wise product
A * B
# matrix product
A @ B
A.dot(B)
###Output
_____no_output_____
###Markdown
Some operators, such as `+=` and `*=`, act in place, modifying an existing array rather than creating a new one.
###Code
a = np.ones((2,3), dtype=int)
b = np.random.random((2,3))
a *= 3
a
a += a # modifies a in place rather than creating a new array
a # still the same array object, with every element doubled
a += b # b is float64 and is not automatically converted to int, so this raises a TypeError
a = np.ones(3, dtype=np.int32)
from numpy import pi
b = np.linspace(0, pi, 3)
b.dtype.name
c = a + b # the int32 array a is upcast, so c is float64
c
c.dtype.name
d = np.exp(c*1j) # upcast again: d is complex128
d
d.dtype.name
a = np.random.random((2,3))
a
a.sum()
a.min()
a.max()
###Output
_____no_output_____
###Markdown
By default, these operations apply to the array as though it were a flat list of numbers, regardless of its shape. Specifying the axis parameter applies the operation along that axis instead.
###Code
b = np.arange(12).reshape(3, 4)
b
# sum of each column
b.sum(axis=0)
# axis=0 runs down the columns
# axis=1 runs across the rows
# minimum within each row
b.min(axis=1)
# cumulative sum along each row
b.cumsum(axis=1)
###Output
_____no_output_____
###Markdown
Universal Functions NumPy provides mathematical functions such as sin, cos, and exp. These are called universal functions (ufunc). Within NumPy, these functions operate elementwise on an array. See below:
###Code
B = np.arange(3)
B
np.exp(B)
np.sqrt(B)
C = np.array([2., -1. , 4.])
np.add(B, C)
###Output
_____no_output_____
###Markdown
Indexing, Slicing and Iterating
###Code
a = np.arange(10) ** 3
# raise each element to the third power
a
a[2]
a[2:5]
# equivalent to a[0:6:2]: from the start up to (but not including) position 6,
# set every 2nd element to -1000
a[0:6:2] = -1000
a
# reversed
a[ : :-1]
a[ ::-1]
for i in a:
print(i**(1/3.))
###Output
nan
1.0
nan
3.0
nan
5.0
6.0
7.0
8.0
9.0
###Markdown
A multidimensional array can have one index per axis. These indices are given in a tuple separated by commas.
###Code
def f(x,y):
return 10*x+y
b = np.fromfunction(f, (5,4), dtype=int)
b
b[2,3]
# every row in the second column of b
b[0:5, 1]
b[:,1]
b[1:3, : ]
# the last row. Equivalent to b[-1,:]
b[-1]
###Output
_____no_output_____
###Markdown
The expression b[i, ...] is also valid: the dots (...) stand for as many colons as needed to complete the indexing tuple. For example, with x a 5-axis array: - x[1,2,...] is equivalent to x[1,2,:,:,:]- x[...,3] to x[:,:,:,:,3]- x[4,...,5,:] to x[4,:,:,5,:]
###Code
c = np.array([[[0, 1, 2],
[10, 12, 13]],
[[100, 101, 102],
[110, 112, 113]]])
c.shape
c[1, ...] # c[1,:,:] or c[1]
c[..., 2] # c[:,:,2]
for row in b:
print(row)
for element in b.flat:
print(element)
###Output
0
1
2
3
10
11
12
13
20
21
22
23
30
31
32
33
40
41
42
43
###Markdown
Shape Manipulation
###Code
a= np.floor(10*np.random.random((3,4)))
a
a.ravel() # flattened
a.reshape(6,2) # returns the array with new modified shape
a.T # returns the array, transposed
a.T.shape
a.shape
###Output
_____no_output_____
###Markdown
Stacking together different arrays
###Code
a = np.floor(10*np.random.random((2,2)))
a
b = np.floor(10*np.random.random((2,2)))
b
np.vstack((a,b))
np.hstack((a,b))
from numpy import newaxis
np.column_stack((a, b))
a= np.array([4.,2.])
b = np.array([3.,8.])
np.column_stack((a,b)) # returns a 2D array
np.hstack((a,b))
a[:, newaxis] # view a as a 2D column vector
np.column_stack((a[:, newaxis], b[:,newaxis]))
np.hstack((a[:, newaxis], b[:, newaxis]))
###Output
_____no_output_____
###Markdown
Note In complex cases, `r_` and `c_` are useful for creating arrays by stacking numbers along one axis.
###Code
np.r_[1:4, 0, 4]
###Output
_____no_output_____
###Markdown
Splitting one array into several smaller ones
###Code
a = np.floor(10*np.random.random((2,12)))
a
np.hsplit(a, 3) # split a into 3 equal pieces
np.hsplit(a, (3, 4)) # split a after the third and the fourth column
###Output
_____no_output_____
###Markdown
Copies and Views
###Code
a = np.arange(12)
b = a # no new object is created; b is simply another name for the same object
b is a # a and b are two names for the same ndarray object
b.shape = 3, 4
a.shape
def f(x):
    print(id(x))
id(a) # id returns a unique identifier of an object
f(a)
###Output
4494863536
###Markdown
View or Shallow Copy
###Code
c = a.view()
c is a
c
c.base is a # c is a view of the data owned by a; the data is shared
c.flags.owndata
c.shape
c.shape = 2,6 # a's shape does not change
c[0, 4] = 1234
c
a
###Output
_____no_output_____
###Markdown
Slicing an array returns a view of it.
###Code
a[:,1:3]
s = a[:, 1:3] # slicing returns a view, not a copy
s
a
###Output
_____no_output_____
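###Markdown
A quick added check (an illustrative sketch, not part of the original notebook, using the `a` and `s` defined above): because `s` is a view, assigning through it modifies `a` as well.
###Code
s[:] = 10 # write through the view
a # columns 1 and 2 of a are now 10
###Output
_____no_output_____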
###Markdown
Deep Copy
###Code
d = a.copy() # a new array object with new data is created
d is a
d.base is a # unlike a shallow copy, d does not share data with a
d[0, 0] = 9999
a # a is unchanged
###Output
_____no_output_____
###Markdown
Broadcasting allows universal functions to deal in a meaningful way with inputs that do not have exactly the same shape. The first rule of broadcasting is that if all input arrays do not have the same number of dimensions, a "1" is repeatedly prepended to the shapes of the smaller arrays until all arrays have the same number of dimensions. The second rule of broadcasting ensures that arrays with a size of 1 along a particular dimension act as if they had the size of the array with the largest shape along that dimension; the value of the array element is assumed to be the same along that dimension for the "broadcast" array. After applying the broadcasting rules, the sizes of all arrays must match. More details can be found in the Broadcasting section of the NumPy documentation.
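Below is a small added illustration of these rules (a sketch that is not part of the original notebook): a 1-D array of shape (4,) is stretched against a (3, 4) array, and a scalar is stretched against every element.
###Code
a = np.arange(12).reshape(3, 4)
b = np.array([0, 10, 20, 30])
a + b # b is broadcast across each row of a
a * 2 # the scalar 2 is broadcast against every element
###Output
_____no_output_____
###Markdown
Fancy indexing and index tricks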
###Code
a = np.arange(12) ** 2
a
i = np.array([1, 1, 3, 8, 5])
a[i] # the elements of a at the positions given by i
j = np.array([[3, 4], [9, 7]])
a[j] # when the index array is 2-D, the result has the same 2-D shape
###Output
_____no_output_____
###Markdown
```
>>> palette = np.array( [ [0,0,0],          # black
...                       [255,0,0],        # red
...                       [0,255,0],        # green
...                       [0,0,255],        # blue
...                       [255,255,255] ] ) # white
>>> image = np.array( [ [ 0, 1, 2, 0 ],     # each value corresponds to a color in the palette
...                     [ 0, 3, 4, 0 ] ] )
>>> palette[image]                          # the (2,4,3) color image
array([[[  0,   0,   0],
        [255,   0,   0],
        [  0, 255,   0],
        [  0,   0,   0]],

       [[  0,   0,   0],
        [  0,   0, 255],
        [255, 255, 255],
        [  0,   0,   0]]])
```
###Code
a = np.arange(12).reshape(3,4)
a
i = np.array([[0, 1],
[1, 2]])
j = np.array([[2, 1],
[3, 3]])
i
j
a[i,j]
a[i, 2]
a[:,j]
l = [i,j]
a[l] # deprecated: a plain list of index arrays used to be interpreted as a[i, j]
s = np.array([i,j])
a[s] # raises an IndexError: s is treated as indexing only the first axis of a
a[tuple(s)] # same as a[i, j]
time = np.linspace(20, 145,5)
time
data = np.sin(np.arange(20)).reshape(5,4)
data
ind = data.argmax(axis=0) # index of the maximum in each column
ind
time_max = time[ind]
time_max
# => data[ind[0],0], data[ind[1],1]...
data_max = data[ind, range(data.shape[1])]
data_max
np.all(data_max == data.max(axis=0))
###Output
_____no_output_____
###Markdown
However, when a list of indices contains repetitions, the assignment is performed several times and only the last value is left behind.
###Code
a= np.arange(5)
a[[0,0,2]] = [1,2,3]
a
###Output
_____no_output_____
###Markdown
Indexing with Boolean Arrays
###Code
a = np.arange(12).reshape(3,4)
b = a > 4
b
a[b]
a[b] = 0
a
###Output
_____no_output_____ |
examples/getting_started/1_Introduction.ipynb | ###Markdown
What is Datashader?**Datashader turns even the largest datasets into images, faithfully preserving the data's distribution.**Datashader is an [open-source](https://github.com/bokeh/datashader/) Python 2 and 3 library for analyzing and visualizing large datasets. Specifically, datashader is designed to "rasterize" or "aggregate" datasets into regular grids that can be viewed as images, making it simple and quick to see the properties and patterns of your data. Datashader can plot a billion points in a second or so on a 16GB laptop, and scales up easily to out-of-core or distributed processing for even larger datasets.This page of the getting-started guide will give a simple example to show how it works, and the following page will show how to use Datashader as a standalone library for generating arrays or images directly([2-Pipeline](2-Pipeline.ipynb)). Next we'll show how to use Datashader as a component in a larger visualization system like [HoloViews](http://holoviews.org) or [Bokeh](http://bokeh.pydata.org) that provides interactive plots with dynamic zooming, labeled axes, and overlays and layouts ([3-Interactivity](3-Interactivity.ipynb)). More detailed information about each topic is then provided in the [User Guide](../user_guide/). Example: NYC taxi tripsTo illustrate how this process works, we will demonstrate some of the key features of Datashader using a standard "big-data" example: millions of taxi trips from New York City, USA. First let's import the libraries we are going to use and then read the dataset.
###Code
import datashader as ds
import dask.dataframe as dd
from colorcet import fire
from datashader import transfer_functions as tf
df = dd.read_parquet('../data/nyc_taxi_hours.parq/').persist()
df.head()
###Output
_____no_output_____
###Markdown
Here you can see that we have a variety of columns with data about each of the 10 million taxi trips here, such as the locations in Web Mercator coordinates, the distance, etc. With datashader, we can choose what we want to plot on the `x` and `y` axes and see the full data immediately, with no parameter tweaking, magic numbers, subsampling, or approximation, up to the resolution of the display:
###Code
agg = ds.Canvas().points(df, 'dropoff_x', 'dropoff_y')
tf.set_background(tf.shade(agg, cmap=fire),"black")
###Output
_____no_output_____
###Markdown
Here you can immediately see that the data points are aligned to a street grid, that some areas have much more traffic than others, and that the quality of the signal varies spatially (with some areas having blurry patterns that indicate GPS errors, perhaps due to tall buildings). Getting a plot like this with other approaches would take quite a bit of time and effort, but with datashader it appears in milliseconds without trial and error.The output above is just a bare image, which is all that datashader knows how to generate directly. But datashader can integrate closely with Bokeh, HoloViews, and GeoViews, which makes it simple to allow interactive zooming, axis labeling, overlays and layouts, and complex web apps. For example, making a zoomable interactive overlay on a geographic map requires just a few more lines of code:
###Code
import holoviews as hv
import geoviews as gv
from holoviews.operation.datashader import datashade
hv.extension('bokeh')
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg'
tile_opts = dict(width=1000,height=600,xaxis=None,yaxis=None,bgcolor='black',show_grid=False)
map_tiles = gv.WMTS(url).opts(style=dict(alpha=0.5), plot=tile_opts)
points = hv.Points(df, ['dropoff_x', 'dropoff_y'])
taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=fire, width=1000, height=600)
map_tiles * taxi_trips
###Output
_____no_output_____
###Markdown
What is Datashader?**Datashader turns even the largest datasets into images, faithfully preserving the data's distribution.**Datashader is an [open-source](https://github.com/bokeh/datashader/) Python 2 and 3 library for analyzing and visualizing large datasets. Specifically, Datashader is designed to "rasterize" or "aggregate" datasets into regular grids that can be viewed as images, making it simple and quick to see the properties and patterns of your data. Datashader can plot a billion points in a second or so on a 16GB laptop, and scales up easily to out-of-core, distributed, or GPU processing for even larger datasets.This page of the getting-started guide will give a simple example to show how it works, and the following page will show how to use Datashader as a standalone library for generating arrays or images directly([2-Pipeline](2-Pipeline.ipynb)). Next we'll show how to use Datashader as a component in a larger visualization system like [HoloViews](http://holoviews.org) or [Bokeh](http://bokeh.pydata.org) that provides interactive plots with dynamic zooming, labeled axes, and overlays and layouts ([3-Interactivity](3-Interactivity.ipynb)). More detailed information about each topic is then provided in the [User Guide](../user_guide/). Example: NYC taxi tripsTo illustrate how this process works, we will demonstrate some of the key features of Datashader using a standard "big-data" example: millions of taxi trips from New York City, USA. First let's import the libraries we are going to use and then read the dataset.
###Code
import datashader as ds
import pandas as pd
from colorcet import fire
from datashader import transfer_functions as tf
df = pd.read_csv('../data/nyc_taxi.csv', usecols=['dropoff_x', 'dropoff_y'])
df.head()
###Output
_____no_output_____
###Markdown
Here you can see that we have a simple columnar dataset with the x and y dropoff locations (in Web Mercator coordinates) for each of the 10 million taxi trips; the other columns were skipped during loading. With datashader, we can choose what we want to plot on the `x` and `y` axes and see the full data immediately, with no parameter tweaking, magic numbers, subsampling, or approximation, up to the resolution of the display:
###Code
agg = ds.Canvas().points(df, 'dropoff_x', 'dropoff_y')
tf.set_background(tf.shade(agg, cmap=fire),"black")
###Output
_____no_output_____
###Markdown
Here you can immediately see that the data points are aligned to a street grid, that some areas have much more traffic than others, and that the quality of the signal varies spatially (with some areas having blurry patterns that indicate GPS errors, perhaps due to tall buildings). Getting a plot like this with other approaches would take quite a bit of time and effort, but with Datashader it appears in milliseconds without trial and error.The output above is just a bare image, which is all that Datashader knows how to generate directly. But Datashader can integrate closely with Bokeh, HoloViews, and GeoViews, which makes it simple to allow interactive zooming, axis labeling, overlays and layouts, and complex web apps. For example, making a zoomable interactive overlay on a geographic map requires just a few more lines of code:
###Code
import holoviews as hv
from holoviews.element.tiles import EsriImagery
from holoviews.operation.datashader import datashade
hv.extension('bokeh')
map_tiles = EsriImagery().opts(alpha=0.5, width=900, height=480, bgcolor='black')
points = hv.Points(df, ['dropoff_x', 'dropoff_y'])
taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=fire, width=900, height=480)
map_tiles * taxi_trips
###Output
_____no_output_____
###Markdown
What is Datashader?**Datashader turns even the largest datasets into images, faithfully preserving the data's distribution.**Datashader is an [open-source](https://github.com/bokeh/datashader/) Python library for analyzing and visualizing large datasets. Specifically, Datashader is designed to "rasterize" or "aggregate" datasets into regular grids that can be analyzed further or viewed as images, making it simple and quick to see the properties and patterns of your data. Datashader can plot a billion points in a second or so on a 16GB laptop, and scales up easily to out-of-core, distributed, or GPU processing for even larger datasets.This page of the getting-started guide will give a simple example to show how it works, and the following page will show how to use Datashader as a standalone library for generating arrays or images directly([Pipeline](2_Pipeline.ipynb)). Next we'll show how to use Datashader as a component in a larger visualization system like [HoloViews](http://holoviews.org) or [Bokeh](http://bokeh.pydata.org) that provides interactive plots with dynamic zooming, labeled axes, and overlays and layouts ([3-Interactivity](3-Interactivity.ipynb)). More detailed information about each topic is then provided in the [User Guide](../user_guide/). Example: NYC taxi tripsTo illustrate how this process works, we will demonstrate some of the key features of Datashader using a standard "big-data" example: millions of taxi trips from New York City, USA. First let's import the libraries we are going to use and then read the dataset.
###Code
import datashader as ds, pandas as pd, colorcet as cc
df = pd.read_csv('../data/nyc_taxi.csv', usecols=['dropoff_x', 'dropoff_y'])
df.head()
###Output
_____no_output_____
###Markdown
Here you can see that we have a simple columnar dataset with x and y dropoff locations (in Web Mercator coordinates) for each of the 10 million taxi trips included; other columns were skipped during loading. With Datashader, we can choose what we want to plot on the `x` and `y` axes and see the full data immediately, with no parameter tweaking, magic numbers, subsampling, or approximation, up to the resolution of the display:
###Code
agg = ds.Canvas().points(df, 'dropoff_x', 'dropoff_y')
ds.tf.set_background(ds.tf.shade(agg, cmap=cc.fire), "black")
###Output
_____no_output_____
###Markdown
Here you can immediately see that the data points are aligned to a street grid, that some areas have much more traffic than others, and that the quality of the signal varies spatially (with some areas having blurry patterns that indicate GPS errors, perhaps due to tall buildings). Getting a plot like this with other approaches would take quite a bit of time and effort, but with Datashader it appears in milliseconds without trial and error.The output above is just a bare image, which is all that Datashader knows how to generate directly. But Datashader can integrate closely with Bokeh, HoloViews, and GeoViews, which makes it simple to allow interactive zooming, axis labeling, overlays and layouts, and complex web apps. For example, making a zoomable interactive overlay on a geographic map requires just a few more lines of code:
###Code
import holoviews as hv
from holoviews.element.tiles import EsriImagery
from holoviews.operation.datashader import datashade
hv.extension('bokeh')
map_tiles = EsriImagery().opts(alpha=0.5, width=900, height=480, bgcolor='black')
points = hv.Points(df, ['dropoff_x', 'dropoff_y'])
taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cc.fire, width=900, height=480)
map_tiles * taxi_trips
###Output
_____no_output_____
###Markdown
What is Datashader?**Datashader turns even the largest datasets into images, faithfully preserving the data's distribution.**Datashader is an [open-source](https://github.com/bokeh/datashader/) Python 2 and 3 library for analyzing and visualizing large datasets. Specifically, Datashader is designed to "rasterize" or "aggregate" datasets into regular grids that can be analyzed further or viewed as images, making it simple and quick to see the properties and patterns of your data. Datashader can plot a billion points in a second or so on a 16GB laptop, and scales up easily to out-of-core, distributed, or GPU processing for even larger datasets.This page of the getting-started guide will give a simple example to show how it works, and the following page will show how to use Datashader as a standalone library for generating arrays or images directly([2-Pipeline](2-Pipeline.ipynb)). Next we'll show how to use Datashader as a component in a larger visualization system like [HoloViews](http://holoviews.org) or [Bokeh](http://bokeh.pydata.org) that provides interactive plots with dynamic zooming, labeled axes, and overlays and layouts ([3-Interactivity](3-Interactivity.ipynb)). More detailed information about each topic is then provided in the [User Guide](../user_guide/). Example: NYC taxi tripsTo illustrate how this process works, we will demonstrate some of the key features of Datashader using a standard "big-data" example: millions of taxi trips from New York City, USA. First let's import the libraries we are going to use and then read the dataset.
###Code
import datashader as ds, pandas as pd, colorcet as cc
df = pd.read_csv('../data/nyc_taxi.csv', usecols=['dropoff_x', 'dropoff_y'])
df.head()
###Output
_____no_output_____
###Markdown
Here you can see that we have a simple columnar dataset with x and y dropoff locations (in Web Mercator coordinates) for each of the 10 million taxi trips included; other columns were skipped during loading. With Datashader, we can choose what we want to plot on the `x` and `y` axes and see the full data immediately, with no parameter tweaking, magic numbers, subsampling, or approximation, up to the resolution of the display:
###Code
agg = ds.Canvas().points(df, 'dropoff_x', 'dropoff_y')
ds.tf.set_background(ds.tf.shade(agg, cmap=cc.fire), "black")
###Output
_____no_output_____
###Markdown
Here you can immediately see that the data points are aligned to a street grid, that some areas have much more traffic than others, and that the quality of the signal varies spatially (with some areas having blurry patterns that indicate GPS errors, perhaps due to tall buildings). Getting a plot like this with other approaches would take quite a bit of time and effort, but with Datashader it appears in milliseconds without trial and error.The output above is just a bare image, which is all that Datashader knows how to generate directly. But Datashader can integrate closely with Bokeh, HoloViews, and GeoViews, which makes it simple to allow interactive zooming, axis labeling, overlays and layouts, and complex web apps. For example, making a zoomable interactive overlay on a geographic map requires just a few more lines of code:
###Code
import holoviews as hv
from holoviews.element.tiles import EsriImagery
from holoviews.operation.datashader import datashade
hv.extension('bokeh')
map_tiles = EsriImagery().opts(alpha=0.5, width=900, height=480, bgcolor='black')
points = hv.Points(df, ['dropoff_x', 'dropoff_y'])
taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=cc.fire, width=900, height=480)
map_tiles * taxi_trips
###Output
_____no_output_____
###Markdown
What is Datashader?**Datashader turns even the largest datasets into images, faithfully preserving the data's distribution.**Datashader is an [open-source](https://github.com/bokeh/datashader/) Python 2 and 3 library for analyzing and visualizing large datasets. Specifically, datashader is designed to "rasterize" or "aggregate" datasets into regular grids that can be viewed as images, making it simple and quick to see the properties and patterns of your data. Datashader can plot a billion points in a second or so on a 16GB laptop, and scales up easily to out-of-core or distributed processing for even larger datasets.This page of the getting-started guide will give a simple example to show how it works, and the following page will show how to use Datashader as a standalone library for generating arrays or images directly([2-Pipeline](2-Pipeline.ipynb)). Next we'll show how to use Datashader as a component in a larger visualization system like [HoloViews](http://holoviews.org) or [Bokeh](http://bokeh.pydata.org) that provides interactive plots with dynamic zooming, labeled axes, and overlays and layouts ([3-Interactivity](3-Interactivity.ipynb)). More detailed information about each topic is then provided in the [User Guide](../user_guide/). Example: NYC taxi tripsTo illustrate how this process works, we will demonstrate some of the key features of Datashader using a standard "big-data" example: millions of taxi trips from New York City, USA. First let's import the libraries we are going to use and then read the dataset.
###Code
import datashader as ds
import pandas as pd
from colorcet import fire
from datashader import transfer_functions as tf
df = pd.read_csv('../data/nyc_taxi.csv', usecols=['dropoff_x', 'dropoff_y'])
df.head()
###Output
_____no_output_____
###Markdown
Here you can see that we have a simple columnar dataset with the x and y dropoff locations (in Web Mercator coordinates) for each of the 10 million taxi trips; the other columns were skipped during loading. With datashader, we can choose what we want to plot on the `x` and `y` axes and see the full data immediately, with no parameter tweaking, magic numbers, subsampling, or approximation, up to the resolution of the display:
###Code
agg = ds.Canvas().points(df, 'dropoff_x', 'dropoff_y')
tf.set_background(tf.shade(agg, cmap=fire),"black")
###Output
_____no_output_____
###Markdown
Here you can immediately see that the data points are aligned to a street grid, that some areas have much more traffic than others, and that the quality of the signal varies spatially (with some areas having blurry patterns that indicate GPS errors, perhaps due to tall buildings). Getting a plot like this with other approaches would take quite a bit of time and effort, but with datashader it appears in milliseconds without trial and error.The output above is just a bare image, which is all that datashader knows how to generate directly. But datashader can integrate closely with Bokeh, HoloViews, and GeoViews, which makes it simple to allow interactive zooming, axis labeling, overlays and layouts, and complex web apps. For example, making a zoomable interactive overlay on a geographic map requires just a few more lines of code:
###Code
import holoviews as hv
from holoviews.element import tiles
from holoviews.operation.datashader import datashade
hv.extension('bokeh')
url = 'https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{Z}/{Y}/{X}.jpg'
tile_opts = dict(width=1000,height=600,xaxis=None,yaxis=None,bgcolor='black',show_grid=False)
map_tiles = tiles.EsriImagery().opts(style=dict(alpha=0.5), plot=tile_opts)
points = hv.Points(df, ['dropoff_x', 'dropoff_y'])
taxi_trips = datashade(points, x_sampling=1, y_sampling=1, cmap=fire, width=1000, height=600)
map_tiles * taxi_trips
###Output
_____no_output_____ |
machine_learning/reinforcement_learning/generalized_stochastic_policy_iteration/tabular/planning_and_learning_tabular/np_planning_and_learning_tabular/off_policy_stochastic_planning_and_learning_prioritized_sweeping.ipynb | ###Markdown
Planning and Learning: Prioritized Sweeping
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Create environment
###Code
def create_known_environment_states():
"""Creates known environment states.
Returns:
num_states: int, number of states.
num_term_states: int, number of terminal states.
num_non_term_states: int, number of non terminal states.
"""
num_states = 16
num_term_states = 2
num_non_term_states = num_states - num_term_states
return num_states, num_term_states, num_non_term_states
def create_known_environment_actions(num_non_term_states):
"""Creates environment actions.
Args:
num_non_term_states: int, number of non terminal states.
Returns:
max_num_actions: int, max number of actions possible.
num_actions_per_non_term_state: array[int], number of actions per
non terminal state.
"""
max_num_actions = 4
num_actions_per_non_term_state = np.repeat(
a=max_num_actions, repeats=num_non_term_states)
return max_num_actions, num_actions_per_non_term_state
def create_known_environment():
"""Creates known environment.
Returns:
num_states: int, number of states.
num_term_states: int, number of terminal states.
num_non_term_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
num_actions_per_non_term_state: array[int], number of actions per
non terminal state.
"""
(num_states,
num_term_states,
num_non_term_states) = create_known_environment_states()
(max_num_actions,
num_actions_per_non_term_state) = create_known_environment_actions(
num_non_term_states)
return (num_states,
num_term_states,
num_non_term_states,
max_num_actions,
num_actions_per_non_term_state)
class Environment:
"""Class to hold all environment properties.
Fields:
num_sp: array[int], number of successor states s' that can be reached
from state s by taking action a.
sp_idx: array[int], state indices of new state s' of taking action a
from state s.
p: array[float], transition probability to go from state s to s' by
taking action a.
r: array[float], reward from new state s' from state s by taking
action a.
"""
def __init__(self, num_states, num_non_term_states, max_num_actions):
        # Create environment state-action successor state arrays
self.num_sp = np.ones(
shape=[num_states, max_num_actions], dtype=np.int64)
self.sp_idx = np.reshape(
a=np.array([1, 0, 14, 4,
2, 1, 0, 5,
2, 2, 1, 6,
4, 14, 3, 7,
5, 0, 3, 8,
6, 1, 4, 9,
6, 2, 5, 10,
8, 3, 7, 11,
9, 4, 7, 12,
10, 5, 8, 13,
10, 6, 9, 15,
12, 7, 11, 11,
13, 8, 11, 12,
15, 9, 12, 13],
dtype=np.int64),
newshape=(num_non_term_states, max_num_actions, 1))
self.p = np.reshape(
a=np.repeat(
a=1.0, repeats=num_non_term_states * max_num_actions * 1),
newshape=(num_non_term_states, max_num_actions, 1))
self.r = np.reshape(
a=np.repeat(
a=-1.0, repeats=num_non_term_states * max_num_actions * 1),
newshape=(num_non_term_states, max_num_actions, 1))
###Output
_____no_output_____
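###Markdown
A quick added illustration (a sketch that is not part of the original notebook) of how the environment arrays are laid out: taking action index 2 in state 0 leads to state 14, which is one of the two terminal states, and every transition yields a reward of -1.
###Code
_env = Environment(num_states=16, num_non_term_states=14, max_num_actions=4)
print(_env.sp_idx[0, 2, 0]) # successor state of (state 0, action 2) -> 14
print(_env.r[0, 2, 0]) # reward for that transition -> -1.0
###Output
_____no_output_____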
###Markdown
Create model
###Code
class Model:
"""Class to hold all model properties.
Fields:
num_seen_non_term_states: int, number of seen non-terminal states.
seen_non_term_s_stack: array[int], stack to hold all seen non-terminal
states.
seen_non_term_s_stack_rev_lu: array[int], reverse lookup of stack
that holds all seen non-terminal states.
num_seen_non_term_s_a: array[int], number of seen non-terminal
state-action pairs.
seen_non_term_s_a_stack: array[int], stack to hold all seen
non-terminal state-action pairs.
seen_non_term_s_a_stack_rev_lu: array[int], reverse lookup of stack
that holds all seen non-terminal states-action pairs.
num_sp: array[int], number of successor states s' that can be reached
from state s by taking action a.
sp_idx: array[int], state indices of new state s' of taking action a
from state s.
p: array[float], transition probability to go from state s to s' by
taking action a.
r: array[float], reward from new state s' from state s by taking
action a.
s_a_ss_num_visits: array[int], number of visits to a particular
(s, a, s') tuple.
num_s_pred_s_a_pairs: array[int], number of state predecessor state
action pairs.
        s_pred_s_a_pairs: dict, maps each state index to a list of
            predecessor (state, action) pairs.
"""
def __init__(self, num_states, num_non_term_states, max_num_actions):
# Create model state visit counters
self.num_seen_non_term_states = 0
self.seen_non_term_s_stack = np.zeros(
shape=[num_non_term_states], dtype=np.int64)
self.seen_non_term_s_stack_rev_lu = np.zeros(
shape=[num_non_term_states], dtype=np.int64)
# Create model state-action visit counters
self.num_seen_non_term_s_a = np.zeros(
shape=[num_non_term_states], dtype=np.int64)
self.seen_non_term_s_a_stack = np.zeros(
shape=[num_non_term_states, max_num_actions], dtype=np.int64)
self.seen_non_term_s_a_stack_rev_lu = np.zeros(
shape=[num_non_term_states, max_num_actions], dtype=np.int64)
        # Create model state-action successor state arrays
self.num_sp = np.zeros(
shape=[num_states, max_num_actions], dtype=np.int64)
self.sp_idx = np.array(
object=[[[0] if s_idx == 0 and a_idx == 0 else []
for a_idx in range(0, max_num_actions)]
for s_idx in range(0, num_states)],
dtype=np.object)
self.p = np.array(
object=[[[0.0] if s_idx == 0 and a_idx == 0 else []
for a_idx in range(0, max_num_actions)]
for s_idx in range(0, num_states)],
dtype=np.object)
self.r = np.array(
object=[[[0.0] if s_idx == 0 and a_idx == 0 else []
for a_idx in range(0, max_num_actions)]
for s_idx in range(0, num_states)],
dtype=np.object)
self.s_a_ss_num_visits = np.array(
object=[[[0] if s_idx == 0 and a_idx == 0 else []
for a_idx in range(0, max_num_actions)]
for s_idx in range(0, num_states)],
dtype=np.object)
del self.sp_idx[0, 0][0]
del self.p[0, 0][0]
del self.r[0, 0][0]
del self.s_a_ss_num_visits[0, 0][0]
self.num_s_pred_s_a_pairs = np.zeros(
shape=[num_states], dtype=np.int64)
self.s_pred_s_a_pairs = {
state_index: []
for state_index in range(0, num_states)
}
def update_model_seen_state_actions(self, s_idx, a_idx):
"""Updates what state and actions the model has seen.
Args:
s_idx: int, current state index.
a_idx: int, current action index.
"""
# Check to see if state has already been visited
if (self.num_seen_non_term_states == 0 or
(self.seen_non_term_s_stack_rev_lu[s_idx] == 0 and
self.seen_non_term_s_stack[0] != s_idx)): # if new state
# Add to state stack
# 1, 3, 2, 0, 4
self.seen_non_term_s_stack[self.num_seen_non_term_states] = s_idx
# 3, 0, 2, 1, 4
num_seen = self.num_seen_non_term_states
self.seen_non_term_s_stack_rev_lu[s_idx] = num_seen
# Add to action stack
# 2, 0, 3, 1
action_idx = self.num_seen_non_term_s_a[s_idx]
self.seen_non_term_s_a_stack[s_idx][action_idx] = a_idx
# 1, 3, 0, 2
lookup = self.num_seen_non_term_s_a[s_idx]
self.seen_non_term_s_a_stack_rev_lu[s_idx][a_idx] = lookup
# Increment counters
self.num_seen_non_term_s_a[s_idx] += 1
self.num_seen_non_term_states += 1
else: # if already visited state
# Check to see if action has already been visited
if (self.seen_non_term_s_a_stack_rev_lu[s_idx][a_idx] == 0 and
self.seen_non_term_s_a_stack[s_idx][0] != a_idx):
# Add to action stack
# 2, 0, 3, 1
action_idx = self.num_seen_non_term_s_a[s_idx]
self.seen_non_term_s_a_stack[s_idx][action_idx] = a_idx
# 1, 3, 0, 2
num_seen = self.num_seen_non_term_s_a[s_idx]
self.seen_non_term_s_a_stack_rev_lu[s_idx][a_idx] = num_seen
# Increment counters
self.num_seen_non_term_s_a[s_idx] += 1
def update_model_of_environment_from_experience(
self, s_idx, a_idx, reward, next_s_idx):
"""Updates the model from environment experience.
Args:
s_idx: int, current state index.
a_idx: int, current action index.
reward: float, reward of taking action a_idx in state s_idx.
next_s_idx: int, next state index.
"""
# Update model successor arrays
if next_s_idx in self.sp_idx[s_idx, a_idx]:
self.suc_idx = self.sp_idx[s_idx, a_idx].index(next_s_idx)
self.s_a_ss_num_visits[s_idx, a_idx][self.suc_idx] += 1
else:
self.num_sp[s_idx, a_idx] += 1
self.sp_idx[s_idx, a_idx].append(next_s_idx)
self.r[s_idx, a_idx].append(reward)
self.s_a_ss_num_visits[s_idx, a_idx].append(1)
self.s_a_ss_num_visits_sum = np.sum(
a=np.asarray(a=self.s_a_ss_num_visits[s_idx, a_idx]))
self.p[s_idx, a_idx] = [
float(self.s_a_ss_num_visits[s_idx, a_idx][suc_idx]) /
self.s_a_ss_num_visits_sum
for suc_idx in range(0, self.num_sp[s_idx, a_idx])
]
# Update model state predecessors
if (s_idx, a_idx) not in self.s_pred_s_a_pairs[next_s_idx]:
self.s_pred_s_a_pairs[next_s_idx].append((s_idx, a_idx))
self.num_s_pred_s_a_pairs[next_s_idx] += 1
def model_simulate_planning(
self,
num_planning_steps,
num_non_term_states,
max_num_actions,
alpha,
gamma,
theta,
q,
priority_queue):
"""Uses model to simulate experience and plan best actions.
Args:
num_planning_steps: int, number of steps for the planning stage.
num_non_term_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
alpha: float, alpha > 0, learning rate.
gamma: float, 0 <= gamma <= 1, amount to discount future reward.
theta: float, small threshold for adding state-action pairs to
priority queue.
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
priority_queue: instance of `PriorityQueue` class, an array of
`PriorityQueueNode`s that keep track of the state index,
action index, and priority of state-action pairs.
Returns:
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
priority_queue: instance of `PriorityQueue` class, an array of
`PriorityQueueNode`s that keep track of the state index,
action index, and priority of state-action pairs.
"""
for i in range(0, num_planning_steps):
# Check if priority queue is empty
if priority_queue.cur_p_q_size == 0:
break # break i loop since priority queue is empty
# Get max priority state-action pair from queue
s_idx, a_idx = priority_queue.pop_max_node_from_p_q()
# Get reward
reward, sst_idx = observe_reward(s_idx, a_idx, self)
# Get next state
next_s_idx = self.sp_idx[s_idx, a_idx][sst_idx]
# Check to see if we actioned into a terminal state
if next_s_idx >= num_non_term_states:
q[s_idx, a_idx] += alpha * (reward - q[s_idx, a_idx])
else:
# Get next action, max action of next state
next_a_idx = select_max_q_action(
next_s_idx, max_num_actions, q)
# Calculate state-action-function using quintuple
# SARSargmax(a,Q)
delta = gamma * q[next_s_idx, next_a_idx] - q[s_idx, a_idx]
q[s_idx, a_idx] += alpha * (reward + delta)
# Loop for all predicted Sbar and Abar to lead to S
for j in range(0, self.num_s_pred_s_a_pairs[s_idx]):
pred_s_idx = self.s_pred_s_a_pairs[s_idx][j][0]
pred_a_idx = self.s_pred_s_a_pairs[s_idx][j][1]
# Get reward
if s_idx in self.sp_idx[pred_s_idx, pred_a_idx]:
sst_idx = self.sp_idx[pred_s_idx, pred_a_idx].index(s_idx)
# Get reward from predecessor state and action
                    reward = self.r[pred_s_idx, pred_a_idx][sst_idx]
# Get next action, max action of next state
next_a_idx = select_max_q_action(s_idx, max_num_actions, q)
# Calculate priority
expected = gamma * q[s_idx, next_a_idx]
delta = expected - q[pred_s_idx, pred_a_idx]
priority = np.abs(reward + delta)
# Check if priority is over threshold to add to priority queue
if priority > theta:
priority_queue.search_and_update_p_q(
pred_s_idx, pred_a_idx, priority)
return q, priority_queue
###Output
_____no_output_____
###Markdown
Create priority queue
###Code
class PriorityQueueNode:
"""Class to create nodes of a priority queue.
Fields:
s_idx: int, state index.
a_idx: int, action index.
priority: float, priority of state-action pair node.
"""
def __init__(self, i):
        # Initialize the node with a placeholder state-action pair and minimum priority
self.s_idx = -i
self.a_idx = i
self.priority = np.finfo(float).min
class PriorityQueue:
"""Class to create a priority queue.
Fields:
p_q: array, priority queue that contains num_non_term_states *
max_num_actions `PriorityQueueNode`s
cur_p_q_size: int, current number of active nodes in priority queue.
"""
def __init__(self, num_non_term_states, max_num_actions):
self.p_q = np.empty(
shape=[num_non_term_states * max_num_actions], dtype=object)
for i in range(0, num_non_term_states * max_num_actions):
self.p_q[i] = PriorityQueueNode(i)
self.p_q[0].priority = np.finfo(float).max
self.cur_p_q_size = 0
def search_and_update_p_q(self, s_idx, a_idx, priority):
"""Searches for and updates a node in the priority queue.
Args:
s_idx: int, state index.
a_idx: int, action index.
priority: float, priority of state-action pair node.
"""
p_q_idx = -1
p_q_idx = self.search_p_q(s_idx, a_idx)
# Check if node was found
if p_q_idx >= 0:
# Check if found node has a lower priority saved than new priority
if self.p_q[p_q_idx].priority < priority:
self.p_q_node_increase_priority(p_q_idx, priority)
else:
# Node wasn't found so insert into priority queue
self.insert_into_p_q(s_idx, a_idx, priority)
def search_p_q(self, s_idx, a_idx):
"""Searches for a node in the priority queue.
Args:
s_idx: int, state index.
a_idx: int, action index.
Returns:
p_q_idx: int, index of priority queue node.
"""
p_q_idx = -1
# Search up to all nodes in worst case
for i in range(0, self.cur_p_q_size):
if (self.p_q[i].s_idx == s_idx and self.p_q[i].a_idx == a_idx):
p_q_idx = i
break # break i loop since we found node
return p_q_idx
def p_q_node_increase_priority(self, p_q_idx, new_priority):
"""Increases priority of a node in the priority queue.
Increases priority at p_q_idx to new_priority, where it is assumed
that new_priority is greater than priority_queue[p_q_idx].
Args:
p_q_idx: int, index of priority queue node.
new_priority: float, new priority of state-action pair node.
"""
self.p_q[p_q_idx].priority = new_priority
while (p_q_idx != 0 and
self.p_q[self.get_par_idx(p_q_idx)].priority <
self.p_q[p_q_idx].priority):
(self.p_q[p_q_idx],
self.p_q[self.get_par_idx(p_q_idx)]) = self.swap_p_q_nodes(
self.p_q[p_q_idx],
self.p_q[self.get_par_idx(p_q_idx)])
p_q_idx = self.get_par_idx(p_q_idx)
def insert_into_p_q(self, s_idx, a_idx, priority):
"""Inserts a node into the priority queue.
Args:
s_idx: int, state index.
a_idx: int, action index.
priority: float, priority of state-action pair node.
"""
# First insert the new node at the end
self.cur_p_q_size += 1
p_q_idx = self.cur_p_q_size - 1
self.p_q[p_q_idx].s_idx = s_idx
self.p_q[p_q_idx].a_idx = a_idx
self.p_q[p_q_idx].priority = priority
# Fix the max heap property if it is violated
while (p_q_idx != 0 and
self.p_q[self.get_par_idx(p_q_idx)].priority <
self.p_q[p_q_idx].priority):
self.p_q[p_q_idx], self.p_q[self.get_par_idx(p_q_idx)] = \
self.swap_p_q_nodes(
self.p_q[p_q_idx], self.p_q[self.get_par_idx(p_q_idx)])
            p_q_idx = self.get_par_idx(p_q_idx)
def pop_max_node_from_p_q(self):
"""Pops max node off from priority queue.
Returns:
s_idx: int, state index.
a_idx: int, action index.
"""
if self.cur_p_q_size == 1:
self.cur_p_q_size -= 1
return self.p_q[0].s_idx, self.p_q[0].a_idx
# Store the maximum value, and remove it from heap
s_idx = self.p_q[0].s_idx
a_idx = self.p_q[0].a_idx
self.p_q[0].s_idx = self.p_q[self.cur_p_q_size - 1].s_idx
self.p_q[0].a_idx = self.p_q[self.cur_p_q_size - 1].a_idx
self.p_q[0].priority = self.p_q[self.cur_p_q_size - 1].priority
self.cur_p_q_size -= 1
# Fix the max heap property if it is violated
self.max_heapify_p_q(0)
return s_idx, a_idx
def max_heapify_p_q(self, p_q_idx):
"""Max heapifies a subtree of priority queue.
Recursively heapifies a subtree with the root at given index, however
assumes that the subtrees are already heapified.
Args:
p_q_idx: int, index of priority queue node.
"""
l = self.get_left_idx(p_q_idx)
r = self.get_right_idx(p_q_idx)
biggest = p_q_idx
if (l < self.cur_p_q_size and
self.p_q[l].priority > self.p_q[p_q_idx].priority):
biggest = l
if (r < self.cur_p_q_size and
self.p_q[r].priority > self.p_q[biggest].priority):
biggest = r
if biggest != p_q_idx:
temp_s_idx = self.p_q[p_q_idx].s_idx
temp_a_idx = self.p_q[p_q_idx].a_idx
temp_priority = self.p_q[p_q_idx].priority
self.p_q[p_q_idx].s_idx = self.p_q[biggest].s_idx
self.p_q[p_q_idx].a_idx = self.p_q[biggest].a_idx
self.p_q[p_q_idx].priority = self.p_q[biggest].priority
self.p_q[biggest].s_idx = temp_s_idx
self.p_q[biggest].a_idx = temp_a_idx
self.p_q[biggest].priority = temp_priority
self.max_heapify_p_q(biggest)
def swap_p_q_nodes(self, x, y):
"""Swaps attributes between two `PriorityQueueNode`s.
Args:
x: instance of `PriorityQueueNode`.
y: instance of `PriorityQueueNode`.
Returns:
x: instance of `PriorityQueueNode`.
y: instance of `PriorityQueueNode`.
"""
temp_s_idx = x.s_idx
temp_a_idx = x.a_idx
temp_priority = x.priority
x.s_idx = y.s_idx
x.a_idx = y.a_idx
x.priority = y.priority
y.s_idx = temp_s_idx
y.a_idx = temp_a_idx
y.priority = temp_priority
return x, y
def get_par_idx(self, p_q_idx):
"""Gets the parent index of given priority queue node's index.
Args:
p_q_idx: int, index of priority queue node.
"""
return (p_q_idx - 1) // 2
def get_left_idx(self, p_q_idx):
"""Gets the left child index of given priority queue node's index.
Args:
p_q_idx: int, index of priority queue node.
"""
return (2 * p_q_idx + 1)
def get_right_idx(self, p_q_idx):
"""Gets the right child index of given priority queue node's index.
Args:
p_q_idx: int, index of priority queue node.
"""
return (2 * p_q_idx + 2)
###Output
_____no_output_____
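###Markdown
A tiny added sanity check (a sketch that is not part of the original notebook): push a few state-action pairs with different priorities and confirm that the highest-priority pair is popped first.
###Code
_pq = PriorityQueue(num_non_term_states=4, max_num_actions=2)
_pq.search_and_update_p_q(0, 1, 0.5)
_pq.search_and_update_p_q(2, 0, 1.5)
_pq.search_and_update_p_q(1, 1, 1.0)
print(_pq.pop_max_node_from_p_q()) # expect (2, 0), the pair with priority 1.5
###Output
_____no_output_____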
###Markdown
Set hyperparameters
###Code
def set_hyperparameters():
"""Sets hyperparameters.
Returns:
num_episodes: int, number of episodes to train over.
maximum_episode_length: int, max number of timesteps for an episode.
num_planning_steps: int, number of steps for the planning stage.
alpha: float, alpha > 0, learning rate.
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
gamma: float, 0 <= gamma <= 1, amount to discount future reward.
theta: float, small threshold for adding state-action pairs to priority
queue.
"""
num_episodes = 10000
maximum_episode_length = 200
num_planning_steps = 1
alpha = 0.1
epsilon = 0.1
gamma = 1.0
theta = 0.0
return (num_episodes,
maximum_episode_length,
num_planning_steps,
alpha,
epsilon,
gamma,
theta)
###Output
_____no_output_____
###Markdown
Create value function and policy arrays
###Code
def create_value_function_arrays(num_states, max_num_actions):
"""Creates value function arrays.
Args:
num_states: int, number of states.
max_num_actions: int, max number of actions possible.
Returns:
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
"""
return np.zeros(shape=[num_states, max_num_actions], dtype=np.float64)
def create_policy_arrays(num_non_term_states, max_num_actions):
"""Creates policy arrays.
Args:
num_non_term_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
Returns:
policy: array[float], learned stochastic policy of which
action a to take in state s.
"""
policy = np.repeat(
a=1.0 / max_num_actions,
repeats=num_non_term_states * max_num_actions)
policy = np.reshape(
a=policy,
newshape=(num_non_term_states, max_num_actions))
return policy
###Output
_____no_output_____
###Markdown
Create algorithm
###Code
# Set random seed so that everything is reproducible
np.random.seed(seed=0)
def initialize_epsiode(num_non_term_states):
"""Initializes epsiode with initial state and initial action.
Args:
num_non_term_states: int, number of non terminal states.
Returns:
init_s_idx: int, initial state index from set of non terminal states.
"""
# Randomly choose an initial state from all non-terminal states
init_s_idx = np.random.randint(
low=0, high=num_non_term_states, dtype=np.int64)
return init_s_idx
def epsilon_greedy_policy_from_state_action_function(
max_num_actions, q, epsilon, s_idx, policy):
"""Create epsilon-greedy policy from state-action value function.
Args:
max_num_actions: int, max number of actions possible.
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
s_idx: int, current state index.
policy: array[float], learned stochastic policy of which action a to
take in state s.
Returns:
policy: array[float], learned stochastic policy of which action a to
take in state s.
"""
# Save max state-action value and find the number of actions that have the
# same max state-action value
max_action_value = np.max(a=q[s_idx, :])
max_action_count = np.count_nonzero(a=q[s_idx, :] == max_action_value)
# Apportion policy probability across ties equally for state-action pairs
# that have the same value and zero otherwise
if max_action_count == max_num_actions:
max_policy_prob_per_action = 1.0 / max_action_count
remain_prob_per_action = 0.0
else:
max_policy_prob_per_action = (1.0 - epsilon) / max_action_count
remain_prob_per_action = epsilon / (max_num_actions - max_action_count)
policy[s_idx, :] = np.where(
q[s_idx, :] == max_action_value,
max_policy_prob_per_action,
remain_prob_per_action)
return policy
def loop_through_episode(
num_non_term_states,
max_num_actions,
environment,
model,
priority_queue,
q,
policy,
alpha,
epsilon,
gamma,
theta,
maximum_episode_length,
num_planning_steps,
s_idx):
"""Loops through episode to iteratively update policy.
Args:
num_non_term_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
environment: instance of `Environment` class that holds environment
properties that are hidden from us, but that we can sample.
model: instance of `Model` class that holds model properties
that we learn through experience.
priority_queue: instance of `PriorityQueue` class, an array of
`PriorityQueueNode`s that keep track of the state index,
action index, and priority of state-action pairs.
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
alpha: float, alpha > 0, learning rate.
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
gamma: float, 0 <= gamma <= 1, amount to discount future reward.
theta: float, small threshold for adding state-action pairs to priority
queue.
maximum_episode_length: int, max number of timesteps for an episode.
num_planning_steps: int, number of steps for the planning stage.
s_idx: int, current state index.
Returns:
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
model: instance of `Model` class that holds model properties
that we learn through experience.
priority_queue: instance of `PriorityQueue` class, an array of
`PriorityQueueNode`s that keep track of the state index,
action index, and priority of state-action pairs.
"""
# Loop through episode steps until termination
for t in range(0, maximum_episode_length):
# Get epsilon-greedy action
a_idx, policy = select_action_from_epsilon_greedy_policy(
max_num_actions, q, epsilon, s_idx, policy)
# Update what state and actions the model has seen
model.update_model_seen_state_actions(s_idx, a_idx)
# Get reward
reward, sst_idx = observe_reward(s_idx, a_idx, environment)
# Get next state
next_s_idx = environment.sp_idx[s_idx, a_idx, sst_idx]
# Update model from environment experience
model.update_model_of_environment_from_experience(
s_idx, a_idx, reward, next_s_idx)
# Check to see if we actioned into a terminal state
if next_s_idx >= num_non_term_states:
# Calculate priority
priority = np.abs(reward - q[s_idx, a_idx])
else:
# Get next action, max action of next state
next_a_idx = select_max_q_action(next_s_idx, max_num_actions, q)
# Calculate priority
delta = gamma * q[next_s_idx][next_a_idx] - q[s_idx][a_idx]
priority = np.abs(reward + delta)
# Check if priority is over threshold to add to priority queue
if priority > theta:
priority_queue.search_and_update_p_q(
s_idx, a_idx, priority)
# Use updated model to simulate experience in planning phase
q, priority_queue = model.model_simulate_planning(
num_planning_steps,
num_non_term_states,
max_num_actions,
alpha,
gamma,
theta,
q,
priority_queue)
# Check to see if we actioned into a terminal state
if next_s_idx >= num_non_term_states:
break # break i loop
# Update state to next state
s_idx = next_s_idx
return q, policy, model, priority_queue
def select_action_from_epsilon_greedy_policy(
max_num_actions, q, epsilon, s_idx, policy):
"""Selects an action in state s_idx from epsilon-greedy policy.
Args:
max_num_actions: int, max number of actions possible.
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
s_idx: int, current state index.
policy: array[float], learned stochastic policy of which
action a to take in state s.
Returns:
a_idx: int, current action index.
policy: array[float], learned stochastic policy of which
action a to take in state s.
"""
# Choose policy for chosen state by epsilon-greedy choosing from the
# state-action-value function
policy = epsilon_greedy_policy_from_state_action_function(
max_num_actions, q, epsilon, s_idx, policy)
# Get epsilon-greedy action
a_idx = np.random.choice(a=max_num_actions, p=policy[s_idx, :])
return a_idx, policy
def observe_reward(s_idx, a_idx, system):
"""Observes the reward from the given system (environment or model).
Args:
s_idx: int, current state index.
a_idx: int, current action index.
system: either an instance of `Environment` or `Model` class that
holds environment or model properties, respectively.
Returns:
reward: float, reward of taking action a_idx in state s_idx.
sst_idx: int, successor state transition index.
"""
sst_idx = np.random.choice(
a=system.num_sp[s_idx, a_idx],
p=system.p[s_idx, a_idx][:])
reward = system.r[s_idx, a_idx][sst_idx]
return reward, sst_idx
def select_max_q_action(s_idx, max_num_actions, q):
"""Selects action with max state-action-value function for given state.
Args:
s_idx: int, current state index.
max_num_actions: int, max number of actions possible.
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
Returns:
next_a_idx: int, next action index.
"""
max_action_value = np.max(a=q[s_idx, :])
max_action_stack = np.extract(
condition=q[s_idx, :] == max_action_value,
arr=np.arange(max_num_actions))
next_a_idx = np.random.choice(a=max_action_stack)
return next_a_idx
def off_policy_planning_and_learning_prioritized_sweeping(
num_non_term_states,
max_num_actions,
environment,
model,
priority_queue,
q,
policy,
alpha,
epsilon,
gamma,
theta,
num_episodes,
maximum_episode_length,
num_planning_steps):
"""Loops through episodes to iteratively update policy.
Args:
num_non_term_states: int, number of non terminal states.
max_num_actions: int, max number of actions possible.
environment: instance of `Environment` class that holds environment
properties that are hidden from us, but that we can sample.
model: instance of `Model` class that holds model properties
that we learn through experience.
priority_queue: instance of `PriorityQueue` class, an array of
`PriorityQueueNode`s that keep track of the state index,
action index, and priority of state-action pairs.
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
alpha: float, alpha > 0, learning rate.
epsilon: float, 0 <= epsilon <= 1, exploitation-exploration trade-off,
higher means more exploration.
gamma: float, 0 <= gamma <= 1, amount to discount future reward.
theta: float, small threshold for adding state-action pairs to priority
queue.
num_episodes: int, number of episodes to train over.
maximum_episode_length: int, max number of timesteps for an episode.
num_planning_steps: int, number of steps for the planning stage.
Returns:
q: array[float], keeps track of the estimated value of each
state-action pair Q(s, a).
policy: array[float], learned stochastic policy of which
action a to take in state s.
model: instance of `Model` class that holds model properties
that we learn through experience.
priority_queue: instance of `PriorityQueue` class, an array of
`PriorityQueueNode`s that keep track of the state index,
action index, and priority of state-action pairs.
"""
for episode in range(0, num_episodes):
# Initialize episode to get initial state
init_s_idx = initialize_epsiode(num_non_term_states)
# Loop through episode and update the policy
q, policy, model, priority_queue = loop_through_episode(
num_non_term_states,
max_num_actions,
environment,
model,
priority_queue,
q,
policy,
alpha,
epsilon,
gamma,
theta,
maximum_episode_length,
num_planning_steps,
init_s_idx)
return q, policy, model, priority_queue
###Output
_____no_output_____
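###Markdown
A small added example (a sketch that is not part of the original notebook) of the epsilon-greedy policy construction: with epsilon = 0.1 and two tied maximal actions, the ties share probability (1 - epsilon) and the remaining actions share epsilon.
###Code
_q = np.array([[0.0, 2.0, 2.0, 1.0]])
_policy = np.full((1, 4), 0.25)
epsilon_greedy_policy_from_state_action_function(
    max_num_actions=4, q=_q, epsilon=0.1, s_idx=0, policy=_policy)
# expected: array([[0.05, 0.45, 0.45, 0.05]])
###Output
_____no_output_____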
###Markdown
Run algorithm
###Code
def run_algorithm():
"""Runs the algorithm.
Returns:
model: instance of `Model` class that holds model properties
that we learn through experience.
"""
(num_states,
_,
num_non_term_states,
max_num_actions,
_) = create_known_environment()
environment = Environment(
num_states, num_non_term_states, max_num_actions)
model = Model(num_states, num_non_term_states, max_num_actions)
priority_queue = PriorityQueue(num_non_term_states, max_num_actions)
(num_episodes,
maximum_episode_length,
num_planning_steps,
alpha,
epsilon,
gamma,
theta) = set_hyperparameters()
q = create_value_function_arrays(num_states, max_num_actions)
policy = create_policy_arrays(num_non_term_states, max_num_actions)
# Print initial arrays
print("\nInitial state-action value function")
print(q)
print("\nInitial policy")
print(policy)
# Run off policy planning and learning prioritized sweeping
(q,
policy,
model,
priority_queue) = off_policy_planning_and_learning_prioritized_sweeping(
num_non_term_states,
max_num_actions,
environment,
model,
priority_queue,
q,
policy,
alpha,
epsilon,
gamma,
theta,
num_episodes,
maximum_episode_length,
num_planning_steps)
# Print final results
print("\nFinal state-action value function")
print(q)
print("\nFinal policy")
print(policy)
return model
model = run_algorithm()
# Print model seen arrays
print("model.num_seen_non_term_states")
print(model.num_seen_non_term_states)
print("model.seen_non_term_s_stack")
print(model.seen_non_term_s_stack)
print("model.seen_non_term_s_stack_rev_lu")
print(model.seen_non_term_s_stack_rev_lu)
print("model.num_seen_non_term_s_a")
print(model.num_seen_non_term_s_a)
print("model.seen_non_term_s_a_stack")
print(model.seen_non_term_s_a_stack)
print("model.seen_non_term_s_a_stack_rev_lu")
print(model.seen_non_term_s_a_stack_rev_lu)
# Print model successor arrays
print("model.num_sp")
print(model.num_sp)
print("model.sp_idx")
print(model.sp_idx)
print("model.p")
print(model.p)
print("model.r")
print(model.r)
print("model.s_a_ss_num_visits")
print(model.s_a_ss_num_visits)
###Output
model.num_sp
[[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[1 1 1 1]
[0 0 0 0]
[0 0 0 0]]
model.sp_idx
[[list([1]) list([0]) list([14]) list([4])]
[list([2]) list([1]) list([0]) list([5])]
[list([2]) list([2]) list([1]) list([6])]
[list([4]) list([14]) list([3]) list([7])]
[list([5]) list([0]) list([3]) list([8])]
[list([6]) list([1]) list([4]) list([9])]
[list([6]) list([2]) list([5]) list([10])]
[list([8]) list([3]) list([7]) list([11])]
[list([9]) list([4]) list([7]) list([12])]
[list([10]) list([5]) list([8]) list([13])]
[list([10]) list([6]) list([9]) list([15])]
[list([12]) list([7]) list([11]) list([11])]
[list([13]) list([8]) list([11]) list([12])]
[list([15]) list([9]) list([12]) list([13])]
[list([]) list([]) list([]) list([])]
[list([]) list([]) list([]) list([])]]
model.p
[[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([1.0]) list([1.0]) list([1.0]) list([1.0])]
[list([]) list([]) list([]) list([])]
[list([]) list([]) list([]) list([])]]
model.r
[[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([-1.0]) list([-1.0]) list([-1.0]) list([-1.0])]
[list([]) list([]) list([]) list([])]
[list([]) list([]) list([]) list([])]]
model.s_a_ss_num_visits
[[list([116]) list([118]) list([2770]) list([102])]
[list([70]) list([39]) list([1258]) list([46])]
[list([39]) list([35]) list([444]) list([349])]
[list([99]) list([2228]) list([77]) list([63])]
[list([52]) list([1004]) list([416]) list([58])]
[list([286]) list([122]) list([178]) list([320])]
[list([47]) list([52]) list([48]) list([1305])]
[list([50]) list([1227]) list([56]) list([36])]
[list([162]) list([391]) list([178]) list([212])]
[list([345]) list([51]) list([46]) list([929])]
[list([67]) list([89]) list([81]) list([2166])]
[list([500]) list([328]) list([20]) list([62])]
[list([1407]) list([46]) list([59]) list([48])]
[list([2836]) list([111]) list([101]) list([106])]
[list([]) list([]) list([]) list([])]
[list([]) list([]) list([]) list([])]]
|
case_studies/Qian2011.ipynb | ###Markdown
IMPORTANT: matplotlib.pyplot conflicts with garbage collection of peppercorns' objects. Whenever you enumerate multiple systems that use same-named domains, complexes, etc., make sure to import plotting libraries only after all your data has been generated. (You have to restart the kernel in order to modify your data once a plotting library has been importet, even if it is e.g. a shell script in the background importing that library. Yes, it's terrible.) Initialization
###Code
import pandas as pd; pd.set_option('display.max_colwidth', None)
from numpy import log10, sqrt
def sema(vect, fig):
"""Rewrite the semantics column into a more compact, human readable version.
"""
nv = []
for line in vect:
v = []
for (x,y) in fig.pepperargs[line].items():
if x == 'enumconc': continue
if x == 'max_complex_size': continue
if x == 'max_complex_count': continue
if x == 'max_reaction_count': continue
if x == 'condensed':
z = x if y is True else 'detailed'
else:
z = '{}={}'.format(x,y)
v.append(z)
nv.append(', '.join(v))
return nv
# Retrieve the list of FigureData Objects.
from qian2011 import data; q11 = data()
from qian2011sqrt import data as datasq; q11sq = datasq()
###Output
_____no_output_____
###Markdown
Time detailed vs condensed runs (that takes a while)
###Code
%%timeit
q11 = data()
for fig in q11:
fig.pepperargs['default']['condensed'] = False
fig.eval(verbose = 0, enumprofile = True)
%%timeit
q11 = data()
for fig in q11:
fig.pepperargs['default']['condensed'] = True
fig.eval(verbose = 0, enumprofile = True)
%%timeit
q11sq = datasq()
for fig in q11sq:
fig.pepperargs['default']['condensed'] = False
fig.eval(verbose = 1, enumprofile = True)
%%timeit
q11sq = datasq()
for fig in q11sq:
fig.pepperargs['default']['condensed'] = True
fig.eval(verbose = 1, enumprofile = True)
###Output
_____no_output_____
###Markdown
Get / Update Peppercorn estimates
###Code
# Generate Peppercorn estimates for every FigureData setup.
verb = 0
Table1 = pd.DataFrame()
for fig in q11:
print(fig.name)
if verb:
display(fig.pepperargs['default'])
fig.pepperargs['ddG'] = fig.pepperargs['default'].copy()
fig.pepperargs['ddG']['dG_bp'] = -1.3
fig.pepperargs['seesaw'] = {
'ssw_rxns': 'seesaw-T20-utbr-leak-reduced',
'ssw_conc': 100e-9,
'dry_run': True,
'ssw_expl': False,
'enumconc': 'nM'}
fig.eval('default', verbose = verb)
fig.eval('ddG', verbose = verb)
fig.eval('seesaw', verbose = verb)
for df in fig.get_dataframes():
df['Name']=fig.name# + '-' + df['pepperargs']
df['Semantics-tex']=sema(df['Semantics'], fig)
#display(df)
Table1 = Table1.append(df)
###Output
_____no_output_____
###Markdown
Square-root circuit
###Code
def load_old_results(fig, basename, cmpfig):
fig._enumerated |= set([basename + '-enum.pil'])
fig._simulated |= set([basename + '-y1_0-1101-simu',
basename + '-y1_0-1100-simu',
basename + '-y1_0-0110-simu',
basename + '-y1_0-0010-simu',
basename + '-y1_0-0011-simu',
basename + '-y1_1-1010-simu',
basename + '-y1_0-0100-simu',
basename + '-y1_0-0000-simu',
basename + '-y1_1-0011-simu',
basename + '-y1_0-0111-simu',
basename + '-y1_0-1110-simu',
basename + '-y1_1-0100-simu',
basename + '-y1_1-0010-simu',
basename + '-y1_1-0001-simu',
basename + '-y1_0-1000-simu',
basename + '-y1_0-1010-simu',
basename + '-y1_1-1011-simu',
basename + '-y1_1-1101-simu',
basename + '-y1_1-0110-simu',
basename + '-y1_1-0101-simu',
basename + '-y1_0-0101-simu',
basename + '-y1_1-1001-simu',
basename + '-y1_1-1111-simu',
basename + '-y1_0-1011-simu',
basename + '-y1_0-1111-simu',
basename + '-y1_1-0111-simu',
basename + '-y1_1-1000-simu',
basename + '-y1_0-1001-simu',
basename + '-y1_1-0000-simu',
basename + '-y1_1-1100-simu',
basename + '-y1_0-0001-simu',
basename + '-y1_1-1110-simu',
basename + '-y2_0-1101-simu',
basename + '-y2_0-1100-simu',
basename + '-y2_0-0110-simu',
basename + '-y2_0-0010-simu',
basename + '-y2_0-0011-simu',
basename + '-y2_1-1010-simu',
basename + '-y2_0-0100-simu',
basename + '-y2_0-0000-simu',
basename + '-y2_1-0011-simu',
basename + '-y2_0-0111-simu',
basename + '-y2_0-1110-simu',
basename + '-y2_1-0100-simu',
basename + '-y2_1-0010-simu',
basename + '-y2_1-0001-simu',
basename + '-y2_0-1000-simu',
basename + '-y2_0-1010-simu',
basename + '-y2_1-1011-simu',
basename + '-y2_1-1101-simu',
basename + '-y2_1-0110-simu',
basename + '-y2_1-0101-simu',
basename + '-y2_0-0101-simu',
basename + '-y2_1-1001-simu',
basename + '-y2_1-1111-simu',
basename + '-y2_0-1011-simu',
basename + '-y2_0-1111-simu',
basename + '-y2_1-0111-simu',
basename + '-y2_1-1000-simu',
basename + '-y2_0-1001-simu',
basename + '-y2_1-0000-simu',
basename + '-y2_1-1100-simu',
basename + '-y2_0-0001-simu',
basename + '-y2_1-1110-simu'])
fig._simexecs |= set([basename + '-simu.py'])
fig.cmpfig[cmpfig] = basename + '-cmp.nxy'
if False:
load_old_results(q11sq[0], "tmp/Qian2011-SF31-00-default", "default")
load_old_results(q11sq[0], "tmp/Qian2011-SF31-00-ddG", "ddG")
load_old_results(q11sq[0], "tmp/Qian2011-SF31-00-seesaw", "seesaw")
# Generate Peppercorn estimates for every FigureData setup.
verb = 0
for fig in q11sq:
print(fig.name)
fig.pepperargs['seesaw'] = {'ssw_rxns':'seesaw-T25-utbr-leak-reduced', 'ssw_conc':50e-9, 'dry_run': True, 'ssw_expl': False, 'enumconc': 'nM'}
fig.pepperargs['ddG'] = fig.pepperargs['default'].copy()
fig.pepperargs['ddG']['dG_bp']=-1.3
fig.eval('default', verbose = verb, cmpfig=True)
fig.eval('seesaw', verbose = verb, cmpfig=True)
fig.eval('ddG', verbose=verb, cmpfig=True)
for df in fig.get_dataframes():
df['Name']=fig.name
df['Semantics-tex']=sema(df['Semantics'], fig)
Table1 = Table1.append(df)
###Output
_____no_output_____
###Markdown
Seesaw systems comparison
###Code
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
# Get Data from File
SQ = q11sq[0]
df = pd.read_csv(SQ.cmpfig['ddG'], sep='\s+')
time = df.values[:,0]/3600
y1_0 = df.values[:,1:17]
y1_1 = df.values[:,17:33]
y2_0 = df.values[:,33:49]
y2_1 = df.values[:,49:]
plt.figure(figsize=(5.5,4.5))
# Plot the data
plt.plot(time, y1_0, color = 'blue', linestyle = ':')
plt.plot(time, y1_1, color = 'blue')
plt.plot(time, y2_0, color = 'red', linestyle = ':')
plt.plot(time, y2_1, color = 'red')
plt.plot([0,8], [40, 0], linewidth = 1, color = 'black', linestyle = '--', zorder = 1)
plt.xlim(-0.5,10.5)
#plt.xticks(np.arange(0, 21, step=5))
plt.ylim(-3,53)
#plt.yticks(np.arange(0, 21, step=5))
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color = 'blue', linestyle = ':', lw = 2),
Line2D([0], [0], color = 'blue', lw = 2),
Line2D([0], [0], color = 'red', linestyle = ':', lw = 2),
Line2D([0], [0], color = 'red', lw = 2)]
plt.legend(custom_lines,
['$Y_1 0$', '$Y_11$', '$Y_20$', '$Y_21$'],
loc = "center right")
plt.gca().set_title('Superimposed square-root circuit simulations.', fontsize = 15)
plt.gca().set_xlabel('Time [hours]', fontsize = 15)
plt.gca().set_ylabel('Concentration [nM]', fontsize = 15)
plt.savefig('qian2011_sqrt_simu_ddG.pdf', bbox_inches='tight')
plt.savefig('qian2011_sqrt_simu_ddG.svg', bbox_inches='tight')
# Get Data from File
SQ = q11sq[0]
df = pd.read_csv(SQ.cmpfig['seesaw'], sep='\s+')
time = df.values[:,0]/3600
y1_0 = df.values[:,1:17]
y1_1 = df.values[:,17:33]
y2_0 = df.values[:,33:49]
y2_1 = df.values[:,49:]
plt.figure(figsize=(5.5,4.5))
# Plot the data
plt.plot(time, y1_0, color='blue', linestyle=':')
plt.plot(time, y1_1, color='blue')
plt.plot(time, y2_0, color='red', linestyle=':')
plt.plot(time, y2_1, color='red')
plt.plot([0,8], [40, 0], linewidth=1, color='black', linestyle='--', zorder=1)
plt.xlim(-0.5,10.5)
plt.ylim(-3,53)
from matplotlib.lines import Line2D
custom_lines = [Line2D([0], [0], color='blue', linestyle=':', lw=2),
Line2D([0], [0], color='blue', lw=2),
Line2D([0], [0], color='red', linestyle=':', lw=2),
Line2D([0], [0], color='red', lw=2)]
plt.legend(custom_lines, ['$Y_1 0$', '$Y_11$', '$Y_20$', '$Y_21$'], loc="upper left")
plt.gca().set_title('Superimposed square-root circuit simulations.', fontsize=15)
plt.gca().set_xlabel('Time [hours]', fontsize=15)
plt.gca().set_ylabel('Concentration [nM]', fontsize=15)
plt.savefig('qian2011_sqrt_simu_seesaw.pdf', bbox_inches='tight')
plt.savefig('qian2011_sqrt_simu_seesaw.svg', bbox_inches='tight')
tmpfig = Table1.copy()
tmpfig['Time (experiment)'] = log10(tmpfig['Time (experiment)'])
tmpfig['Time (simulation)'] = log10(tmpfig['Time (simulation)'])
#display(tmpfig)
g = sns.relplot(x="Time (experiment)", y="Time (simulation)", hue='Name', col="Semantics", data=tmpfig, col_order=['default', 'ddG'])
(mi, ma)=(2, 5)
plt.xlim(mi, ma)
plt.ylim(mi, ma)
for ax in plt.gcf().get_axes():
ax.plot([mi, ma], [mi, ma], linewidth=1, color='white',zorder=0)
ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x,y: "$10^{{ {:.1f} }}$".format(x)))
ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x,y: "$10^{{ {:.1f} }}$".format(x)))
ax.set(xlabel='Experimental diagonal crossing time [seconds]')
plt.gcf().get_axes()[0].set(ylabel='Peppercorn diagonal crossing time [seconds]')
axes = g.axes.flatten()
axes[0].set_title("Regular toehold binding energy: -1.7 kcal/mol/nucleotide.", fontsize = 11)
axes[1].set_title("Adjusted toehold binding energy: -1.3 kcal/mol/nucleotide.", fontsize = 11)
plt.savefig('seesaw_times_peppercorn.pdf', bbox_inches='tight')
plt.savefig('seesaw_times_peppercorn.svg', bbox_inches='tight')
tmpfig = Table1.copy()
tmpfig['Time (experiment)'] = log10(tmpfig['Time (experiment)'])
tmpfig['Time (simulation)'] = log10(tmpfig['Time (simulation)'])
g = sns.relplot(x="Time (experiment)", y="Time (simulation)", hue='Name', col="Semantics", data=tmpfig, col_order=['seesaw'])
(mi, ma)=(2, 5)
for ax in plt.gcf().get_axes():
#print(ax)
ax.plot([mi, ma], [mi, ma], linewidth=1, color='white',zorder=0)
ax.set(xlabel='Experimental diagonal crossing time [seconds]')
ax.xaxis.set_major_formatter(plt.FuncFormatter(lambda x,y: "$10^{{ {:.1f} }}$".format(x)))
ax.yaxis.set_major_formatter(plt.FuncFormatter(lambda x,y: "$10^{{ {:.1f} }}$".format(x)))
plt.xlim(mi, ma)
plt.ylim(mi, ma)
plt.gcf().get_axes()[0].set(ylabel='Seesaw model diagonal crossing time [seconds]')
g.set_titles(row_template="{row_name}", col_template="{col_name}")
axes = g.axes.flatten()
axes[0].set_title("Seesaw reaction model Qian & Winfree (2011)")
plt.savefig('seesaw_times_qian.pdf', bbox_inches='tight')
plt.savefig('seesaw_times_qian.svg', bbox_inches='tight')
# Write Data to tex files:
# Move Semantics-tex into Semantics column, delete semantics-tex column.
tmpfig = Table1.copy()
tmpfig['Semantics']=tmpfig['Semantics-tex']
tmpfig = tmpfig.drop(columns=['Semantics-tex'])
#display(tmpfig)
tmpfig.to_latex('SeesawData.tex', index=False, float_format='{:.3g}'.format)
###Output
_____no_output_____ |
exercises/ex_1_3.ipynb | ###Markdown
1. Functions and MethodsHave a look at the exercises on [DataCamp](https://learn.datacamp.com/):**1.1.** [Builtin functions](https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-3-functions-and-packages?ex=2)**1.2.** [Multiple Arguments](https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-3-functions-and-packages?ex=4)**1.3.** [String Methods](https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-3-functions-and-packages?ex=6)**1.4.** [List Methods](https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-3-functions-and-packages?ex=7)**1.5.** [List Methods](https://campus.datacamp.com/courses/intro-to-python-for-data-science/chapter-3-functions-and-packages?ex=8) 2. Control flow within a function A [leap year](https://en.wikipedia.org/wiki/Leap_year) contains one additional day and occurs every 4 years. Years that are evenly divisible by 100 are not leap years, except years that are evenly divisible by 400 (which are leap years).Write a function `leap_year(n)` that takes an integer `n` which represents the year, and returns `True` if `n` is a leap year and `False` if not. Test your function with the following years: `1600, 2020, 5, 2015, 2100`
###Code
def leap_year(n):
...
###Output
_____no_output_____
###Markdown
3. Calling a function within a loop Assume you have the three lists `cargo`, `actual` and `nominal` as defined in the cell below. First, write a function `commission` with three arguments `cargo`, `actual` and `nominal` that prints the cargo, determines the difference between actual and nominal values and then prints and returns the result.Now write a `for`-loop, that iterates over the three lists and calls the function in each iteration.Lastly, include a keyword-argument `warn` with the default `False`, that prints a short warning, only if the difference between actual and nominal is greater than 25, and if `warn` is `True`. Then, rerun the loop, with `warn=True`.
###Code
cargo = ['bananas', 'apples', 'milk']
actual = [24, 24, 12]
nominal = [4, 52, 0]
def commission(cargo, actual, nominal, warn=False):
...
###Output
_____no_output_____ |
examples/Nested Hierarchies.ipynb | ###Markdown
`ipydatagrid` supports nested columns/rows out of the box. Just pass a MultiIndex pandas DataFrame and you're set!
###Code
import ipydatagrid as ipg
import pandas as pd
import numpy as np
# Columns
col_top_level = [
"VeryLongValueFactors",
"VeryLongValueFactors",
"Even Longer Momentum Factors",
"Even Longer Momentum Factors",
]
col_bottom_level = ["Factor_A", "Factor_B", "Factor_C", "Factor_D"]
# Rows
row_top_level = ["Sector 1", "Sector 1", "Sector 2", "Sector 2"]
row_bottom_level = ["Security A", "Security B", "Security C", "Security D"]
header_renderer = ipg.TextRenderer(
background_color="moccasin",
text_color="navy",
vertical_alignment="top",
horizontal_alignment="center",
)
default_renderer = ipg.TextRenderer(
text_color=ipg.VegaExpr("cell.value <= 0 ? 'purple' : 'green'")
)
nested_df = pd.DataFrame(
np.random.randn(4, 4).round(4),
columns=pd.MultiIndex.from_arrays([col_top_level, col_bottom_level]),
index=pd.MultiIndex.from_arrays(
[row_top_level, row_bottom_level], names=("Sector", "Ticker")
),
)
nested_grid = ipg.DataGrid(
nested_df,
base_column_size=80,
base_column_header_size=35,
base_row_header_size=80,
layout={"height": "180px"},
header_renderer=header_renderer,
default_renderer=default_renderer,
)
nested_grid
###Output
_____no_output_____ |
examples/autoencoder-exploration.ipynb | ###Markdown
Exploring an auto-encoder for FashionMNISTAn auto-encoder is a model that encodes some content (in this case an image) into a lower-dimensional vector and then decodes it back to the original image. The encoder may be useful as a trained feature extractor. When evaluating the auto-encoder, it is important to ensure the recreated content looks reasonable. This notebook shows an example of how one may visually inspect the recreated content from an auto-encoder.This example uses ipywidgets, which may be enabled by running:```jupyter nbextension enable --py widgetsnbextension``` Import dependencies for this notebook
###Code
import io
import urllib
import zipfile
import IPython.display as ipd
import numpy as np
import pandas as pd
import sidekick
from ipywidgets import interact_manual
from PIL import Image
###Output
_____no_output_____
###Markdown
Download FashionMNISTDownload FashionMNIST to a tempfile. For details including license please see: [knowledge-center/fashion-mnist](https://peltarion.com/knowledge-center/documentation/datasets-view/datasets-used-in-tutorials/fashion-mnist-dataset)
###Code
dataset_url = 'https://storage.googleapis.com/bucket-8732/fashion.zip'
with urllib.request.urlopen(dataset_url) as http:
fashion_mnist_bin = io.BytesIO(http.read())
###Output
_____no_output_____
###Markdown
Extract content from the dataset into a DataFrame
###Code
# Open Peltarion platform compatible zipfile
with zipfile.ZipFile(fashion_mnist_bin, 'r') as z:
index_file = z.open('fashion/index.csv')
dataset = pd.read_csv(index_file)
dataset['image'] = dataset['image'].apply(
lambda path: Image.open(z.open('fashion/' + path))
)
###Output
_____no_output_____
###Markdown
Connect to the auto-encoderThis auto-encoder takes a greyscale image of shape 28x28 as both its input and output feature (i.e. matching the size of FashionMNIST images).
###Code
deployment = sidekick.Deployment(
url='<url>',
token='<token>',
)
###Output
_____no_output_____
###Markdown
Verify the features used for input and output in the model
###Code
{'input': deployment.feature_specs_in, 'output': deployment.feature_specs_out}
###Output
_____no_output_____
###Markdown
Create a generator which polls the deploymentFor interactive exploration of data, it is useful to use the predict_lazy method, which returns a generator that lazily polls the deployment when needed. Sidekick will make sure to batch requests to the deployment.
###Code
# Get predictions for all images in dataset
predictions = deployment.predict_lazy(dataset.to_dict('records'))
###Output
_____no_output_____
###Markdown
Visualize a grid of outputSet the desired grid size and press `Run interact` to get a new grid of examples. Lazy predictions are convenient because they allow you to interactively explore your predictions while requesting new ones when needed. Here's a simple demo illustrating how to do that; it should look something like the image on the left. Try it out!
###Code
def show_next_grid(n_rows, n_columns):
grid = Image.new('L', (n_columns * 28, n_rows * 28))
for column in range(n_columns):
for row in range(n_rows):
grid.paste(next(predictions)['image'], (column * 28, row * 28))
return grid
interact_manual(show_next_grid, n_rows=(1, 10), n_columns=(1, 10));
###Output
_____no_output_____
###Markdown
Exploring an auto-encoder for FashionMNISTAn auto-encoder is a model that encodes some content (in this case an image) into a lower-dimensional vector and then decodes it back to the original image. The encoder may be useful as a trained feature extractor. When evaluating the auto-encoder, it is important to ensure the recreated content looks reasonable. This notebook shows an example of how one may visually inspect the recreated content from an auto-encoder.This example uses ipywidgets, which may be enabled by running:```jupyter nbextension enable --py widgetsnbextension``` Import dependencies for this notebook
###Code
import urllib
import zipfile
import IPython.display as ipd
import numpy as np
import pandas as pd
import sidekick
from ipywidgets import interact_manual
from PIL import Image
###Output
_____no_output_____
###Markdown
Download FashionMNISTDownload FashionMNIST to a tempfile. For details including license please see: [knowledge-center/fashion-mnist](https://peltarion.com/knowledge-center/documentation/datasets-view/datasets-used-in-tutorials/fashion-mnist-dataset)
###Code
fashion_mnist_path, _ = urllib.request.urlretrieve(
'https://storage.googleapis.com/bucket-8732/fashion.zip')
###Output
_____no_output_____
###Markdown
Extract content from the dataset into a DataFrame
###Code
# Open Peltarion platform compatible zipfile
with zipfile.ZipFile(fashion_mnist_path, 'r') as z:
index_file = z.open('index.csv')
dataset = pd.read_csv(index_file)
dataset['image'] = dataset['image'].apply(
lambda path: Image.open(z.open(path))
)
###Output
_____no_output_____
###Markdown
Connect to the auto-encoderThis auto-encoder takes a greyscale image of shape 28x28 as both its input and output feature (i.e. matching the size of FashionMNIST images).
###Code
deployment = sidekick.Deployment(
url='<url>',
token='<token>',
dtypes_in={'image': 'Image (28x28x1)'},
dtypes_out={'image': 'Image (28x28x1)'}
)
###Output
_____no_output_____
###Markdown
Create a generator which polls the deploymentFor interactive exploration of data, it is useful to use the predict_lazy method, which returns a generator that lazily polls the deployment when needed. Sidekick will make sure to batch requests to the deployment.
###Code
# Get predictions for all images in dataset
predictions = deployment.predict_lazy(dataset.to_dict('records'))
###Output
_____no_output_____
###Markdown
Visualize a grid of outputSet the desired grid size and press `Run interact` to get a new grid of examples.
###Code
def show_next_grid(n_rows, n_columns):
grid = Image.new('L', (n_columns * 28, n_rows * 28))
for column in range(n_columns):
for row in range(n_rows):
grid.paste(next(predictions)['image'], (column * 28, row * 28))
return grid
interact_manual(show_next_grid, n_rows=(1, 10), n_columns=(1, 10));
###Output
_____no_output_____ |
Equation of State.ipynb | ###Markdown
Overview Here we demonstrate how gravitational wave data from a set of observations might constrain nuclear parameters. These results were generated as follows:1. A unified equation of state (EoS) was defined in terms of $N_p = 18$ parameters $\vect{p}$ as follows: * An outer crust is fixed by tabulated data. (No parameters.) * A compressible liquid drop model (CLDM) modeling spherical nuclei embedded in a gas of neutrons and electrons. (2 parameters) * A homogeneous interior with a homogeneous nuclear equation of state $\mathcal{E}(n_n, n_p)$ in conjunction with $\beta$-equilibrium. (13 parameters = 4 neutron matter parameters + 3 proton polaron parameters + 6 symmetric nuclear matter parameters) * A core characterized by the speed of sound (3 parameters).2. The Tolman-Oppenheimer-Volkoff (TOV) equations were solved for non-rotating neutron stars, giving the neutron star masses $M(P_c, \vect{p})$, radii $R(P_c, \vect{p})$, and second Love numbers $k_2(P_c, \vect{p})$ as functions of the central pressure $P_c$ and the 18 parameters $\vect{p}$.3. Numerical derivatives of these functions were obtained using Richardson extrapolation. These are tabulated for a set of $N_m$ masses spanning a range of neutron stars. These derivatives are stored in dimensionless form: $$ \frac{p_i}{M_j} \pdiff{M_j}{p_i} = p_i \pdiff{\ln M_j}{p_i}. $$ 4. Using a post-Newtonian analysis, these derivatives are used to obtain a set of Fisher information matrices $\mat{F}$ corresponding to the expected aLIGO signal sensitivity for all pairs of masses $m_1$ and $m_2$. This is summarized in a $N_m\times N_m\times N_p\times N_p$ array where each entry $F[i, j,:,:]$ corresponds to the expected Fisher information matrix (FIM) obtained from the inspiral of a binary with masses $m_i$ and $m_j$ at a fiducial distance of $D_0=40$Mpc. For an event at distance $D$, this must be weighted by a factor $(D_0/D)^2$.5. From this set of FIM, a collective FIM can be obtained by summing over a sample population of binaries, weighted appropriately.6. Nuclear uncertainties can be input directly with an information matrix: $$ \mat{F}_{\text{nuclear}} = \mat{C}^{-1} \approx \diag\left(\frac{1}{\sigma_{0}^{2}}, \frac{1}{\sigma_{1}^2}, \cdots\right) $$ where $\sigma_i$ are the estimated 1-$\sigma$ relative Gaussian errors for the $i$th parameter $p_i$. If parameter covariances are known, they can be introduced directly into the covariance matrix $\mat{C}$. *(All of this analysis assumes that errors are small enough and Gaussian so that linear error analysis is a reasonable approximation. As above, the tabulated FIM is scaled by the parameter values to be dimensionless, so the errors $\sigma_i$ represent the relative errors in the parameters.)*7. From this combined FIM, we perform various analyses such as a principal component analysis, looking at the largest eigenvalues and eigenvectors of $\mat{F}$, or computing the parameter covariance matrix $\mat{C} = \mat{F}^{-1}$.The summary of this analysis for a sample equation of state roughly matching the ALF4 equation of state is presented in the following notebook:* [`ALF4.ipynb`](ALF4.ipynb)*(As additional equations of state are added, they will be analyzed in appropriately named notebooks.)* Equation of State Outer Crust The outer crust is simply tabulated from results like those of Negele and Vautherin. There are no parameters introduced here. 
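To make steps 5 to 7 concrete, here is a minimal numpy sketch of how the pieces combine. The names `F_pairs`, `weights`, and `sigma` and the random stand-in data are hypothetical, not the actual tabulated arrays of this repository; only the weighted sum, the diagonal nuclear prior, the covariance, and the eigendecomposition follow the description above.

```python
import numpy as np

# Hypothetical shapes and data (stand-ins for the tabulated arrays described above):
#   F_pairs : (N_m, N_m, N_p, N_p) Fisher matrices for each binary at D_0 = 40 Mpc
#   weights : (N_m, N_m) population weights, already including the (D_0/D)**2 factors
#   sigma   : (N_p,) relative 1-sigma nuclear uncertainties
N_m, N_p = 5, 18
rng = np.random.default_rng(0)
A = rng.normal(size=(N_m, N_m, N_p, N_p))
F_pairs = A @ np.swapaxes(A, -1, -2)        # positive semi-definite stand-ins
weights = np.full((N_m, N_m), 1.0 / N_m**2)
sigma = np.full(N_p, 0.1)

# Step 5: collective FIM as a weighted sum over the binary population.
F_gw = np.einsum('ij,ijab->ab', weights, F_pairs)

# Step 6: add the nuclear priors as a diagonal information matrix.
F_total = F_gw + np.diag(1.0 / sigma**2)

# Step 7: parameter covariance and principal component analysis.
C = np.linalg.inv(F_total)
evals, evecs = np.linalg.eigh(F_total)      # largest eigenvalue = best-constrained direction
best_constrained = evecs[:, np.argmax(evals)]
```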
CLDM Once the nuclear equation of state $\mathcal{E}(n_n, n_p)$ is specified (see below), we make a unified transition from the outer crust to homogeneous matter using a compressible liquid drop model with the following components:* A spherical nucleus in a spherical Wigner-Seitz cell. (Energy density $\mathcal{E}(n_n^(i), n_p^(i))$ where $n_{n,p}^{i}$ are the average densities of the protons and neutrons inside the drop.)* A surrounding neutron gas. (Described by $\mathcal{E}(n_n^{o}, 0)$ where $n_n^{o}$ is the density of the neutron gas.)* A homogeneous electron cloud. (Described as a free Fermi gas, relativistic if needed, maintaining beta-equilibrium by establishing zero total charge.)* A surface term.The only parameters that enter this approximation are those describing the surface of the drop. For this we use the approach of [Lattimer:1985] (also in [Steiner:2012a]) which is characterized in terms of the proton fraction $x = n_p/n_b$ and the isospin asymmetry $\beta = (n_n - n_p)/(n_n + n_p)$:$$ \sigma = \sigma_0\mathcal{B}(x), \qquad \mathcal{B}(x) = \frac{16+b}{x^{-3} + b + \frac{1}{(1-x)^3}}, \qquad C_{\mathrm{sym}} = \frac{\sigma_{\delta}}{\sigma_0} = \frac{96}{b+16}.$$This introduces two parameters `sigma_delta`$=\sigma_\delta$ and `sigma_0`$=\sigma_0$. Note that for small asymmetries $x\approx 1$, $\beta \approx 0$, we have:$$ \sigma = \sigma_0\left( 1 - \frac{C_{\mathrm{sym}}} {C_{\mathrm{sym}} - \frac{12}{2 - \frac{1}{(1+\beta)^3} - \frac{1}{(1-\beta)^3}}} \right) \approx \sigma_0\left( 1 - C_{\mathrm{sym}}\beta^2 + \order(\beta^4) \right)$$Thus, we see that there are two independent parameters in this expression. We fix the parameter $\sigma_0$ so that the CLDM matches the baryon density $n_B$ and energy-density $\mathcal{E}(n_B)$ at the upper end of the tabulated set of outer-crust data, ensuring a smooth transition (at most, second-order).Following [Steiner:2012a], we introduce one more parameter `C_C`$=\mathcal{C}$ which is a suppression factor in the Coulomb energy intended to model the diffuseness of the proton distribution. Finally, one could in principle vary the dimension `d_C`$=d$ of the Wigner-Seitz cell to allow for a crude estimate of pasta. We have tried this in some cases, but generally leave $d=3$.Once these parameters are fixed, and $\sigma_0$ is found, we must find the transition from a droplet to homogeneous matter by finding the point where the chemical potentials and pressures are equal. This ensures that the transition from the CLDM to homogeneous matter remains convex. To summarize, we have the following parameters:**CLDM*** `C_C`: Coulomb suppression factor $\mathcal{C}$.* `d_C`: Dimension $d$ of the cells. (Generally held fixed at $d=3$.)* `sigma_delta`: Surface tension isospin dependence $\sigma_\delta$ [MeV/fm$^2$].[Lattimer:1985]: http://dx.doi.org/10.1016/0375-9474(85)90006-5 (J.M. Lattimer, C.J. Pethick, D.G. Ravenhall, and D.Q. Lamb, "Physical properties of hot, dense matter: The general case", Nucl. Phys. A 432(3), 646 - 742 (1985) )[Steiner:2012a]: http://dx.doi.org/10.1103/PhysRevC.85.055804 (Andrew W. Steiner, "Deep crustal heating in a multicomponent accreted neutron star crust", Phys. Rev. 
C 85, 055804 (2012) ) Homogeneous Matter As motivated by our work with fitting a nuclear energy density functional (NEDF) called SeaLL1:* [Bulgac, Forbes, Jin, Perez, and Schunck: A Minimal Nuclear Energy Density Functional (accepted for PRC)](https://arxiv.org/abs/1708.08771)we acknowledge that the connection between neutron matter and symmetric nuclear matter might be quite weak. We thus base our parameterization of homogeneous matter on an expansion in proton fraction starting from pure neutron matter. For pure neutron matter, we use the following energy per particle $E_n$ [MeV] as a function of neutron density $n_n$ [1/fm$^3$]: $$ E_n(n_n) = \frac{\mathcal{E}_{n}(n_n)}{n_n} = m_nc^2 + a\left(\frac{n_n}{\bar{n}_0}\right)^{\alpha} + b\left(\frac{n_n}{\bar{n}_0}\right)^{\beta}$$ where $m_n=939.565$MeV/$c^2$ is the neutron mass, $\bar{n}_0 = 0.16$/fm$^3$ is a constant (approximately the nucleon saturation density) and $a$, $b$, $\alpha$, and $\beta$ are four EoS parameters `a`, `alpha`, `b`, and `beta` in the code.Adding a small proton fraction, we extend this using the following form: \begin{align} \text{energy-per-particle}&& E_{np}(n_n, n_p) &= (1-x_p)E_n(n_n) + x_p\left(m_pc^2 + \Sigma^p(n_B)\right) + \frac{(2\pi^2)^{2/3}}{2m^*}x_p^{5/3}n_B^{2/3} +x_p^2f_2(n_B) + x_p^3f_3(n_B)+\cdots\\ &&n_B &= n_n + n_p, \qquad x_p = \frac{n_p}{n_B},&&\\ \text{proton self-energy}&&\Sigma^p(n_B) &= \mu_p\frac{n_B}{\bar{n}_0}\frac{2u_p-\frac{n_B}{n_0}}{2u_p - 1}&&\end{align} where $m_p = 938.272$MeV/$c^2$ is the bare proton mass, and $m^*$ is the effective proton mass. The properties of the proton polaron are characterized by the parameters $\mu_p$ [MeV], the proton chemical potential at $n_B = \bar{n}_0$ and $u_p$ which specifies at which $n_B = u_p\bar{n}_0$ the proton self-energy $\Sigma^p(n_B)$ reaches its minimum value as shown below:
###Code
%pylab inline --no-import-all
import constants as u
n_0 = 0.16/u.fm**3
mu_p = -104.5*u.MeV
u_p = 3.136
n_B = np.linspace(0, (u_p+1)*n_0)
Sigma_p = mu_p*n_B/n_0*(2*u_p - n_B/n_0)/(2*u_p-1)
plt.figure(figsize=(8,5))
plt.plot(n_B, Sigma_p)
plt.grid(True)
plt.xticks([0, n_0, u_p*n_0], ['0', r'$\bar{n}_0$', r'$u_p\bar{n}_0$'])
plt.yticks([0, mu_p], ['0', r'$\mu_p$'])
plt.xlabel(r'$n_B$')
plt.ylabel(r'$\Sigma^p(n_B)$')
plt.title("Demonstration of the parametrization of $\Sigma^p(n_B)$")
###Output
Populating the interactive namespace from numpy and matplotlib
|
docs/refactoring/performance/ipyparallel/direct.ipynb | ###Markdown
IPython’s `Direct`-Interface Erstellen eines `DirectView`
###Code
import ipyparallel as ipp
rc = ipp.Client()
rc = ipp.Client(profile='default')
rc.ids
###Output
_____no_output_____
###Markdown
Verwenden aller *Engines*:
###Code
dview = rc[:]
###Output
_____no_output_____
###Markdown
`map()`-FunktionPython’s builtin `map()`-Funktion kann auf eine Sequenz von Elementen angewendet werden und üblicherweise einfach zu parallelisieren. Beachtet bitte, dass die `DirectView`-Version von `map()` kein automatisches Load-Balancing macht. Hierfür müsst ihr ggf. `LoadBalancedView` verwenden.
###Code
serial_result = list(map(lambda x:x**10, range(32)))
parallel_result = dview.map_sync(lambda x: x**10, range(32))
serial_result == parallel_result
###Output
_____no_output_____
###Markdown
IPython’s `Direct` interface Create a `DirectView`
###Code
import ipyparallel as ipp
rc = ipp.Client()
rc = ipp.Client(profile='default')
rc.ids
###Output
_____no_output_____
###Markdown
Use all engines:
###Code
dview = rc[:]
###Output
_____no_output_____
###Markdown
`map()` functionPython’s builtin `map()` function can be applied to a sequence of elements and is usually easy to parallelise.Please note that the `DirectView` version of `map()` does not do automatic load balancing. You may have to use `LoadBalancedView` for this.
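For comparison, here is a minimal sketch of the load-balanced variant mentioned above. It reuses the client `rc` created earlier; the squaring function is only an example.

```python
# A load-balanced view assigns each task to whichever engine is free next.
lview = rc.load_balanced_view()
balanced_result = lview.map_sync(lambda x: x**10, range(32))
```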
###Code
serial_result = list(map(lambda x:x**10, range(32)))
parallel_result = dview.map_sync(lambda x: x**10, range(32))
serial_result == parallel_result
###Output
_____no_output_____ |
Tarea01PrincipiosBásicos01.ipynb | ###Markdown
Universidad de Costa Rica Facultad de Ciencias Escuela de Química Sección de Fisicoquímica Qu-0560 Tópicos de Química Física: Herramientas Computacionales para Química Professor Mauricio Gutiérrez Unofficial assistant: Yefry López Assignment 01 Basic Principles I. Work through the following exercises using what you learned in the Basic Principles sheet. Exercise 1. Area of a circle 1. Program the equation for the area of a circle of radius 5 cm. You must create the following variables: __Radio__ to store the given radius value and __Area__ to store the area formula. You must also use __PI__ from the math module (remember to import the module first). Finally, print the value of the area with print(). The output should be a message like "The value of the area of a circle with radius X is Y". HINT: X and Y stand for the variables. Remember to use the + (concatenation) notation inside print and the str() function.
###Code
# Solve the exercise in this cell
###Output
_____no_output_____
###Markdown
Exercise 2. Converting feet to miles. 2. There are 5280 feet in a mile. Write a Python expression that computes and prints the number of feet in 1313 miles.
###Code
# Solve the exercise in this cell
###Output
_____no_output_____
###Markdown
Exercise 3. Converting to seconds. 3. Write a Python expression that computes and prints the number of seconds in 7 hours, 21 minutes and 37 seconds.
###Code
# Solve the exercise in this cell
###Output
_____no_output_____
###Markdown
Exercise 4. Free-fall equation. 4. A bolt accidentally falls from the top of a building. 45 seconds later it hits the ground. What is the height of the building? Store the time in a variable __t__ and use the equation of uniformly accelerated rectilinear motion to compute the height __h__. Print your result with a descriptive message.
###Code
# Solve the exercise in this cell
###Output
_____no_output_____
###Markdown
Exercise 5. 5. The following expression gives a basic prediction of the value of p dollars after __y__ years at an interest rate of __r__ percent.$$ Price = p(1+0.01r)^{y}$$Compute the future value of $1000 after 10 years at 7% interest. Print the value.
###Code
# Solve the exercise in this cell
###Output
_____no_output_____
###Markdown
Exercise 6. Computing the distance between points. 6. The distance between two points in Cartesian coordinates $(x_{0},y_{0})$ and $(x_{1},y_{1})$ can be computed with the following expression$$ distancia = \sqrt{ (x_{0} - x_{1} )^{2} + (y_{0} - y_{1})^{2}} $$Write a Python expression that computes the distance between the points (2, 2) and (5, 6). HINT: Remember that for the square root, the math module (which you can import) has a square-root function and a power function.
###Code
# Solve the exercise in this cell
###Output
_____no_output_____
###Markdown
Exercise 7. Preparing a solution to measure its visible spectrum in Inorganic Chemistry.__Figure.__ Isoelectronic series of saccharinates of the 3d-block elements, synthesized in the Inorganic Chemistry Laboratory II during the first semester of 2018. Photo: Yefry Lopez. 7. The Beer-Lambert law is given by the expression $$ A = \epsilon lc $$ where __A__ is the absorbance, __l__ the path length and $\epsilon$ the molar absorptivity.* In __Practical 05__ of your Inorganic Chemistry Laboratory II course, you will synthesize and characterize a saccharinate complex by visible absorption spectroscopy. Using Beer's law, compute the concentration needed for the absorbance and the molar absorptivity to take the values 1 and 100, respectively. The cell has a path length __l__ of 1 cm. * Once you have the concentration, compute the mass of zinc saccharinate you must weigh on the analytical balance to prepare 50 mL of a solution of that concentration. HINT1: The molar mass of the synthesized saccharinate is 519.8102. HINT2: Go step by step, declaring variables, so you don't get tangled up.
###Code
# Solve the exercise in this cell
###Output
_____no_output_____
###Markdown
Comparing the ideal-gas equation of state with the Redlich–Kwong equation of state. To solve this exercise, the next cell offers you a solution template. In your General and Qualitative Chemistry course, in the chapter on gases, you learned the ideal-gas equation of state$$ P = \frac{RT}{V_{m}} $$where $V_{m}$ is the molar volume of the gas. In your first Physical Chemistry course you will learn that there are other equations of state that take into account the intermolecular forces between the molecules or atoms that make up the gas, for example the Redlich–Kwong equation of state.$$ P = \frac{RT}{V_{m} - b} - \frac{a}{\sqrt{T}V_{m}(V_{m} + b)}$$ where a and b are given by $$ a = \frac{0.427*R^{2}*(T_{c})^{2}}{P_{c}} $$ $$ b = \frac{0.0866*R*T_{c}}{P_{c}} $$ and $T_{c}, P_{c}$ are the critical variables. (Computing them will be a fun exercise in Physical Chemistry; remember from Calculus I or II how to find a critical point, a maximum or a minimum... but we leave that to Physical Chemistry.) 8. Compute the pressure of a gas using the ideal gas law and then the Redlich–Kwong pressure. Print the results. To print, you may use the following code * print("The ideal gas pressure: " + str(IDEAL P VARIABLE) + " Pa")__ * print("The Redlich-Kwong pressure: " + str(REDLICH-KWONG P VARIABLE) + " Pa")Use the following parameters * R = 0.0821 L-atm/K* T = 500 K* V = 5 L/mol* Pc = 37.2 atm* Tc = 132.5 K
###Code
## Solution guide
## Step 1: import the math package
## Step 2. Declare and assign the variables R, T, V, Pc and Tc
## Step 3. Program a and b using math.pow(value, the power you want to raise to)
## Step 4. Program the ideal gas P and store it in Pideal
## Step 5. Program the Redlich–Kwong P and store it in Preal
## Step 6. Print your results
###Output
_____no_output_____
###Markdown
End of Assignment
###Code
###Output
_____no_output_____ |
curriculum/unit-1-statistics-fundamentals/sprint-1-data-wrangling-and-storytelling/module2-make-features/LS_DS_112_Make_Features_Assignment.ipynb | ###Markdown
Assignment:- Replicate the lesson code. - This means that if you haven't followed along already, type out the things that we did in class. Forcing your fingers to hit each key will help you internalize the syntax of what we're doing. Make sure you understand each line of code that you're writing, google things that you don't fully understand. - [Lambda Learning Method for DS - By Ryan Herr](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit?usp=sharing)- Convert the `term` column from string to integer.- Make a column named `loan_status_is_great`. It should contain the integer 1 if `loan_status` is "Current" or "Fully Paid." Else it should contain the integer 0.- Make `last_pymnt_d_month` and `last_pymnt_d_year` columns. Replicating lesson code
###Code
### Loading the file
!wget https://resources.lendingclub.com/LoanStats_2018Q4.csv.zip
### Unziping the file
!unzip LoanStats_2018Q4.csv.zip
### Initial inspection
!head LoanStats_2018Q4.csv
###Output
Notes offered by Prospectus (https://www.lendingclub.com/info/prospectus.action)
"id","member_id","loan_amnt","funded_amnt","funded_amnt_inv","term","int_rate","installment","grade","sub_grade","emp_title","emp_length","home_ownership","annual_inc","verification_status","issue_d","loan_status","pymnt_plan","url","desc","purpose","title","zip_code","addr_state","dti","delinq_2yrs","earliest_cr_line","inq_last_6mths","mths_since_last_delinq","mths_since_last_record","open_acc","pub_rec","revol_bal","revol_util","total_acc","initial_list_status","out_prncp","out_prncp_inv","total_pymnt","total_pymnt_inv","total_rec_prncp","total_rec_int","total_rec_late_fee","recoveries","collection_recovery_fee","last_pymnt_d","last_pymnt_amnt","next_pymnt_d","last_credit_pull_d","collections_12_mths_ex_med","mths_since_last_major_derog","policy_code","application_type","annual_inc_joint","dti_joint","verification_status_joint","acc_now_delinq","tot_coll_amt","tot_cur_bal","open_acc_6m","open_act_il","open_il_12m","open_il_24m","mths_since_rcnt_il","total_bal_il","il_util","open_rv_12m","open_rv_24m","max_bal_bc","all_util","total_rev_hi_lim","inq_fi","total_cu_tl","inq_last_12m","acc_open_past_24mths","avg_cur_bal","bc_open_to_buy","bc_util","chargeoff_within_12_mths","delinq_amnt","mo_sin_old_il_acct","mo_sin_old_rev_tl_op","mo_sin_rcnt_rev_tl_op","mo_sin_rcnt_tl","mort_acc","mths_since_recent_bc","mths_since_recent_bc_dlq","mths_since_recent_inq","mths_since_recent_revol_delinq","num_accts_ever_120_pd","num_actv_bc_tl","num_actv_rev_tl","num_bc_sats","num_bc_tl","num_il_tl","num_op_rev_tl","num_rev_accts","num_rev_tl_bal_gt_0","num_sats","num_tl_120dpd_2m","num_tl_30dpd","num_tl_90g_dpd_24m","num_tl_op_past_12m","pct_tl_nvr_dlq","percent_bc_gt_75","pub_rec_bankruptcies","tax_liens","tot_hi_cred_lim","total_bal_ex_mort","total_bc_limit","total_il_high_credit_limit","revol_bal_joint","sec_app_earliest_cr_line","sec_app_inq_last_6mths","sec_app_mort_acc","sec_app_open_acc","sec_app_revol_util","sec_app_open_act_il","sec_app_num_rev_accts","sec_app_chargeoff_within_12_mths","sec_app_collections_12_mths_ex_med","sec_app_mths_since_last_major_derog","hardship_flag","hardship_type","hardship_reason","hardship_status","deferral_term","hardship_amount","hardship_start_date","hardship_end_date","payment_plan_start_date","hardship_length","hardship_dpd","hardship_loan_status","orig_projected_additional_accrued_interest","hardship_payoff_balance_amount","hardship_last_payment_amount","debt_settlement_flag","debt_settlement_flag_date","settlement_status","settlement_date","settlement_amount","settlement_percentage","settlement_term"
"","","20000","20000","20000"," 36 months"," 14.47%","688.13","C","C2","bus driver","4 years","OWN","52000","Source Verified","Dec-2018","Current","n","","","debt_consolidation","Debt consolidation","681xx","NE","30.65","1","Jun-1979","2","15","","6","0","15048","73%","22","w","15777.74","15777.74","6104.74","6104.74","4222.26","1882.48","0.0","0.0","0.0","Sep-2019","688.13","Oct-2019","Sep-2019","0","","1","Individual","","","","0","0","33157","0","2","1","1","8","18109","44","1","2","8628","73","20700","1","1","4","3","5526","5175","73","0","0","141","474","10","8","0","10","15","1","15","0","4","4","11","12","8","4","14","4","6","","0","0","2","95","50","0","0","61699","33157","20700","40999","","","","","","","","","","","","N","","","","","","","","","","","","","","","N","","","","","",""
"","","25000","25000","25000"," 60 months"," 16.14%","609.82","C","C4","Production Manager","5 years","MORTGAGE","45000","Not Verified","Dec-2018","Fully Paid","n","","","debt_consolidation","Debt consolidation","703xx","LA","37.09","0","Sep-2003","0","","","7","0","8901","36.8%","21","w","0.00","0.00","26653.1675796436","26653.17","25000.00","1653.17","0.0","0.0","0.0","Apr-2019","24857.33","","May-2019","0","","1","Individual","","","","0","1303","49524","0","2","1","2","7","40623","82","0","0","7830","67","24200","2","0","1","2","7075","10465","43.7","0","0","161","162","45","7","4","96","","7","","0","2","3","3","5","7","5","10","3","7","0","0","0","1","100","33.3","0","0","73683","49524","18600","49483","","","","","","","","","","","","N","","","","","","","","","","","","","","","N","","","","","",""
"","","26500","26500","26500"," 60 months"," 11.31%","580.28","B","B3","Compliance Director","10+ years","MORTGAGE","134000","Source Verified","Dec-2018","Current","n","","","credit_card","Credit card refinancing","747xx","OK","18.91","0","Dec-2006","2","","","17","0","43640","65.4%","37","w","23410.70","23410.70","5197.54","5197.54","3089.30","2108.24","0.0","0.0","0.0","Sep-2019","580.28","Oct-2019","Sep-2019","0","","1","Individual","","","","0","0","381100","3","3","2","2","4","64335","46","1","3","10115","58","66700","2","1","7","6","22418","14577","70.4","0","0","114","144","3","3","4","22","","2","","0","8","10","9","10","7","13","26","10","17","0","0","0","4","100","66.7","0","0","430403","107975","49300","88875","","","","","","","","","","","","N","","","","","","","","","","","","","","","N","","","","","",""
"","","10400","10400","10400"," 36 months"," 12.98%","350.32","B","B5","Program Support Assistant ","10+ years","MORTGAGE","56099","Source Verified","Dec-2018","Current","n","","","credit_card","Credit card refinancing","800xx","CO","20.92","0","Jul-2013","2","32","67","8","1","1669","9.8%","10","w","8164.57","8164.57","3141.63","3141.63","2235.43","906.20","0.0","0.0","0.0","Sep-2019","350.32","Oct-2019","Sep-2019","0","","1","Individual","","","","0","0","39564","4","2","1","2","1","37895","92","3","6","725","36","17000","1","0","4","8","4946","15331","9.8","0","0","65","57","1","1","0","1","","0","32","0","3","3","6","6","3","6","7","3","8","0","0","0","4","90","0","1","0","60709","39564","17000","43709","","","","","","","","","","","","N","","","","","","","","","","","","","","","N","","","","","",""
"","","35000","35000","35000"," 60 months"," 12.98%","796","B","B5","Sr. Vice President -Risk Manager","7 years","MORTGAGE","211646","Source Verified","Dec-2018","Fully Paid","n","","","debt_consolidation","Debt consolidation","797xx","TX","15.09","0","Sep-1985","0","77","","13","0","55944","74.9%","39","w","0.00","0.00","36266.741551619","36266.74","35000.00","1266.74","0.0","0.0","0.0","Apr-2019","34712.6","","Sep-2019","0","","1","Joint App","272421","15.89","Source Verified","0","0","420931","1","3","1","2","5","26570","51","0","2","24795","65","74700","1","6","1","4","32379","8677","85.5","0","0","152","399","21","5","6","21","77","12","77","0","6","8","6","11","14","9","19","8","13","0","0","0","1","97.4","66.7","0","0","516204","82514","59700","51904","88364","Sep-1985","0","6","19","53.4","2","29","0","0","","N","","","","","","","","","","","","","","","N","","","","","",""
"","","20000","20000","20000"," 36 months"," 7.56%","622.68","A","A3","Teacher","10+ years","MORTGAGE","100000","Not Verified","Dec-2018","Fully Paid","n","","","credit_card","Credit card refinancing","982xx","WA","18.92","0","Feb-1999","0","48","","9","0","25416","29.9%","19","w","0.00","0.00","20215.79243","20215.79","20000.00","215.79","0.0","0.0","0.0","Feb-2019","20228.39","","Feb-2019","0","","1","Joint App","190000","11.75","Not Verified","0","0","515779","1","2","0","1","13","46153","71","1","2","9759","39","85100","2","2","0","5","57309","59684","29.9","0","0","171","238","1","1","5","1","","13","48","0","5","5","5","6","5","5","9","5","9","0","0","0","1","94.7","20","0","0","622183","71569","85100","74833","43287","Aug-1998","0","3","10","29.7","2","7","0","0","","N","","","","","","","","","","","","","","","N","","","","","",""
"","","13000","13000","13000"," 36 months"," 17.97%","469.79","D","D1","Help Tech Supervisor","10+ years","MORTGAGE","92360","Source Verified","Dec-2018","Current","n","","","home_improvement","Home improvement","300xx","GA","27.44","0","Sep-2006","1","68","","18","0","32020","49.6%","30","w","10370.35","10370.35","4208.64","4208.64","2629.65","1578.99","0.0","0.0","0.0","Sep-2019","469.79","Oct-2019","Sep-2019","0","","1","Individual","","","","0","0","131919","1","2","1","3","4","17310","60","0","2","6116","53","64600","1","4","1","5","7760","7140","70.7","0","0","120","147","16","4","2","18","","4","70","0","5","12","6","7","6","15","22","12","18","0","0","0","1","90","66.7","0","0","190803","49330","24400","28861","","","","","","","","","","","","N","","","","","","","","","","","","","","","N","","","","","",""
"","","12000","12000","12000"," 60 months"," 14.47%","282.16","C","C2","Assistant Athletic Director of Marketing","1 year","RENT","67000","Not Verified","Dec-2018","Fully Paid","n","","","credit_card","Credit card refinancing","231xx","VA","28.39","0","Aug-2009","1","","","9","0","10018","50.9%","34","w","0.00","0.00","13140.1266634624","13140.13","12000.00","1140.13","0.0","0.0","0.0","Sep-2019","11169.83","","Sep-2019","0","","1","Individual","","","","0","0","184578","1","4","2","3","5","174560","97","0","1","4615","87","19700","1","1","2","4","20509","9682","50.9","0","0","112","80","23","5","0","23","","5","","0","3","3","5","5","26","5","8","3","9","0","0","0","2","100","0","0","0","183897","184578","19700","164197","","","","","","","","","","","","N","","","","","","","","","","","","","","","N","","","","","",""
###Markdown
Load lending club data
###Code
import pandas as pd
pd.options.display.max_rows = 20
pd.options.display.max_columns = 100
df = pd.read_csv('LoanStats_2018Q4.csv', skiprows=1, skipfooter=2, engine='python')
print(df.shape)
df.head()
###Output
(128412, 144)
###Markdown
Checking for columns with all null and dropping
###Code
df.isnull().sum().sort_values(ascending=False)
df = df.drop(['url', 'member_id', 'desc', 'id'], axis=1)
df.head()
df.isnull().sum().sort_values(ascending=False)
###Output
_____no_output_____
###Markdown
Work with strings
###Code
### Defining a function to cast int_rate from str to float
def int_rate_to_float(mystring):
return float(mystring.strip().strip('%'))/100
### Apply the function to the column
df['int_rate'] = df['int_rate'].apply(int_rate_to_float)
df['int_rate'].head()
###Output
_____no_output_____
###Markdown
Clean employment title
###Code
### View the top 20 titles
df['emp_title'].value_counts(dropna=False, ascending=False)[:20]
### Check unique values
len(df['emp_title'].value_counts())
### Check top values
df['emp_title'].value_counts().reset_index().head()
import numpy as np
### Define a function to clean the employment titles
examples = ['owner', 'SuperVisor', 'project Manager', np.NaN]
def clean_title(title):
if isinstance(title, str):
return title.strip().title()
else:
return "Unknown"
### Check with list comprehensions
[clean_title(item) for item in examples]
df['emp_title'] = df['emp_title'].apply(clean_title)
df['emp_title'].head()
### Check to see its effect on the DF
len(df['emp_title'].value_counts())
### Create employee title manager
df['emp_title_manager'] = df['emp_title'].str.contains("Manager")
df.head()
###Output
_____no_output_____
###Markdown
DataFrame filtering
###Code
condition = (df['emp_title_manager'] == True)
managers = df[df['emp_title_manager'] == True]
managers.head()
(df['emp_title_manager'] == False) & (df['home_ownership']=="OWN")
plebians = df[(df['emp_title_manager'] == False) & (df['home_ownership']=="MORTGAGE")]
print(plebians.shape)
plebians.head()
### Graphing the difference in interest rates among the subsets
import seaborn as sns
sns.kdeplot(plebians['int_rate'])
sns.kdeplot(managers['int_rate']);
sns.distplot(plebians['int_rate'])
sns.distplot(managers['int_rate']);
###Output
_____no_output_____
###Markdown
Working with dates
###Code
### casting the issue_d column to DateTime
df['issue_d'] = pd.to_datetime(df['issue_d'], infer_datetime_format=True)
df['issue_d'].head()
# Casting the other datetime columns to type DateTime
df['issue_year'] = df["issue_d"].dt.year
df['issue_month'] = df["issue_d"].dt.month
df['earliest_cr_line'] = pd.to_datetime(df['earliest_cr_line'],
infer_datetime_format=True)
### Creating a feature to show amount of days since earliest credit line
df['days_from_earliest_credit_to_issue'] = (df['issue_d']
- df['earliest_cr_line']).dt.days
###Output
_____no_output_____
###Markdown
Assignment Portion Create the column loan_status_is_great
###Code
### Examining the loan_status column
df['loan_status'].value_counts(ascending=False)
### Creating the column
df['loan_status_is_great'] = df['loan_status'].str.contains("Current") | df['loan_status'].str.contains("Fully Paid")
df['loan_status_is_great'].head()
### Further examination
df['loan_status_is_great'].value_counts()
df['loan_status_is_great'].dtypes
### Casting bool to 1 and 0
df['loan_status_is_great'] = (df['loan_status_is_great']).astype(int )
df['loan_status_is_great'].value_counts()
###Output
_____no_output_____
###Markdown
Make last_pymnt_d_month and last_pymnt_d_year columns.
###Code
### Casting the last_pymnt_d to DateTime type
df['last_pymnt_d'] = pd.to_datetime(df['last_pymnt_d'],
infer_datetime_format=True)
### Checking
df['last_pymnt_d'].head()
### Creating the requested features
df['last_pymnt_d_month'] = df['last_pymnt_d'].dt.month
df['last_pymnt_d_year'] = df['last_pymnt_d'].dt.year
### Checking
df['last_pymnt_d_month'].head()
df['last_pymnt_d_year'].head()
###Output
_____no_output_____
###Markdown
Convert the term column from string to integer.
###Code
df['term'].value_counts()
df['term'].dtypes
### Defining a function to cast to int
### Example for testing
example_terms = ['36 months', '60 months']
def term_to_int(term):
if isinstance(term, str):
return int(term.strip('months').strip())
else:
return "Unknown"
### Checking to see function works
[term_to_int(item) for item in example_terms]
### Applying this function to the column
# df['term'] = df['term'].apply(term_to_int)
### Checking
# df['term'].head()
### OOPS
uh_oh_df = pd.read_csv('LoanStats_2018Q4.csv', skiprows=1, skipfooter=2, engine='python')
df['term'] = uh_oh_df['term']
df['term'].value_counts()
### Back to where we started
df['term'] = df['term'].apply(term_to_int)
df['term'].value_counts()
df['term'].dtypes
### What happened here: I wasn't segregating my code adequately. The dtypes check
# originally lived in the same cell where the function was applied, and since the
# function returns "Unknown" for non-str values, re-running that cell applied it a
# second time and turned every (now int) value into "Unknown", breaking the column.
###Output
_____no_output_____
###Markdown
Stretch GoalsYou can do more with the LendingClub or Instacart datasets.LendingClub options:- There's one other column in the dataframe with percent signs. Remove them and convert to floats. You'll need to handle missing values.- Modify the `emp_title` column to replace titles with 'Other' if the title is not in the top 20. - Take initiative and work on your own ideas!Instacart options:- Read [Instacart Market Basket Analysis, Winner's Interview: 2nd place, Kazuki Onodera](http://blog.kaggle.com/2017/09/21/instacart-market-basket-analysis-winners-interview-2nd-place-kazuki-onodera/), especially the **Feature Engineering** section. (Can you choose one feature from his bulleted lists, and try to engineer it with pandas code?)- Read and replicate parts of [Simple Exploration Notebook - Instacart](https://www.kaggle.com/sudalairajkumar/simple-exploration-notebook-instacart). (It's the Python Notebook with the most upvotes for this Kaggle competition.)- Take initiative and work on your own ideas! You can uncomment and run the cells below to re-download and extract the Instacart data
###Code
# !wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
# !tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
# %cd instacart_2017_05_01
###Output
_____no_output_____
###Markdown
Lending Club Options- There's one other column in the dataframe with percent signs. Remove them and convert to floats. You'll need to handle missing values.- Modify the emp_title column to replace titles with 'Other' if the title is not in the top 20.- Take initiative and work on your own ideas! There's one other column in the dataframe with percent signs. Remove them and convert to floats. You'll need to handle missing values
###Code
df.head()
df['revol_util'].head()
df['revol_util'].value_counts()
df['revol_util'].isnull().value_counts()
df['revol_util'].dtypes
### Creating test variables
revol_util_examples = ['5%', '10%', np.NaN ]
### Defining the function
def revol_util_to_float(revol):
if isinstance(revol, str):
return float(revol.strip('%').strip())/100
else:
return np.NaN
[revol_util_to_float(item) for item in revol_util_examples]
### Applying to the column
df['revol_util'] = df['revol_util'].apply(revol_util_to_float)
### Checking
df['revol_util'].value_counts()
###Output
_____no_output_____
###Markdown
Modify the emp_title column to replace titles with 'Other' if the title is not in the top 20.
###Code
df['emp_title'].value_counts()[:20]
### Cleaning the data further let's start with nurses
nurse_examples = ['Rn', 'Registered Nurse', 'Nurse', 'Shaman']
# Define a function to combine the professions into registered nurse
def combine_nurse_titles(title):
if title == 'Rn' or title == 'Nurse':
return 'Registered Nurse'
else:
return title
[combine_nurse_titles(item) for item in nurse_examples]
### Applying it to the column
df['emp_title'] = df['emp_title'].apply(combine_nurse_titles)
df['emp_title'].value_counts()[:20]
### Create a list of the top 20 titles
top_titles = df['emp_title'].value_counts().index[:20].tolist()
print(top_titles)
### defining the function to parse titles
title_examples = ['Registered Nurse', 'Unknown', 'Manager', 'Bank Robber',]
def not_tops_to_other(title):
if title in top_titles:
return title
else:
return 'Other'
[not_tops_to_other(item) for item in title_examples]
### Applying to the column
df['emp_title'] = df['emp_title'].apply(not_tops_to_other)
df['emp_title'].value_counts()[:20]
###Output
_____no_output_____ |
3_time_split/altair figure.ipynb | ###Markdown
pat3 over nnranks
###Code
def pAt3_mask(ranks, nnranks):
pAt3 = list()
ci=list()
for i in range(1,251):
mask = nnranks>i
bootstrap = simple_bootstrap(ranks[mask], take=mask.sum())
mean = bootstrap.mean()
ci_ = simple_ci(bootstrap)
pAt3.append(mean)
ci.append(ci_)
return np.array(pAt3), np.array(ci)
import matplotlib.pyplot as plt
p = []
high=[]
low= []
n = []
nn = []
for count, name in enumerate(filenames):
#load
ranks = np.load('./processed_data/'+str(year)+'_'+name+'.npy')
pAt3, ci =pAt3_mask(ranks,nnranks)
p+= list(pAt3)
low += list(ci[:,0])
high += list(ci[:,1])
n += [niceNames[count]]*250
nn += list(range(1,251))
source = pd.DataFrame(columns=['pAt3','low', 'high', 'Algorithm', 'NN Rank'], data = np.array([p,low,high, n,nn]).T)
ch = alt.Chart(source).encode(
x=alt.X('NN Rank:Q', title='NN Rank'),
color=alt.Color('Algorithm:N',sort=niceNames)
).properties(height=200, width=200)
out = (ch.mark_line().encode(y=alt.Y('pAt3:Q',title='p@3'),) + ch.mark_area(opacity=0.2).encode(y='low:Q', y2='high:Q')).facet(
facet=alt.Facet('Algorithm',sort=alt.SortArray(niceNames)),
columns=3
).configure_axis(
#labelFontSize=14,
titleFontSize=14
).configure_header(
titleFontSize=14,
labelFontSize=14
)
out.save('./figures/pAt3_vs_nnrank.html')
out
###Output
_____no_output_____ |
_build/jupyter_execute/Lab5/Pandas.ipynb | ###Markdown
Pandas **CS1302 Introduction to Computer Programming**___ In this lab, we will analyze COVID19 data using a powerful package called [`pandas`](https://pandas.pydata.org/docs/user_guide/index.html). The package name comes from *panel data* and *Python for data analysis*. Loading CSV Files with Pandas [DATA.GOV.HK](https://data.gov.hk/en-data/dataset/hk-dh-chpsebcddr-novel-infectious-agent) provides an [API](https://data.gov.hk/en/help/api-spechistoricalAPI) to retrieve historical data on COVID-19 cases in Hong Kong. The following uses the `urlencode` function to create the url that links to a csv file containing probable and confirmed cases of COVID-19 by Aug 1st, 2020.
###Code
from urllib.parse import urlencode
url_data_gov_hk_get = 'https://api.data.gov.hk/v1/historical-archive/get-file'
url_covid_csv = 'http://www.chp.gov.hk/files/misc/enhanced_sur_covid_19_eng.csv'
time = '20200801-1204'
url_covid = url_data_gov_hk_get + '?' + urlencode({
'url': url_covid_csv,
'time': time
})
print(url_covid)
###Output
_____no_output_____
###Markdown
`urlencode` creates a string `'url=&time='` with some [special symbols encoded](https://www.w3schools.com/tags/ref_urlencode.ASP), e.g.:- `:` is replaced by `%3A`, and- `/` is replaced by `%2F`. **Exercise** Write a function `simple_encode` that takes in a string and return a string with `:` and `/` encoded as described above. *Hint:* Use the `replace` method of `str`.
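For a quick, purely illustrative check of this behavior on a short made-up URL:
###Code
# illustrative only: urlencode percent-encodes ':' and '/'
print(urlencode({'url': 'http://a.b/c'}))  # prints url=http%3A%2F%2Fa.b%2Fc
###Output
_____no_output_____
###Markdown
The exercise below asks you to implement the same two replacements yourself.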
###Code
def simple_encode(string):
'''Returns the string with : and / encoded to %3A and %2F respectively.'''
# YOUR CODE HERE
raise NotImplementedError()
# tests
assert simple_encode(
'http://www.chp.gov.hk/files/misc/enhanced_sur_covid_19_eng.csv'
) == 'http%3A%2F%2Fwww.chp.gov.hk%2Ffiles%2Fmisc%2Fenhanced_sur_covid_19_eng.csv'
###Output
_____no_output_____
###Markdown
Like the function `open` that loads a file into memory, `pandas` has a function `read_csv` that loads a csv file. The csv file can even reside on the web.
###Code
import pandas as pd
df_covid = pd.read_csv(url_covid)
print(type(df_covid))
df_covid
###Output
_____no_output_____
###Markdown
The above creates a [`DataFrame` object](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html?highlight=dataframepandas.DataFrame). The content of the csv file is displayed as an HTML table conveniently. (We can control how much information to show by setting the [display options](https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html).) **Exercise** Using the function `pd.read_csv`, load `building_list_eng.csv` as `df_building` from the url `url_building`.
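For instance (purely illustrative), the number of rows shown can be inspected and changed with `pd.get_option`/`pd.set_option`:
###Code
# illustrative only: control how many rows pandas displays
pd.get_option('display.max_rows')
# pd.set_option('display.max_rows', 10)  # uncomment to show at most 10 rows
###Output
_____no_output_____
###Markdown
Back to the exercise: load the building list below.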
###Code
url_building_csv = 'http://www.chp.gov.hk/files/misc/building_list_eng.csv'
time = '20200801-1203'
url_building = url_data_gov_hk_get + '?' + urlencode({
'url': url_building_csv,
'time': time
})
# YOUR CODE HERE
raise NotImplementedError()
df_building
# tests
assert all(df_building.columns == ['District', 'Building name', 'Last date of residence of the case(s)',
'Related probable/confirmed cases']) # check column names
###Output
_____no_output_____
###Markdown
Selecting and Removing columns We can obtain the column labels of a `DataFrame` using its `columns` attribute.
###Code
df_covid.columns
###Output
_____no_output_____
###Markdown
Using the indexing operator `[]`, a column of a `DataFrame` can be returned as a [`Series` object](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.html), which is essentially a named array. We can further use the method `value_counts` to return the counts of different values in another `Series` object.
###Code
series_gender_counts = df_covid['Gender'].value_counts() # return the number of male and female cases
print(type(series_gender_counts))
series_gender_counts
###Output
_____no_output_____
###Markdown
**Exercise** For `df_building`, use the operator `[]` and method `value_counts` to assign `series_district_counts` to a `Series` object that stores the counts of buildings in different districts.
###Code
# YOUR CODE HERE
raise NotImplementedError()
series_district_counts
# tests
assert all(series_district_counts[['Wong Tai Sin', 'Kwun Tong']] == [313, 212])
###Output
_____no_output_____
###Markdown
In `df_covid`, it appears that the column `Name of hospital admitted` contains no information. We can confirm this by: 1. returning the column as a `Series` with `df_covid['Name of hospital admitted']`, and 2. printing an array of unique column values using the method `unique`.
###Code
df_covid['Name of hospital admitted'].unique()
###Output
_____no_output_____
###Markdown
**Exercise** Drop the column `Name of hospital admitted` using the `drop` method of the DataFrame. Use the keyword argument `inplace=True`, so that the method will - mutate the original DataFrame in place instead of - creating a copy of the DataFrame with the column dropped.
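To see the difference on a small toy DataFrame first (purely illustrative; the exercise itself is in the next cell):
###Code
# illustrative only: inplace=False returns a new DataFrame, inplace=True mutates and returns None
toy = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
toy_without_b = toy.drop(columns='b')   # toy is unchanged here
toy.drop(columns='b', inplace=True)     # now toy itself has lost column 'b'
toy.columns, toy_without_b.columns
###Output
_____no_output_____
###Markdown
Now apply `drop` to `df_covid` below.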
###Code
# YOUR CODE HERE
raise NotImplementedError()
df_covid
# tests
assert all(df_covid.columns == ['Case no.', 'Report date', 'Date of onset', 'Gender', 'Age',
'Hospitalised/Discharged/Deceased', 'HK/Non-HK resident',
'Case classification*', 'Confirmed/probable'])
###Output
_____no_output_____
###Markdown
Selecting Rows of DataFrame We can select the confirmed male cases using the attribute [`.loc`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html) and the indexing operator `[]`. `.loc` implements an advanced indexing method `__getitem__` that can take a boolean vector.
###Code
df_confirmed_male = df_covid.loc[(df_covid['Confirmed/probable']=='Confirmed') & (df_covid['Gender']=='M')]
df_confirmed_male
###Output
_____no_output_____
###Markdown
**Exercise** Assign `df_confirmed_local` to a `DataFrame` of confirmed cases that are local or epidemiologically linked with a local case.
###Code
# YOUR CODE HERE
raise NotImplementedError()
df_confirmed_local
# tests
assert set(df_confirmed_local['Case classification*'].unique()) == {
'Epidemiologically linked with local case', 'Local case'
}
###Output
_____no_output_____
###Markdown
Challenge **Exercise** Write a function `case_counts` that - takes an argument `district`, and - returns the number of cases in `district`. *Hint:* Be careful that there can be more than one case for each building and there may be multiple buildings associated with one case. You may want to use the `split` and `strip` methods of `str` to obtain a list of cases from the `DataFrame`.
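As a small illustration of the hint (using a made-up string, not the actual column values):
###Code
# illustrative only: split a comma-separated field and strip surrounding whitespace
cases_field = '123, 456, 789'
[case.strip() for case in cases_field.split(',')]
###Output
_____no_output_____
###Markdown
Now implement `case_counts` below.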
###Code
def case_counts(district):
# YOUR CODE HERE
raise NotImplementedError()
# tests
assert case_counts('Kwai Tsing') == 109
###Output
_____no_output_____ |
notebooks/pySUMMA_Test_Case_3_Evapotranspiration.ipynb | ###Markdown
Modeling the Impact of Lateral Flow Parameterizations on Total Evapotranspiration in the Reynolds Mountain East catchment using pySUMMA 1. Introduction One part of the Clark et al. (2015) study explored the impact of the lateral flux of liquid water on total evapotranspiration (ET) using a SUMMA model for the Reynolds Mountain East catchment. This study looked at the sensitivity to the different model representations of the lateral flux of liquid water, which determines the availability of soil water. In this Jupyter Notebook, the pySUMMA library is used to reproduce this analysis. First, the lateral flux from the soil profile is described. Next, the Methods section describes how pySUMMA can be used to create three different lateral-flow representations of the Reynolds Mountain East catchment model: 1D Richards', lumped Topmodel, and distributed Topmodel. The Results section shows how to use pySUMMA and the Pandas library to reproduce Figure 8 (right) from Clark et al. (2015). Collectively, this Jupyter Notebook serves as an example of how hydrologic modeling can be conducted directly within a Jupyter Notebook by leveraging the pySUMMA library. | Method | 1dRichards' | Lumped Topmodel | Distributed Topmodel | |---------------------------------------------|-------------|-------------------|------------------------| | groundwater parameterization | noXplict | qTopmodl | qTopmodl | | hydraulic conductivity profile | constant | pow_prof | pow_prof | |lower boundary condition for soil hydrology | drainage | zeroFlux | zeroFlux | |thermal conductivity representation for soil | mixConstit | funcSoilWet | funcSoilWet | 2. Background The Transpiration from soil layers available in SUMMA
###Code
#import libraries to display equations within the notebook
from IPython.display import display, Math, Latex
###Output
_____no_output_____
###Markdown
Lateral flux from the soil profile The soil columns can be hydrologically connected, such that the lateral flux from upslope soil columns is the inflow to downslope soil columns, or hydrologically-disconnected (using one or many soil columns), in which case the lateral flux of water from soil columns is assumed to flow directly into the river network. The continuity equation for sub-surface storage (i.e., below the water table) can be written for a given model element as [Wigmosta et al., 1994]\begin{equation*}\phi_{dr} \frac{dz_{wt}}{dt} = \frac{Q_{out}-Q_{in}}{A} - q_{rchg}\end{equation*}$\phi_{dr} = \theta_{sat}^{soil} - \theta_{fc}^{soil}$ : the “drainable” porosity, $\theta_{fc}^{soil}$ : the field capacity of soil, $z_{wt}$ $(m)$ : the depth to the water table, $Q_{out}$ and $Q_{in}$ $(m^{3}/s)$ : the lateral outflow and inflow, $q_{rchg}$ $(m/s)$ : the vertical recharge rate, $A$ $(m^2)$ : the element area Storage-based implementation to represent lateral flow between soil columns The “drainable” water storage and the maximum drainable water storage can be given as\begin{equation*}W_{dr}^{soil} = \int_{z_{crit}}^{z_{soil}}\ [\theta_{liq}^{soil} (z) - \theta_{fc}^{soil} ] \mathrm{d}z, \ W_{dr,max}^{soil} = \phi_{dr}z_{soil}\end{equation*}$\theta_{liq}^{soil} (z)$ : the volumetric liquid water content at soil depth $z$, $z_{crit}$ : the lowest point in the soil profile where $\theta_{liq}^{soil}$ < $\theta_{fc}^{soil}$ The total lateral outflow \begin{equation*}Q_{out} = x_{len}\tan(\beta) \frac{K_{sat}^{0} W_{dr,max}^{soil}}{\phi_{dr}n_{sf}}[\frac{W_{dr}^{soil}}{W_{dr,max}^{soil}}]^{n_{sf}}\end{equation*}$\beta$ : the gradient in the land surface, used to approximate the water table gradient The total lateral flux \begin{equation*}q_{base}^{soil} = \frac{Q_{out}-Q_{in}}{A}\end{equation*}The total lateral flux $q_{base}^{soil}$ can then be apportioned to individual soil layers, obtained after the spatial discretization described in Clark et al. [2015b], to provide the lateral flow sink term \begin{equation*}(S_{lf})_{j} = (w_{tv})_{j} q_{base}^{soil}\end{equation*}$(w_{tv})_{j}$ : the ratio of the transmissivity of the $j$-th layer to the total transmissivity The above descriptions are taken from the lateral flux from the soil profile section (3.2.3.5) within the manual Structure for Unifying Multiple Modeling Alternatives (SUMMA), Version 1.0: Technical Description (April, 2015). 3. Methods 1) Study Area The Reynolds Mountain East catchment is located in southwestern Idaho as shown in the figure below.
###Code
from ipyleaflet import Map, GeoJSON
import json
m = Map(center=[43.06745, -116.75489], zoom=15)
with open('reynolds_geojson_latlon.geojson') as f:
data = json.load(f)
g = GeoJSON(data=data)
m.add_layer(g)
m
###Output
_____no_output_____
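###Markdown
As a quick numerical illustration of the storage-based lateral-flow relations above (all parameter values below are invented for illustration only; they are not the Reynolds Mountain East configuration):
###Code
import numpy as np

# hypothetical hillslope element (illustrative values, not from the SUMMA test case)
x_len = 100.0            # width of the element (m)
beta = np.radians(10.0)  # land-surface slope used to approximate the water-table gradient
K_sat0 = 1.0e-5          # surface saturated hydraulic conductivity (m/s)
phi_dr = 0.1             # "drainable" porosity (-)
z_soil = 2.0             # soil depth (m)
n_sf = 3.0               # exponent of the power-law transmissivity profile
area = 1.0e4             # element area (m^2)
Q_in = 0.0               # no inflow from upslope (hydrologically disconnected column)

W_dr_max = phi_dr * z_soil     # maximum drainable storage (m)
W_dr = 0.5 * W_dr_max          # assume the drainable storage is half full
Q_out = x_len * np.tan(beta) * (K_sat0 * W_dr_max / (phi_dr * n_sf)) * (W_dr / W_dr_max) ** n_sf
q_base = (Q_out - Q_in) / area  # total lateral flux (m/s)
Q_out, q_base
###Output
_____no_output_____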
###Markdown
2) Download TestCases from HydroShare and install them to prepare the SUMMA simulation
###Code
# To authenticate using HTTP Basic authentication.
from hs_restclient import HydroShare, HydroShareAuthBasic
# import utils.py to download TestCases from HS, unzip and installation
from pysumma.utils import utils
# define directory where you save SUMMA TestCases
save_filepath = '/media/sf_pysumma'
# call install_test_cases_hs method to download TestCase from HS, unzip and install the TestCase.
hs_path = utils.install_test_cases_hs(save_filepath)
# if you already have TestCase in your local, you can skip previous statement
hs_path = save_filepath+'/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents'
###Output
_____no_output_____
###Markdown
3) Create pySUMMA Simulation Object of 1d Richards method and Run SUMMA Model
###Code
from pysumma.Simulation import Simulation
from pysumma.Plotting import Plotting
# create a pySUMMA simulation object using the SUMMA 'file manager' input file
S_1dRichards = Simulation(hs_path + '/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_1dRichards.txt')
# set SUMMA executable file
excutable = '/media/sf_pysumma/a5dbd5b198c9468387f59f3fefc11e22/a5dbd5b198c9468387f59f3fefc11e22/data/contents/summa-master/bin'
S_1dRichards.executable = excutable +'/summa.exe'
# check the simulation start and finish times
S_1dRichards.decision_obj.simulStart.value, S_1dRichards.decision_obj.simulFinsh.value
# check option and selected method of (11) choice of groundwater parameterization in Decision file
S_1dRichards.decision_obj.groundwatr.options, S_1dRichards.decision_obj.groundwatr.value
# check option and selected method of (12) choice of hydraulic conductivity profile in Decision file
S_1dRichards.decision_obj.hc_profile.options, S_1dRichards.decision_obj.hc_profile.value
# check option and selected method of (16) type of lower boundary condition for soil hydrology in Decision file
S_1dRichards.decision_obj.bcLowrSoiH.options, S_1dRichards.decision_obj.bcLowrSoiH.value
# check option and selected method of (27) choice of thermal conductivity representation for soil in Decision file
S_1dRichards.decision_obj.thCondSoil.options, S_1dRichards.decision_obj.thCondSoil.value
# check Basin variable meta data in file manager file
S_1dRichards.meta_basinvar.filename
# check Basin Parameter info data in file manager file
S_1dRichards.basin_par.filename
# check Forcing list data in file manager file
S_1dRichards.forcing_list.filename
# check Initial condition data in file manager file
S_1dRichards.initial_cond.filename
# run the model giving the output the suffix "1dRichards_hs" and get the "results_1dRichards" object
results_1dRichards, output_R = S_1dRichards.execute(run_suffix="1dRichards_hs", run_option = 'local')
###Output
file_suffix is '1dRichards_hs'.
file_master is '/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_1dRichards.txt'.
start at 13:56:04
Name of Model Output control file: /media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/meta/Model_Output.txt
decisions file = /media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zDecisions_1dRichards.txt
1 simulStart: 2002-07-01 00:00
2 simulFinsh: 2008-09-30 00:00
3 soilCatTbl: ROSETTA
4 vegeParTbl: USGS
5 soilStress: NoahType
6 stomResist: BallBerry
7 num_method: itertive
8 fDerivMeth: analytic
9 LAI_method: specified
10 f_Richards: mixdform
11 groundwatr: noXplict
12 hc_profile: constant
13 bcUpprTdyn: nrg_flux
14 bcLowrTdyn: zeroFlux
15 bcUpprSoiH: liq_flux
16 bcLowrSoiH: drainage
17 veg_traits: CM_QJRMS1988
18 canopyEmis: difTrans
19 snowIncept: lightSnow
20 windPrfile: logBelowCanopy
21 astability: louisinv
22 canopySrad: BeersLaw
23 alb_method: varDecay
24 compaction: anderson
25 snowLayers: CLM_2010
26 thCondSnow: jrdn1991
27 thCondSoil: mixConstit
28 spatial_gw: localColumn
29 subRouting: timeDlay
startTime: iyyy, im, id, ih, imin = 2002 7 1 0 0
finshTime: iyyy, im, id, ih, imin = 2008 9 30 0 0
/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zLocalParamInfo-2mRoots.txt
/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zBasinParamInfo.txt
Skipping over SLTYPE = STAS
Skipping over SLTYPE = STAS-RUC
hruIndex
frozenPrecipMultip
theta_mp
theta_sat
theta_res
vGn_alpha
vGn_n
f_impede
k_soil
k_macropore
critSoilWilting
critSoilTranspire
winterSAI
summerLAI
heightCanopyTop
heightCanopyBottom
kAnisotropic
zScale_TOPMODEL
qSurfScale
fieldCapacity
Created output file:/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/output/wrrPaperTestCases/figure09/basinRunoff_output_1dRichards_hs_timestep.nc
initial date/time = 2018-07-18 13:56:04.592
final date/time = 2018-07-18 13:57:25.654
elapsed init = 0.3830000 s
fraction init = 4.7247786E-03 s
elapsed read = 1.601000 s
fraction read = 1.9750315E-02 s
elapsed write = 25.36300 s
fraction write = 0.3128840 s
elapsed physics = 53.58000 s
fraction physics = 0.6609755 s
elapsed time = 81.06200 s
or 1.351033 m
or 2.2517222E-02 h
or 9.3821759E-04 d
number threads = 1
FORTRAN STOP: finished simulation successfully.
###Markdown
4) Create pySUMMA Simulation Object of Lumped Topmodel method and Run SUMMA Model
###Code
# create a pySUMMA simulation object using the SUMMA 'file manager' input file
S_lumpedTopmodel = Simulation(hs_path + '/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_lumpedTopmodel.txt')
# set SUMMA executable file
excutable = '/media/sf_pysumma/a5dbd5b198c9468387f59f3fefc11e22/a5dbd5b198c9468387f59f3fefc11e22/data/contents/summa-master/bin'
S_lumpedTopmodel.executable = excutable +'/summa.exe'
# check the simulation start and finish times
S_lumpedTopmodel.decision_obj.simulStart.value, S_lumpedTopmodel.decision_obj.simulFinsh.value
# check option and selected method of (11) choice of groundwater parameterization in Decision file
S_lumpedTopmodel.decision_obj.groundwatr.options, S_lumpedTopmodel.decision_obj.groundwatr.value
# check option and selected method of (12) choice of hydraulic conductivity profile in Decision file
S_lumpedTopmodel.decision_obj.hc_profile.options, S_lumpedTopmodel.decision_obj.hc_profile.value
# check option and selected method of (16) type of lower boundary condition for soil hydrology in Decision file
S_lumpedTopmodel.decision_obj.bcLowrSoiH.options, S_lumpedTopmodel.decision_obj.bcLowrSoiH.value
# check option and selected method of (27) choice of thermal conductivity representation for soil in Decision file
S_lumpedTopmodel.decision_obj.thCondSoil.options, S_lumpedTopmodel.decision_obj.thCondSoil.value
# check Basin variable meta data in file manager file
S_lumpedTopmodel.meta_basinvar.filename
# check Basin Parameter info data in file manager file
S_lumpedTopmodel.basin_par.filename
# check Forcing list data in file manager file
S_lumpedTopmodel.forcing_list.filename
# check Initial condition data in file manager file
S_lumpedTopmodel.initial_cond.filename
# run the model giving the output the suffix "lumpedTopmodel_hs" and get the "results_lumpedTopmodel" object
results_lumpedTopmodel, output_LT = S_lumpedTopmodel.execute(run_suffix="lumpedTopmodel_hs", run_option = 'local')
###Output
file_suffix is 'lumpedTopmodel_hs'.
file_master is '/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_lumpedTopmodel.txt'.
start at 13:57:30
Name of Model Output control file: /media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/meta/Model_Output.txt
decisions file = /media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zDecisions_lumpedTopmodel.txt
1 simulStart: 2001-07-01 00:00
2 simulFinsh: 2008-09-30 00:00
3 soilCatTbl: ROSETTA
4 vegeParTbl: USGS
5 soilStress: NoahType
6 stomResist: BallBerry
7 num_method: itertive
8 fDerivMeth: analytic
9 LAI_method: specified
10 f_Richards: mixdform
11 groundwatr: qTopmodl
12 hc_profile: pow_prof
13 bcUpprTdyn: nrg_flux
14 bcLowrTdyn: zeroFlux
15 bcUpprSoiH: liq_flux
16 bcLowrSoiH: zeroFlux
17 veg_traits: CM_QJRMS1988
18 canopyEmis: difTrans
19 snowIncept: lightSnow
20 windPrfile: logBelowCanopy
21 astability: louisinv
22 canopySrad: BeersLaw
23 alb_method: varDecay
24 compaction: anderson
25 snowLayers: CLM_2010
26 thCondSnow: jrdn1991
27 thCondSoil: funcSoilWet
28 spatial_gw: localColumn
29 subRouting: timeDlay
startTime: iyyy, im, id, ih, imin = 2001 7 1 0 0
finshTime: iyyy, im, id, ih, imin = 2008 9 30 0 0
/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zLocalParamInfo.txt
/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zBasinParamInfo.txt
Skipping over SLTYPE = STAS
Skipping over SLTYPE = STAS-RUC
hruIndex
frozenPrecipMultip
theta_mp
theta_sat
theta_res
vGn_alpha
vGn_n
f_impede
k_soil
k_macropore
critSoilWilting
critSoilTranspire
winterSAI
summerLAI
heightCanopyTop
heightCanopyBottom
kAnisotropic
zScale_TOPMODEL
qSurfScale
fieldCapacity
Created output file:/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/output/wrrPaperTestCases/figure09/basinRunoff_output_lumpedTopmodel_hs_timestep.nc
initial date/time = 2018-07-18 13:57:30.940
final date/time = 2018-07-18 13:59:39.542
elapsed init = 1.097000 s
fraction init = 8.5301939E-03 s
elapsed read = 2.530000 s
fraction read = 1.9673100E-02 s
elapsed write = 42.70800 s
fraction write = 0.3320944 s
elapsed physics = 82.02300 s
fraction physics = 0.6378050 s
elapsed time = 128.6020 s
or 2.143367 m
or 3.5722778E-02 h
or 1.4884491E-03 d
number threads = 1
FORTRAN STOP: finished simulation successfully.
###Markdown
5) Create pySUMMA Simulation Object of Distributed Topmodel method and Run SUMMA Model
###Code
# create a pySUMMA simulation object using the SUMMA 'file manager' input file
S_distributedTopmodel = Simulation(hs_path + '/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_distributedTopmodel.txt')
# set SUMMA executable file
excutable = '/media/sf_pysumma/a5dbd5b198c9468387f59f3fefc11e22/a5dbd5b198c9468387f59f3fefc11e22/data/contents/summa-master/bin'
S_distributedTopmodel.executable = excutable +'/summa.exe'
# check the simulation start and finish times
S_distributedTopmodel.decision_obj.simulStart.value, S_distributedTopmodel.decision_obj.simulFinsh.value
# check option and selected method of (11) choice of groundwater parameterization in Decision file
S_distributedTopmodel.decision_obj.groundwatr.options, S_distributedTopmodel.decision_obj.groundwatr.value
# check option and selected method of (12) choice of hydraulic conductivity profile in Decision file
S_distributedTopmodel.decision_obj.hc_profile.options, S_distributedTopmodel.decision_obj.hc_profile.value
# check option and selected method of (16) type of lower boundary condition for soil hydrology in Decision file
S_distributedTopmodel.decision_obj.bcLowrSoiH.options, S_distributedTopmodel.decision_obj.bcLowrSoiH.value
# check option and selected method of (27) choice of thermal conductivity representation for soil in Decision file
S_distributedTopmodel.decision_obj.thCondSoil.options, S_distributedTopmodel.decision_obj.thCondSoil.value
# check Basin variable meta data in file manager file
S_distributedTopmodel.meta_basinvar.filename
# check Basin Parameter info data in file manager file
S_distributedTopmodel.basin_par.filename
# check Forcing list data in file manager file
S_distributedTopmodel.forcing_list.filename
# check Initial condition data in file manager file
S_distributedTopmodel.initial_cond.filename
# run the model giving the output the suffix "distributedTopmodel_hs" and get the "results_distributedTopmodel" object
results_distributedTopmodel, output_DT = S_distributedTopmodel.execute(run_suffix="distributedTopmodel_hs", run_option = 'local')
###Output
file_suffix is 'distributedTopmodel_hs'.
file_master is '/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_fileManager_distributedTopmodel.txt'.
start at 13:59:45
Name of Model Output control file: /media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/meta/Model_Output.txt
decisions file = /media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zDecisions_distributedTopmodel.txt
1 simulStart: 2001-07-01 00:00
2 simulFinsh: 2008-09-30 00:00
3 soilCatTbl: ROSETTA
4 vegeParTbl: USGS
5 soilStress: NoahType
6 stomResist: BallBerry
7 num_method: itertive
8 fDerivMeth: analytic
9 LAI_method: specified
10 f_Richards: mixdform
11 groundwatr: qTopmodl
12 hc_profile: pow_prof
13 bcUpprTdyn: nrg_flux
14 bcLowrTdyn: zeroFlux
15 bcUpprSoiH: liq_flux
16 bcLowrSoiH: zeroFlux
17 veg_traits: CM_QJRMS1988
18 canopyEmis: difTrans
19 snowIncept: lightSnow
20 windPrfile: logBelowCanopy
21 astability: louisinv
22 canopySrad: BeersLaw
23 alb_method: varDecay
24 compaction: anderson
25 snowLayers: CLM_2010
26 thCondSnow: jrdn1991
27 thCondSoil: funcSoilWet
28 spatial_gw: localColumn
29 subRouting: timeDlay
startTime: iyyy, im, id, ih, imin = 2001 7 1 0 0
finshTime: iyyy, im, id, ih, imin = 2008 9 30 0 0
/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zLocalParamInfo.txt
/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/settings/wrrPaperTestCases/figure09/summa_zBasinParamInfo.txt
Skipping over SLTYPE = STAS
Skipping over SLTYPE = STAS-RUC
hruIndex
frozenPrecipMultip
theta_mp
theta_sat
theta_res
vGn_alpha
vGn_n
f_impede
k_soil
k_macropore
critSoilWilting
critSoilTranspire
winterSAI
summerLAI
heightCanopyTop
heightCanopyBottom
kAnisotropic
zScale_TOPMODEL
qSurfScale
fieldCapacity
Created output file:/media/sf_pysumma/a0105d479c334764ba84633c5b9c1c01/a0105d479c334764ba84633c5b9c1c01/data/contents/summaTestCases_2.x/output/wrrPaperTestCases/figure09/basinRunoff_output_distributedTopmodel_hs_timestep.nc
initial date/time = 2018-07-18 13:59:45.349
final date/time = 2018-07-18 14:10:25.376
elapsed init = 0.9990000 s
fraction init = 1.5608717E-03 s
elapsed read = 4.956000 s
fraction read = 7.7434233E-03 s
elapsed write = 181.1990 s
fraction write = 0.2831115 s
elapsed physics = 452.5280 s
fraction physics = 0.7070452 s
elapsed time = 640.0270 s
or 10.66712 m
or 0.1777853 h
or 7.4077199E-03 d
number threads = 1
FORTRAN STOP: finished simulation successfully.
###Markdown
4. Results Recreate the Figure 8(right) plot from Clark et al., 2015: The total ET Sensitivity for the model representation of the lateral flux of liquid water
###Code
from pysumma.Plotting import Plotting
from jupyterthemes import jtplot
import matplotlib.pyplot as plt
import pandas as pd
jtplot.figsize(x=10, y=10)
###Output
_____no_output_____
###Markdown
4.1) Create a function to calculate total ET by hour of day from the SUMMA output for the period 1 June to 20 August 2007
###Code
def calc_total_et(et_output_df):
# Total Evapotranspiration = Canopy Transpiration + Canopy Evaporation + Ground Evaporation
# Change unit from kgm-2s-1 to mm/hr (mulpitle 3600)
total_et_data = (et_output_df['scalarCanopyTranspiration'] + et_output_df['scalarCanopyEvaporation'] + et_output_df['scalarGroundEvaporation'])*3600
# create dates(X-axis) attribute from ouput netcdf
dates = total_et_data.coords['time'].data
# create data value(Y-axis) attribute from ouput netcdf
data_values = total_et_data.data
# create two dimensional tabular data structure
total_et_df = pd.DataFrame(data_values, index=dates)
# round time to nearest hour (ex. 2006-10-01T00:59:59.99 -> 2006-10-01T01:00:00)
total_et_df.index = total_et_df.index.round("H")
# set the time period to display plot
total_et_df = total_et_df.loc["2007-06-01":"2007-08-20"]
# resample data by the average value hourly
total_et_df_hourly = total_et_df.resample("H").mean()
# resample data by the average for hour of day
total_et_by_hour = total_et_df_hourly.groupby(total_et_df_hourly.index.hour).mean()
return total_et_by_hour
###Output
_____no_output_____
###Markdown
4.2) Get hour-of-day output for each parameterization of the lateral flux of liquid water for the period 1 June to 20 August 2007
###Code
# get hour of day output using calc_total_et method (1d Richards method appied 1 hru)
hour_1dRichards = calc_total_et(results_1dRichards)
# get hour of day output using calc_total_et method (lumped Topmodel method appied 1 hru)
hour_lumpedTopmodel = calc_total_et(results_lumpedTopmodel)
# get hour of day output using calc_total_et method (lumped Topmodel method appied 6 hru)
hour_distributedTopmodel = calc_total_et(results_distributedTopmodel)
# check the area of each hru to calculate areal average ET
trial_parameter_nc = Plotting(S_distributedTopmodel.setting_path.filepath+S_distributedTopmodel.local_attr.value)
trial_parameter = trial_parameter_nc.open_netcdf()
# read the area of each hru
trial_parameter['HRUarea']
# calculate areal average ET for distributed Topmodel
hour_distributedTopmodel_average = (hour_distributedTopmodel[0]*78300 + hour_distributedTopmodel[1]*32700 + hour_distributedTopmodel[2]*18600 + hour_distributedTopmodel[3]*32800 + hour_distributedTopmodel[4]*168200 + hour_distributedTopmodel[5]*45400)/(78300+32700+18600+32800+168200+45400)
###Output
_____no_output_____
###Markdown
4.3) Combine the parameterizations of the lateral flux of liquid water into a single Pandas DataFrame
###Code
# Combine ET for model representation of the lateral flux of liquid water
ET_Combine = pd.concat([hour_1dRichards, hour_lumpedTopmodel, hour_distributedTopmodel_average], axis=1)
# add label
ET_Combine.columns = ["Baseflow = 1D Richards'", 'Baseflow = Topmodel(lumped)', 'Baseflow = Topmodel(distributed)']
ET_Combine
###Output
_____no_output_____
###Markdown
4.4) Add observation data from the Aspen station in Reynolds Mountain East to the plot
###Code
# create pySUMMA Plotting Object
Val_eddyFlux = Plotting(hs_path + '/summaTestCases_2.x/testCases_data/validationData/ReynoldsCreek_eddyFlux.nc')
# read Total Evapotranspiration(LE-wpl) from validation netcdf file
Obs_Evapotranspitaton = Val_eddyFlux.ds['LE-wpl']
# create dates(X-axis) attribute from validation netcdf file
dates = Obs_Evapotranspitaton.coords['time'].data
# Change unit from Wm-2 to mm/hr (1 Wm-2 = 0.0864 MJm-2day-1, 1 MJm-2day-1 = 0.408 mmday-1, 1day = 24h)
data_values = Obs_Evapotranspitaton.data*0.0864*0.408/24
# create two dimensional tabular data structure
df = pd.DataFrame(data_values, index=dates)
# set the time period to display plot
df_filt = df.loc["2007-06-01":"2007-08-20"]
# select aspen obervation station among three different stations
df_filt.columns = ['-','Observation (aspen)','-']
# resample data by the average for hour of day
df_gp_hr = df_filt.groupby([df_filt.index.hour, df_filt.index.minute]).mean()
# reset index so each row has an hour an minute column
df_gp_hr.reset_index(inplace=True)
# add hour and minute columns for plotting
xvals = df_gp_hr.reset_index()['level_0'] + df_gp_hr.reset_index()['level_1']/60.
###Output
_____no_output_____
###Markdown
4.5) Plotting output of the Parameterization of the Lateral Flux of Liquid Water and observation data
###Code
# create plot with the Parameterization of model representation of the lateral flux of liquid water
ET_Combine_Graph = ET_Combine.plot(legend=False)
# invert y axis
ET_Combine_Graph.invert_yaxis()
ET_Combine_Graph.plot(ET_Combine["Baseflow = 1D Richards'"],color='b', marker='^')
ET_Combine_Graph.plot(ET_Combine['Baseflow = Topmodel(lumped)'], color='g', marker='o')
ET_Combine_Graph.plot(ET_Combine['Baseflow = Topmodel(distributed)'], color='y', marker='d')
ET_Combine_Graph.tick_params(labelsize = 15)
# plot scatter with x='xvals', y='Observation (aspen)'
ET_Combine_Graph.scatter(xvals, df_gp_hr['Observation (aspen)'], color='black')
# add x, y label
ET_Combine_Graph.set_xlabel("Time of day (hr)", fontsize=18)
ET_Combine_Graph.set_ylabel("Total evapotranspiration (mm h-1)", fontsize=18)
handles, labels = ET_Combine_Graph.get_legend_handles_labels()
# show up the legend
ET_Combine_Graph.legend(handles[3:7], labels[3:7])
###Output
_____no_output_____
###Markdown
4.6) Validation between the observation and simulation data.
###Code
from sklearn.metrics import mean_absolute_error, mean_squared_error
from math import sqrt
from pysumma.Validation import validation
# defind simulation data
richard_simulation = ET_Combine ["Baseflow = 1D Richards'"]
lumped_simulation = ET_Combine ['Baseflow = Topmodel(lumped)']
distributed_simulation = ET_Combine ['Baseflow = Topmodel(distributed)']
# define observation data
obs = df_gp_hr.groupby('level_0').mean()
observation_data = obs['Observation (aspen)']
# analyze validation between the 1D Richards' simulation and observation data.
validation.analysis(observation_data, richard_simulation)
# analyze validation between the lumped Topmodel simulation and observation data.
validation.analysis(observation_data, lumped_simulation)
# analyze validation between the distributed Topmodel simulation and observation data.
validation.analysis(observation_data, distributed_simulation)
###Output
Mean Absolute Error: 0.021015
Mean Squared Error: 0.000966
Root Mean Squared Error: 0.031081
###Markdown
5. Discussion As stated in Clark et al., 2015, the following insights can be gained from this analysis:* The simulation in Figure 8 illustrates the impact of the model representation of the lateral flux of liquid water, which determines (in part) the availability of soil water.* The results in Figure 8 demonstrate strong sensitivities to the lateral flow parameterization. The parameterizations based on power-law transmissivity profiles (both lumped and distributed) have more drainage of soil water at deeper soil layers; however, the distributed simulations include inflow from upslope, resulting in more plant-available soil water and an increase in transpiration. Taken together, the results in Figure 8 illustrate the strong interdependencies among different modeling decisions, which of course complicate discriminating among competing process parameterizations. 6. Post notebook and simulation results back to HydroShare
###Code
from pysumma.hydroshare import hydroshare
hs = hydroshare.hydroshare()
# write meta data such as abstract, title, keywords, rtype
abstract = 'output of SUMMA(Fig8, Right)'
title = 'output of SUMMA(Fig8, Right)'
keywords = ('SUMMA', 'Hydrologic Model')
rtype = 'GenericResource'
files = output_R, output_LT, output_DT
metadata = '[{"creator":{"name":"Youngdon Choi"}}, {"creator":{"name":"Jeffrey Sadler"}}]'
extra_metadata = '{"key-1": "value-1", "key-2": "value-2"}'
# post simulation results of simpleResistance back to HS
resource_id = hs.createHydroShareResource(title=title, content_files=files, keywords=keywords, abstract=abstract, resource_type='genericresource', public=False)
###Output
Resource Created Successfully
Successfully Added Content Files
###Markdown
Save this notebook file to add the notebook to the resource of SUMMA output
###Code
# add a notebook to the resource of summa output
npath = save_filepath+'/pysumma/sopron_2018_notebooks/pySUMMA_Demo_Example_Fig8_right_Using_TestCase_from_Hydroshare.ipynb'
# check the resource id on HS that created.
hs.addContentToExistingResource(resource_id, [npath])
###Output
Successfully Added Content Files
###Markdown
7. Make this resource public
###Code
# check the resource id
C_resource_id = '3087695efdec4ca7a36306d8153214aa'
# make a resource public
hs.setAccessRules(C_resource_id, public=True)
###Output
_____no_output_____ |
fine_tunning_covid-Copy1.ipynb | ###Markdown
Initialize and Reshape the Networks
###Code
model_ft = xrv.models.DenseNet(weights="nih")
set_parameter_requires_grad(model_ft, feature_extract)
model_ft.classifier = nn.Linear(1024, num_classes)
model_ft.pathologies = ['normal', "pneumonia", 'COVID-19']
model_ft.op_threshs = None
input_size = 224
# Print the model we just instantiated
# print(model_ft)
###Output
_____no_output_____
###Markdown
Load Data
###Code
# Data augmentation and normalization for training
# Just normalization for validation
std = 0.24671278988052675
mean = 0.4912771402827791
data_transforms = {
'train': transforms.Compose([
# transforms.RandomResizedCrop(input_size),
# transforms.RandomHorizontalFlip(),
# transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize([mean], [std]),
]),
'test': transforms.Compose([
# transforms.Resize(input_size),
# transforms.CenterCrop(input_size),
# transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize([mean], [std]),
]),
}
print("Initializing Datasets and Dataloaders...")
# # # Create training and validation datasets
# image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'test']}
# # Create training and validation dataloaders
# dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'test']}
tmp_dataset_train = dataset.COVID19_Dataset(list_img_train, lbl_train, transform = data_transforms['train'])
tmp_dataset_val = dataset.COVID19_Dataset(list_img_val, lbl_val, transform = data_transforms['test'])
# Create a sampler by samples weights
sampler = torch.utils.data.sampler.WeightedRandomSampler(
weights=tmp_dataset_train.samples_weights,
num_samples=tmp_dataset_train.len)
dataloaders_dict = {}
dataloaders_dict['train'] = torch.utils.data.DataLoader(tmp_dataset_train,
batch_size=batch_size,
sampler=sampler,
num_workers=4)
dataloaders_dict['test'] = torch.utils.data.DataLoader(tmp_dataset_val,
batch_size=batch_size,
num_workers=4)
print(tmp_dataset_train.classes)
print(1. / np.unique(np.array(lbl_train), return_counts=True)[1])
tmp_dataset_train.class_to_idx
# Send the model to GPU
model_ft = model_ft.to(device)
# Gather the parameters to be optimized/updated in this run. If we are
# finetuning we will be updating all parameters. However, if we are
# doing feature extract method, we will only update the parameters
# that we have just initialized, i.e. the parameters with requires_grad
# is True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Observe that all parameters are being optimized
optimizer_ft = optim.Adam(params_to_update, lr=lr, weight_decay=1e-5, amsgrad=True)
scheduler = StepLR(optimizer_ft, step_size=20, gamma=0.5)
# %tensorboard --logdir=runs --host 0.0.0.0
# weights = torch.Tensor(weights).to(device)
# Setup the loss fxn
criterion = nn.CrossEntropyLoss(weight=torch.Tensor(dataloaders_dict['train'].dataset.weight_class).to(device))
# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, scheduler, num_epochs=num_epochs, is_inception=(model_name=="inception"))
torch.save(model_ft.state_dict(), './xrayvision_nih_ft.pt')
###Output
_____no_output_____ |
qutip-notebooks-master/development/development-smesolve-tests.ipynb | ###Markdown
Development notebook: Tests for QuTiP's stochastic master equation solver. Copyright (C) 2011 and later, Paul D. Nation & Robert J. Johansson. In this notebook we test the qutip stochastic master equation solver (smesolve) with a few textbook examples taken from the book Quantum Optics, by Walls and Milburn, section 6.7.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from qutip import *
###Output
_____no_output_____
###Markdown
Photo-count detection TheoryStochastic master equation in Milburn's formulation$\displaystyle d\rho(t) = dN(t) \mathcal{G}[a] \rho(t) - dt \gamma \mathcal{H}[\frac{1}{2}a^\dagger a] \rho(t)$where$\displaystyle \mathcal{G}[A] \rho = \frac{A\rho A^\dagger}{\mathrm{Tr}[A\rho A^\dagger]} - \rho$$\displaystyle \mathcal{H}[A] \rho = \frac{1}{2}(A\rho + \rho A^\dagger - \mathrm{Tr}[A\rho + \rho A^\dagger] \rho) $and $dN(t)$ is a Poisson distributed increment with $E[dN(t)] = \gamma \langle a^\dagger a\rangle (t)dt$. Formulation in QuTiPIn QuTiP we write the stochastic master equation on the form (in the interaction picture, with no deterministic dissipation):$\displaystyle d\rho(t) = D_{1}[A]\rho(t) dt + D_{2}[A]\rho(t) dW$where $A = \sqrt{\gamma} a$, so we can identify$\displaystyle D_{1}[A]\rho(t) = - \frac{1}{2}\gamma \mathcal{H}[a^\dagger a] \rho(t)= -\gamma \frac{1}{2}\left( a^\dagger a\rho + \rho a^\dagger a - \mathrm{Tr}[a^\dagger a\rho + \rho a^\dagger a] \rho \right)= -\frac{1}{2}\left( A^\dagger A\rho + \rho A^\dagger A - \mathrm{Tr}[A^\dagger A\rho + \rho A^\dagger A] \rho \right)$$\displaystyle D_{2}[A]\rho(t) = \mathcal{G}[a] \rho = \frac{A\rho A^\dagger}{\mathrm{Tr}[A\rho A^\dagger]} - \rho$and $dW = dN(t)$and $A = \sqrt{\gamma} a$ is the collapse operator including the rate of the process as a coefficient in the operator. Reference solution: deterministic master equation
###Code
N = 10
w0 = 0.5 * 2 * np.pi
times = np.linspace(0, 15, 150)
dt = times[1] - times[0]
gamma = 0.25
A = 2.5
ntraj = 50
nsubsteps = 50
a = destroy(N)
x = a + a.dag()
H = w0 * a.dag() * a
#rho0 = coherent(N, 5)
rho0 = fock(N, 5)
c_ops = [np.sqrt(gamma) * a]
e_ops = [a.dag() * a, x]
result_ref = mesolve(H, rho0, times, c_ops, e_ops)
plot_expectation_values(result_ref);
###Output
_____no_output_____
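###Markdown
As a quick, purely illustrative sanity check of the jump superoperator $\mathcal{G}[A]$ defined above (using the operators already created in this notebook), note that $\mathrm{Tr}\,\mathcal{G}[A]\rho = 0$:
###Code
# illustrative only: G[A]rho = A rho A.dag() / Tr[A rho A.dag()] - rho, applied to the initial state
A = np.sqrt(gamma) * a
rho = ket2dm(rho0)
numerator = A * rho * A.dag()
G_rho = numerator / numerator.tr() - rho
G_rho.tr()  # should be (numerically) zero
###Output
_____no_output_____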
###Markdown
Solve using stochastic master equation $\displaystyle D_{1}[a, \rho] = -\gamma \frac{1}{2}\left( a^\dagger a\rho + \rho a^\dagger a - \mathrm{Tr}[a^\dagger a\rho + \rho a^\dagger a] \right)\rightarrow - \frac{1}{2}(\{A^\dagger A\}_L + \{A^\dagger A\}_R)\rho_v + \mathrm{E}[(\{A^\dagger A\}_L + \{A^\dagger A\}_R)\rho_v]$ $\displaystyle D_{2}[A, \rho(t)] = \frac{A\rho A^\dagger}{\mathrm{Tr}[A\rho A^\dagger]} - \rho \rightarrow \frac{A_LA^\dagger_R \rho_v}{\mathrm{E}[A_LA^\dagger_R \rho_v]} - \rho_v$ Using QuTiP built-in photo-current detection functions for $D_1$ and $D_2$
###Code
result = photocurrent_mesolve(H, rho0, times, c_ops=[], sc_ops=c_ops, e_ops=e_ops,
ntraj=ntraj, nsubsteps=nsubsteps,
store_measurement=True, noise=1234)
plot_expectation_values([result, result_ref]);
for m in result.measurement:
plt.step(times, dt * m.real)
###Output
_____no_output_____
###Markdown
Solve the problem again, with the same noise as the previous run. `photocurrent_mesolve` does not take custom noise, but you can set the seed.
###Code
result = photocurrent_mesolve(H, rho0, times, c_ops=[], sc_ops=c_ops, e_ops=e_ops,
ntraj=ntraj, nsubsteps=nsubsteps, store_measurement=True, noise=1234)
plot_expectation_values([result, result_ref]);
for m in result.measurement:
plt.step(times, dt * m.real)
###Output
_____no_output_____
###Markdown
Homodyne detection
###Code
H = w0 * a.dag() * a + A * (a + a.dag())
result_ref = mesolve(H, rho0, times, c_ops, e_ops)
###Output
_____no_output_____
###Markdown
Theory Stochastic master equation for homodyne in Milburn's formulation$\displaystyle d\rho(t) = -i[H, \rho(t)]dt + \gamma\mathcal{D}[a]\rho(t) dt + dW(t) \sqrt{\gamma} \mathcal{H}[a] \rho(t)$where $\mathcal{D}$ is the standard Lindblad dissipator superoperator, and $\mathcal{H}$ is defined as above, and $dW(t)$ is a normally distributed increment with zero mean and variance $dt$ (standard deviation $\sqrt{dt}$).In QuTiP format we have:$\displaystyle d\rho(t) = -i[H, \rho(t)]dt + D_{1}[A]\rho(t) dt + D_{2}[A]\rho(t) dW$where $A = \sqrt{\gamma} a$, so we can identify $\displaystyle D_{1}[A]\rho(t) = \gamma \mathcal{D}[a]\rho(t) = \mathcal{D}[A]\rho(t)$
###Code
L = liouvillian(H, c_ops=c_ops).data
def d1_rho_func(t, rho_vec):
return cy.spmv(L, rho_vec)
###Output
_____no_output_____
###Markdown
$\displaystyle D_{2}[A]\rho(t) = \sqrt{\gamma} \mathcal{H}[a]\rho(t) = A\rho + \rho A^\dagger - \mathrm{Tr}[A\rho + \rho A^\dagger] \rho\rightarrow (A_L + A_R^\dagger)\rho_v - \mathrm{Tr}[(A_L + A_R^\dagger)\rho_v] \rho_v$
###Code
n_sum = spre(c_ops[0]) + spost(c_ops[0].dag())
n_sum_data = n_sum.data
def d2_rho_func(t, rho_vec):
e1 = cy.cy_expect_rho_vec(n_sum_data, rho_vec, False)
out = np.zeros((1,len(rho_vec)),dtype=complex)
out += cy.spmv(n_sum_data, rho_vec) - e1 * rho_vec
return out
result = general_stochastic(ket2dm(rho0), times, d1=d1_rho_func, d2=d2_rho_func,
e_ops=[spre(op) for op in e_ops], ntraj=ntraj, solver="platen",
m_ops=[spre(a + a.dag())], dW_factors=[1/np.sqrt(gamma)],
nsubsteps=nsubsteps, store_measurement=True, map_func=parallel_map)
plot_expectation_values([result, result_ref]);
for m in result.measurement:
plt.plot(times, m[:, 0].real, 'b', alpha=0.025)
plt.plot(times, result_ref.expect[1], 'k', lw=2);
plt.ylim(-15, 15)
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,0].real, 'r', lw=2);
###Output
_____no_output_____
###Markdown
Using QuTiP built-in homodyne detection functions for $D_1$ and $D_2$
###Code
result = smesolve(H, rho0, times, [], c_ops, e_ops, ntraj=ntraj, nsubsteps=nsubsteps, solver="pc-euler",
method='homodyne', store_measurement=True)
plot_expectation_values([result, result_ref]);
for m in result.measurement:
plt.plot(times, m[:, 0].real / np.sqrt(gamma), 'b', alpha=0.025)
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,0].real / np.sqrt(gamma), 'r', lw=2);
plt.plot(times, result_ref.expect[1], 'k', lw=2)
###Output
_____no_output_____
###Markdown
Solve problem again, this time with a specified noise (from previous run)
###Code
result = smesolve(H, rho0, times, [], c_ops, e_ops, ntraj=ntraj, nsubsteps=nsubsteps, solver="pc-euler",
method='homodyne', store_measurement=True, noise=result.noise)
plot_expectation_values([result, result_ref]);
for m in result.measurement:
plt.plot(times, m[:, 0].real / np.sqrt(gamma), 'b', alpha=0.025)
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,0].real / np.sqrt(gamma), 'r', lw=2);
plt.plot(times, result_ref.expect[1], 'k', lw=2)
###Output
_____no_output_____
###Markdown
Heterodyne detection
###Code
e_ops = [a.dag() * a, a + a.dag(), -1j * (a - a.dag())]
result_ref = mesolve(H, rho0, times, c_ops, e_ops)
###Output
_____no_output_____
###Markdown
Stochastic master equation for heterodyne in Milburn's formulation$\displaystyle d\rho(t) = -i[H, \rho(t)]dt + \gamma\mathcal{D}[a]\rho(t) dt + \frac{1}{\sqrt{2}} dW_1(t) \sqrt{\gamma} \mathcal{H}[a] \rho(t) + \frac{1}{\sqrt{2}} dW_2(t) \sqrt{\gamma} \mathcal{H}[-ia] \rho(t)$where $\mathcal{D}$ is the standard Lindblad dissipator superoperator, and $\mathcal{H}$ is defined as above, and the $dW_i(t)$ are independent normally distributed increments with zero mean and variance $dt$ (standard deviation $\sqrt{dt}$).In QuTiP format we have:$\displaystyle d\rho(t) = -i[H, \rho(t)]dt + D_{1}[A]\rho(t) dt + D_{2}^{(1)}[A]\rho(t) dW_1 + D_{2}^{(2)}[A]\rho(t) dW_2$where $A = \sqrt{\gamma} a$, so we can identify $\displaystyle D_{1}[A]\rho = \gamma \mathcal{D}[a]\rho = \mathcal{D}[A]\rho$
###Code
#def d1_rho_func(A, rho_vec):
# return A[7] * rho_vec
L = liouvillian(H, c_ops=c_ops).data
def d1_rho_func(t, rho_vec):
return cy.spmv(L, rho_vec)
###Output
_____no_output_____
###Markdown
$D_{2}^{(1)}[A]\rho = \frac{1}{\sqrt{2}} \sqrt{\gamma} \mathcal{H}[a] \rho =\frac{1}{\sqrt{2}} \mathcal{H}[A] \rho =\frac{1}{\sqrt{2}}(A\rho + \rho A^\dagger - \mathrm{Tr}[A\rho + \rho A^\dagger] \rho)\rightarrow \frac{1}{\sqrt{2}} \left\{(A_L + A_R^\dagger)\rho_v - \mathrm{Tr}[(A_L + A_R^\dagger)\rho_v] \rho_v\right\}$$D_{2}^{(2)}[A]\rho = \frac{1}{\sqrt{2}} \sqrt{\gamma} \mathcal{H}[-ia] \rho = \frac{1}{\sqrt{2}} \mathcal{H}[-iA] \rho =\frac{-i}{\sqrt{2}}(A\rho - \rho A^\dagger - \mathrm{Tr}[A\rho - \rho A^\dagger] \rho)\rightarrow \frac{-i}{\sqrt{2}} \left\{(A_L - A_R^\dagger)\rho_v - \mathrm{Tr}[(A_L - A_R^\dagger)\rho_v] \rho_v\right\}$
###Code
n_sump = spre(c_ops[0]) + spost(c_ops[0].dag())
n_sump_data = n_sump.data/np.sqrt(2)
n_summ = spre(c_ops[0]) - spost(c_ops[0].dag())
n_summ_data = -1.0j*n_summ.data/np.sqrt(2)
def d2_rho_func(t, rho_vec):
out = np.zeros((2,len(rho_vec)),dtype=complex)
e1 = cy.cy_expect_rho_vec(n_sump_data, rho_vec, False)
out[0,:] += cy.spmv(n_sump_data, rho_vec) - e1 * rho_vec
e1 = cy.cy_expect_rho_vec(n_summ_data, rho_vec, False)
out[1,:] += cy.spmv(n_summ_data, rho_vec) - e1 * rho_vec
return out
#def d2_rho_func(t, rho_vec):
# e1 = cy.cy_expect_rho_vec(n_sum_data, rho_vec, False)
# out = np.zeros((1,len(rho_vec)),dtype=complex)
# out += cy.spmv(n_sum_data, rho_vec) - e1 * rho_vec
# return out
result = general_stochastic(ket2dm(rho0), times, d1=d1_rho_func, d2=d2_rho_func,
e_ops=[spre(op) for op in e_ops], solver="platen", # order=1
ntraj=ntraj, nsubsteps=nsubsteps, len_d2=2,
m_ops=[spre(a + a.dag()), (-1j)*spre(a - a.dag())],
dW_factors=[2/np.sqrt(gamma), 2/np.sqrt(gamma)],
store_measurement=True, map_func=parallel_map)
plot_expectation_values([result, result_ref])
for m in result.measurement:
plt.plot(times, m[:, 0].real, 'r', alpha=0.025)
plt.plot(times, m[:, 1].real, 'b', alpha=0.025)
plt.ylim(-20, 20)
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,0].real, 'r', lw=2);
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,1].real, 'b', lw=2);
plt.plot(times, result_ref.expect[1], 'k', lw=2);
plt.plot(times, result_ref.expect[2], 'k', lw=2);
###Output
_____no_output_____
###Markdown
Using QuTiP built-in heterodyne detection functions for $D_1$ and $D_2$
###Code
result = smesolve(H, rho0, times, [], c_ops, e_ops, ntraj=ntraj, nsubsteps=nsubsteps, solver="milstein", # order=1
method='heterodyne', store_measurement=True)
plot_expectation_values([result, result_ref]);
for m in result.measurement:
plt.plot(times, m[:, 0, 0].real / np.sqrt(gamma), 'r', alpha=0.025)
plt.plot(times, m[:, 0, 1].real / np.sqrt(gamma), 'b', alpha=0.025)
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,0,0].real / np.sqrt(gamma), 'r', lw=2);
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,0,1].real / np.sqrt(gamma), 'b', lw=2);
plt.plot(times, result_ref.expect[1], 'k', lw=2);
plt.plot(times, result_ref.expect[2], 'k', lw=2);
###Output
_____no_output_____
###Markdown
Solve problem again, this time with a specified noise (from previous run)
###Code
result = smesolve(H, rho0, times, [], c_ops, e_ops, ntraj=ntraj, nsubsteps=nsubsteps, solver="milstein", # order=1
method='heterodyne', store_measurement=True, noise=result.noise)
plot_expectation_values([result, result_ref]);
for m in result.measurement:
plt.plot(times, m[:, 0, 0].real / np.sqrt(gamma), 'r', alpha=0.025)
plt.plot(times, m[:, 0, 1].real / np.sqrt(gamma), 'b', alpha=0.025)
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,0,0].real / np.sqrt(gamma), 'r', lw=2);
plt.plot(times, np.array(result.measurement).mean(axis=0)[:,0,1].real / np.sqrt(gamma), 'b', lw=2);
plt.plot(times, result_ref.expect[1], 'k', lw=2);
plt.plot(times, result_ref.expect[2], 'k', lw=2);
plt.axis('tight')
plt.ylim(-25, 25);
###Output
_____no_output_____
###Markdown
Software version
###Code
from qutip.ipynbtools import version_table
version_table()
###Output
_____no_output_____ |
catboost/benchmarks/quality_benchmarks/notebooks/comparison_kdd98.ipynb | ###Markdown
Comparing algorithms on the Kdd98 datasetThe data is taken from [this source](https://kdd.ics.uci.edu/databases/kddcup98/kddcup98.html). For details on running the comparison, see [this article](https://github.com/catboost/benchmarks/blob/master/comparison_description.pdf).
###Code
from experiment import Experiment
from xgboost_experiment import XGBExperiment
from lightgbm_experiment import LGBExperiment
from catboost_experiment import CABExperiment
###Output
_____no_output_____
###Markdown
Load the dataset and set parameters for the experiment.
###Code
learning_task = 'classification'
dataset_path = '/media/pool_storage/tsnet-pools-converted/kdd98_part/'
n_estimators = 5000
max_hyperopt_evals = 50
experiment = Experiment(learning_task, train_path=dataset_path + 'train_full3',
test_path=dataset_path + 'test3', cd_path = dataset_path + 'train_full3.cd')
X_train, y_train, X_test, y_test, cat_cols = experiment.read_data()
###Output
_____no_output_____
###Markdown
Write a function that does the following: **Preprocess the dataset:** calculate counter values on the training set, transform categorical features (for the `XGBoost` and `LightGBM` algorithms), and convert the data to the format of the algorithm. Do the same thing with all pairs after splitting them into cross-validation folds. **Select the optimal number of trees for the algorithm with the default parameters:** for each fold and each number of trees, get the result of the algorithm trained on the other four folds, average the results for each number of trees, and choose the best one. **Assess the quality of the algorithm with the default parameters on a test dataset:** train the algorithm with the number of trees obtained in the previous step, and calculate the metric value on the test dataset. **Tune parameters for the algorithm using `Hyperopt`:** on each `Hyperopt` iteration, the best number of trees is selected and the cross-validation metric is calculated. **Show results on the test dataset for the algorithm with the tuned parameters:** train the algorithm with the optimal parameters and the number of trees obtained in the previous step, and calculate the metric value on the test dataset.
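As a rough sketch of the "optimal number of trees" step only (the arrays below are invented for illustration; the real logic lives inside the `Experiment` classes):
###Code
import numpy as np

# Illustrative only: average per-fold metric curves over tree counts and pick the best count.
fold_curves = np.random.rand(5, n_estimators)     # pretend loss for 5 folds x n_estimators tree counts
mean_curve = fold_curves.mean(axis=0)             # average across folds for each tree count
best_n_estimators = int(mean_curve.argmin()) + 1  # +1 because tree counts start at 1
best_n_estimators
###Output
_____no_output_____
###Markdown
The `run_experiment` function below wraps all of these steps for a given library.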
###Code
import numpy as np
def run_experiment(Experiment, title):
experiment = Experiment(learning_task, max_hyperopt_evals=max_hyperopt_evals,
n_estimators=n_estimators)
cv_pairs, (dtrain, dtest) = experiment.split_and_preprocess(X_train.copy(), y_train,
X_test.copy(), y_test,
cat_cols, n_splits=5)
default_cv_result = experiment.run_cv(cv_pairs)
experiment.print_result(default_cv_result, 'Default {} result on cv'.format(title))
print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
default_test_losses = []
for seed in range(5):
test_result = experiment.run_test(dtrain, dtest, X_test, params=default_cv_result['params'],
n_estimators=default_cv_result['best_n_estimators'], seed=seed)
default_test_losses.append(test_result['loss'])
        print('For seed=%d Test\'s %s : %.5f' % (seed, experiment.metric, default_test_losses[-1]))
    print('\nTest\'s %s mean: %.5f, Test\'s %s std: %.5f' % (experiment.metric, np.mean(default_test_losses),
                                                             experiment.metric, np.std(default_test_losses)))
print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
print('Hyperopt iterations:\n\n')
tuned_cv_result = experiment.optimize_params(cv_pairs)
print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
experiment.print_result(tuned_cv_result, 'Tuned {} result on cv'.format(title))
print('\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
tuned_test_losses = []
for seed in range(5):
test_result = experiment.run_test(dtrain, dtest, X_test, params=tuned_cv_result['params'],
n_estimators=tuned_cv_result['best_n_estimators'], seed=seed)
tuned_test_losses.append(test_result['loss'])
        print('For seed=%d Test\'s %s : %.5f' % (seed, experiment.metric, tuned_test_losses[-1]))
    print('\nTest\'s %s mean: %.5f, Test\'s %s std: %.5f' % (experiment.metric, np.mean(tuned_test_losses),
                                                             experiment.metric, np.std(tuned_test_losses)))
return np.mean(default_test_losses), np.mean(tuned_test_losses)
###Output
_____no_output_____
###Markdown
`XGBoost`
###Code
xgb_default_test_result, xgb_tuned_test_result = run_experiment(XGBExperiment, "XGBoost")
###Output
Default XGBoost result on cv:
logloss = 0.1975232
best_n_estimators = 14
params = {'colsample_bytree': 1.0, 'silent': 1, 'eval_metric': 'logloss', 'colsample_bylevel': 1.0, 'min_child_weight': 1, 'subsample': 1.0, 'eta': 0.3, 'objective': 'binary:logistic', 'alpha': 0, 'seed': 0, 'max_depth': 6, 'gamma': 0, 'lambda': 1}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Default XGBoost result on test:
For seed=0 Test's logloss : 0.197949
For seed=1 Test's logloss : 0.197949
For seed=2 Test's logloss : 0.197949
For seed=3 Test's logloss : 0.197949
For seed=4 Test's logloss : 0.197949
Test's logloss mean: 0.197949, Test's logloss std: 0.000000
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Hyperopt iterations:
[1/50] eval_time=4295.28 sec current_logloss=0.195393 min_logloss=0.195393
[2/50] eval_time=4436.20 sec current_logloss=0.195671 min_logloss=0.195393
[3/50] eval_time=2845.32 sec current_logloss=0.199949 min_logloss=0.195393
[4/50] eval_time=1688.25 sec current_logloss=0.195488 min_logloss=0.195393
[5/50] eval_time=2317.07 sec current_logloss=0.195236 min_logloss=0.195236
[6/50] eval_time=2891.34 sec current_logloss=0.195278 min_logloss=0.195236
[7/50] eval_time=4427.36 sec current_logloss=0.201480 min_logloss=0.195236
[8/50] eval_time=4201.03 sec current_logloss=0.195576 min_logloss=0.195236
[9/50] eval_time=2779.64 sec current_logloss=0.195191 min_logloss=0.195191
[10/50] eval_time=8354.08 sec current_logloss=0.196146 min_logloss=0.195191
[11/50] eval_time=1716.99 sec current_logloss=0.196787 min_logloss=0.195191
[12/50] eval_time=5226.02 sec current_logloss=0.197976 min_logloss=0.195191
[13/50] eval_time=2587.79 sec current_logloss=0.197473 min_logloss=0.195191
[14/50] eval_time=3180.63 sec current_logloss=0.195337 min_logloss=0.195191
[15/50] eval_time=6125.45 sec current_logloss=0.196336 min_logloss=0.195191
[16/50] eval_time=3129.36 sec current_logloss=0.195259 min_logloss=0.195191
[17/50] eval_time=4668.34 sec current_logloss=0.195887 min_logloss=0.195191
[18/50] eval_time=1357.13 sec current_logloss=0.195391 min_logloss=0.195191
[19/50] eval_time=1105.39 sec current_logloss=0.195591 min_logloss=0.195191
[20/50] eval_time=2150.24 sec current_logloss=0.198659 min_logloss=0.195191
[21/50] eval_time=3087.80 sec current_logloss=0.195537 min_logloss=0.195191
[22/50] eval_time=3785.04 sec current_logloss=0.195180 min_logloss=0.195180
[23/50] eval_time=3602.85 sec current_logloss=0.195181 min_logloss=0.195180
[24/50] eval_time=4158.88 sec current_logloss=0.195219 min_logloss=0.195180
[25/50] eval_time=3598.84 sec current_logloss=0.195649 min_logloss=0.195180
[26/50] eval_time=4265.58 sec current_logloss=0.195572 min_logloss=0.195180
[27/50] eval_time=3098.82 sec current_logloss=0.195266 min_logloss=0.195180
[28/50] eval_time=5198.82 sec current_logloss=0.195236 min_logloss=0.195180
[29/50] eval_time=3181.04 sec current_logloss=0.196250 min_logloss=0.195180
[30/50] eval_time=2762.82 sec current_logloss=0.195246 min_logloss=0.195180
[31/50] eval_time=6814.80 sec current_logloss=0.195396 min_logloss=0.195180
[32/50] eval_time=1953.02 sec current_logloss=0.200207 min_logloss=0.195180
[33/50] eval_time=3524.44 sec current_logloss=0.195820 min_logloss=0.195180
[34/50] eval_time=1902.91 sec current_logloss=0.195628 min_logloss=0.195180
[35/50] eval_time=2097.94 sec current_logloss=0.195190 min_logloss=0.195180
[36/50] eval_time=6011.27 sec current_logloss=0.196081 min_logloss=0.195180
[37/50] eval_time=4713.44 sec current_logloss=0.196215 min_logloss=0.195180
[38/50] eval_time=3910.68 sec current_logloss=0.203723 min_logloss=0.195180
[39/50] eval_time=2614.22 sec current_logloss=0.195435 min_logloss=0.195180
[40/50] eval_time=3911.61 sec current_logloss=0.195335 min_logloss=0.195180
[41/50] eval_time=2926.58 sec current_logloss=0.195794 min_logloss=0.195180
[42/50] eval_time=3444.69 sec current_logloss=0.195205 min_logloss=0.195180
[43/50] eval_time=2246.88 sec current_logloss=0.195584 min_logloss=0.195180
[44/50] eval_time=5175.95 sec current_logloss=0.195236 min_logloss=0.195180
[45/50] eval_time=3286.98 sec current_logloss=0.195252 min_logloss=0.195180
[46/50] eval_time=3024.96 sec current_logloss=0.194989 min_logloss=0.194989
[47/50] eval_time=3245.81 sec current_logloss=0.195369 min_logloss=0.194989
[48/50] eval_time=2241.56 sec current_logloss=0.195271 min_logloss=0.194989
[49/50] eval_time=2952.45 sec current_logloss=0.195302 min_logloss=0.194989
[50/50] eval_time=3686.31 sec current_logloss=0.195145 min_logloss=0.194989
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tuned XGBoost result on cv:
logloss = 0.1949888
best_n_estimators = 1428
params = {'colsample_bytree': 0.9272709253076927, 'silent': 1, 'eval_metric': 'logloss', 'colsample_bylevel': 0.9428687661260641, 'min_child_weight': 55.20383095723314, 'subsample': 0.5375380743608249, 'eta': 0.008667600654220845, 'objective': 'binary:logistic', 'alpha': 0, 'seed': 0, 'max_depth': 3, 'gamma': 0, 'lambda': 0.004382881503917905}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tuned XGBoost result on test:
For seed=0 Test's logloss : 0.195637
For seed=1 Test's logloss : 0.195632
For seed=2 Test's logloss : 0.195661
For seed=3 Test's logloss : 0.195687
For seed=4 Test's logloss : 0.195769
Test's logloss mean: 0.195677, Test's logloss std: 0.000050
###Markdown
`LightGBM`
###Code
lgb_default_test_result, lgb_tuned_test_result = run_experiment(LGBExperiment, "LightGBM")
###Output
Default LightGBM result on cv:
logloss = 0.197665542842
best_n_estimators = 43
params = {'feature_fraction_seed': 0, 'num_leaves': 127, 'verbose': -1, 'bagging_seed': 0, 'lambda_l1': 0, 'drop_seed': 0, 'learning_rate': 0.1, 'lambda_l2': 0, 'data_random_seed': 0, 'bagging_fraction': 1.0, 'min_data_in_leaf': 100, 'max_bin': 255, 'objective': 'binary', 'bagging_freq': 1, 'metric': 'binary_logloss', 'min_sum_hessian_in_leaf': 10, 'feature_fraction': 1.0}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Default LightGBM result on test:
For seed=0 Test's logloss : 0.198369
For seed=1 Test's logloss : 0.198369
For seed=2 Test's logloss : 0.198369
For seed=3 Test's logloss : 0.198369
For seed=4 Test's logloss : 0.198369
Test's logloss mean: 0.198369, Test's logloss std: 0.000000
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Hyperopt iterations:
[1/50] eval_time=420.49 sec current_logloss=0.196272 min_logloss=0.196272
[2/50] eval_time=178.56 sec current_logloss=0.195450 min_logloss=0.195450
[3/50] eval_time=616.20 sec current_logloss=0.195454 min_logloss=0.195450
[4/50] eval_time=145.88 sec current_logloss=0.196070 min_logloss=0.195450
[5/50] eval_time=419.04 sec current_logloss=0.200589 min_logloss=0.195450
[6/50] eval_time=3615.08 sec current_logloss=0.196989 min_logloss=0.195450
[7/50] eval_time=714.16 sec current_logloss=0.195516 min_logloss=0.195450
[8/50] eval_time=176.85 sec current_logloss=0.195466 min_logloss=0.195450
[9/50] eval_time=373.51 sec current_logloss=0.195742 min_logloss=0.195450
[10/50] eval_time=496.53 sec current_logloss=0.199978 min_logloss=0.195450
[11/50] eval_time=398.77 sec current_logloss=0.195265 min_logloss=0.195265
[12/50] eval_time=575.91 sec current_logloss=0.209175 min_logloss=0.195265
[13/50] eval_time=484.16 sec current_logloss=0.210376 min_logloss=0.195265
[14/50] eval_time=216.68 sec current_logloss=0.195476 min_logloss=0.195265
[15/50] eval_time=238.00 sec current_logloss=0.198932 min_logloss=0.195265
[16/50] eval_time=341.03 sec current_logloss=0.227824 min_logloss=0.195265
[17/50] eval_time=240.18 sec current_logloss=0.200574 min_logloss=0.195265
[18/50] eval_time=1985.81 sec current_logloss=0.196324 min_logloss=0.195265
[19/50] eval_time=1960.27 sec current_logloss=0.197655 min_logloss=0.195265
[20/50] eval_time=825.63 sec current_logloss=0.206486 min_logloss=0.195265
[21/50] eval_time=341.25 sec current_logloss=0.195222 min_logloss=0.195222
[22/50] eval_time=349.65 sec current_logloss=0.195193 min_logloss=0.195193
[23/50] eval_time=286.30 sec current_logloss=0.195197 min_logloss=0.195193
[24/50] eval_time=258.09 sec current_logloss=0.195335 min_logloss=0.195193
[25/50] eval_time=237.57 sec current_logloss=0.195257 min_logloss=0.195193
[26/50] eval_time=325.98 sec current_logloss=0.195748 min_logloss=0.195193
[27/50] eval_time=253.78 sec current_logloss=0.195274 min_logloss=0.195193
[28/50] eval_time=201.21 sec current_logloss=0.197183 min_logloss=0.195193
[29/50] eval_time=337.12 sec current_logloss=0.195247 min_logloss=0.195193
[30/50] eval_time=277.74 sec current_logloss=0.195407 min_logloss=0.195193
[31/50] eval_time=212.93 sec current_logloss=0.195292 min_logloss=0.195193
[32/50] eval_time=278.63 sec current_logloss=0.195092 min_logloss=0.195092
[33/50] eval_time=167.50 sec current_logloss=0.195440 min_logloss=0.195092
[34/50] eval_time=253.88 sec current_logloss=0.195270 min_logloss=0.195092
[35/50] eval_time=438.59 sec current_logloss=0.195651 min_logloss=0.195092
[36/50] eval_time=714.21 sec current_logloss=0.195650 min_logloss=0.195092
[37/50] eval_time=193.24 sec current_logloss=0.195653 min_logloss=0.195092
[38/50] eval_time=233.01 sec current_logloss=0.195202 min_logloss=0.195092
[39/50] eval_time=826.79 sec current_logloss=0.198292 min_logloss=0.195092
[40/50] eval_time=429.70 sec current_logloss=0.195198 min_logloss=0.195092
[41/50] eval_time=294.75 sec current_logloss=0.195344 min_logloss=0.195092
[42/50] eval_time=163.62 sec current_logloss=0.195237 min_logloss=0.195092
[43/50] eval_time=805.45 sec current_logloss=0.195780 min_logloss=0.195092
[44/50] eval_time=369.92 sec current_logloss=0.196972 min_logloss=0.195092
[45/50] eval_time=287.20 sec current_logloss=0.196146 min_logloss=0.195092
[46/50] eval_time=283.91 sec current_logloss=0.195237 min_logloss=0.195092
[47/50] eval_time=391.56 sec current_logloss=0.195242 min_logloss=0.195092
[48/50] eval_time=125.08 sec current_logloss=0.195350 min_logloss=0.195092
[49/50] eval_time=314.37 sec current_logloss=0.196905 min_logloss=0.195092
[50/50] eval_time=1637.33 sec current_logloss=0.195874 min_logloss=0.195092
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tuned LightGBM result on cv:
logloss = 0.195092218127
best_n_estimators = 485
params = {'num_leaves': 5, 'verbose': -1, 'bagging_seed': 0, 'metric': 'binary_logloss', 'data_random_seed': 0, 'min_data_in_leaf': 5, 'bagging_fraction': 0.7801172267397591, 'min_sum_hessian_in_leaf': 132.9945857111621, 'feature_fraction_seed': 0, 'lambda_l1': 0.0022903323397730152, 'bagging_freq': 1, 'lambda_l2': 0, 'objective': 'binary', 'drop_seed': 0, 'learning_rate': 0.029609632447460447, 'feature_fraction': 0.7235330841303137}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tuned LightGBM result on test:
For seed=0 Test's logloss : 0.195671
For seed=1 Test's logloss : 0.195861
For seed=2 Test's logloss : 0.195678
For seed=3 Test's logloss : 0.195752
For seed=4 Test's logloss : 0.195833
Test's logloss mean: 0.195759, Test's logloss std: 0.000078
###Markdown
`CatBoost`
###Code
cab_default_test_result, cab_tuned_test_result = run_experiment(CABExperiment, "CatBoost")
###Output
Default CatBoost result on cv:
logloss = 0.19485599806
best_n_estimators = 683
params = {'rsm': 1.0, 'iterations': 683, 'random_seed': 4, 'verbose': False, 'thread_count': 16, 'learning_rate': 0.03, 'ctr_description': ['Borders', 'CounterMax'], 'depth': 6, 'fold_len_multiplier': 2, 'loss_function': 'Logloss', 'ctr_border_count': 16, 'l2_leaf_reg': 3, 'leaf_estimation_method': 'Newton', 'gradient_iterations': 10, 'border_count': 128, 'used_ram_limit': 100000000000}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Default CatBoost result on test:
For seed=0 Test's logloss : 0.194616
For seed=1 Test's logloss : 0.194756
For seed=2 Test's logloss : 0.194924
For seed=3 Test's logloss : 0.194871
For seed=4 Test's logloss : 0.194803
Test's logloss mean: 0.194794, Test's logloss std: 0.000106
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Hyperopt iterations:
[1/50] eval_time=4944.20 sec current_logloss=0.194964 min_logloss=0.194964
[2/50] eval_time=12153.96 sec current_logloss=0.195515 min_logloss=0.194964
[3/50] eval_time=4926.19 sec current_logloss=0.195328 min_logloss=0.194964
[4/50] eval_time=4860.61 sec current_logloss=0.195022 min_logloss=0.194964
[5/50] eval_time=11125.28 sec current_logloss=0.194852 min_logloss=0.194852
[6/50] eval_time=4935.25 sec current_logloss=0.194910 min_logloss=0.194852
[7/50] eval_time=5022.16 sec current_logloss=0.195246 min_logloss=0.194852
[8/50] eval_time=5165.75 sec current_logloss=0.195226 min_logloss=0.194852
[9/50] eval_time=12323.07 sec current_logloss=0.196396 min_logloss=0.194852
[10/50] eval_time=12086.84 sec current_logloss=0.194955 min_logloss=0.194852
[11/50] eval_time=11813.09 sec current_logloss=0.194851 min_logloss=0.194851
[12/50] eval_time=12888.27 sec current_logloss=0.195012 min_logloss=0.194851
[13/50] eval_time=4958.62 sec current_logloss=0.195102 min_logloss=0.194851
[14/50] eval_time=5118.79 sec current_logloss=0.195017 min_logloss=0.194851
[15/50] eval_time=12527.99 sec current_logloss=0.194928 min_logloss=0.194851
[16/50] eval_time=5290.68 sec current_logloss=0.195691 min_logloss=0.194851
[17/50] eval_time=12650.82 sec current_logloss=0.195398 min_logloss=0.194851
[18/50] eval_time=12112.05 sec current_logloss=0.194885 min_logloss=0.194851
[19/50] eval_time=4974.16 sec current_logloss=0.195140 min_logloss=0.194851
[20/50] eval_time=5243.92 sec current_logloss=0.195416 min_logloss=0.194851
[21/50] eval_time=11052.89 sec current_logloss=0.194912 min_logloss=0.194851
[22/50] eval_time=11729.43 sec current_logloss=0.194931 min_logloss=0.194851
[23/50] eval_time=11034.25 sec current_logloss=0.194923 min_logloss=0.194851
[24/50] eval_time=12850.55 sec current_logloss=0.196783 min_logloss=0.194851
[25/50] eval_time=10830.28 sec current_logloss=0.194827 min_logloss=0.194827
[26/50] eval_time=11637.06 sec current_logloss=0.194853 min_logloss=0.194827
[27/50] eval_time=12334.73 sec current_logloss=0.195484 min_logloss=0.194827
[28/50] eval_time=12303.96 sec current_logloss=0.194930 min_logloss=0.194827
[29/50] eval_time=12331.19 sec current_logloss=0.195006 min_logloss=0.194827
[30/50] eval_time=11835.68 sec current_logloss=0.194976 min_logloss=0.194827
[31/50] eval_time=12241.54 sec current_logloss=0.195969 min_logloss=0.194827
[32/50] eval_time=12011.55 sec current_logloss=0.194856 min_logloss=0.194827
[33/50] eval_time=11022.14 sec current_logloss=0.194902 min_logloss=0.194827
[34/50] eval_time=11249.97 sec current_logloss=0.194754 min_logloss=0.194754
[35/50] eval_time=12030.79 sec current_logloss=0.194820 min_logloss=0.194754
[36/50] eval_time=11984.47 sec current_logloss=0.194891 min_logloss=0.194754
[37/50] eval_time=11957.04 sec current_logloss=0.194861 min_logloss=0.194754
[38/50] eval_time=12210.76 sec current_logloss=0.195421 min_logloss=0.194754
[39/50] eval_time=12020.76 sec current_logloss=0.194788 min_logloss=0.194754
[40/50] eval_time=5343.53 sec current_logloss=0.196141 min_logloss=0.194754
[41/50] eval_time=12044.76 sec current_logloss=0.195008 min_logloss=0.194754
[42/50] eval_time=12192.10 sec current_logloss=0.195191 min_logloss=0.194754
[43/50] eval_time=5297.86 sec current_logloss=0.195678 min_logloss=0.194754
[44/50] eval_time=12022.92 sec current_logloss=0.195031 min_logloss=0.194754
[45/50] eval_time=12982.45 sec current_logloss=0.196409 min_logloss=0.194754
[46/50] eval_time=5191.68 sec current_logloss=0.195108 min_logloss=0.194754
[47/50] eval_time=12855.87 sec current_logloss=0.195243 min_logloss=0.194754
[48/50] eval_time=12082.07 sec current_logloss=0.194896 min_logloss=0.194754
[49/50] eval_time=5188.55 sec current_logloss=0.195176 min_logloss=0.194754
[50/50] eval_time=12414.69 sec current_logloss=0.195716 min_logloss=0.194754
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tuned CatBoost result on cv:
logloss = 0.19475353524
best_n_estimators = 3834
params = {'random_seed': 4, 'verbose': False, 'thread_count': 16, 'learning_rate': 0.010062560376588203, 'ctr_border_count': 16, 'ctr_description': ('Borders', 'CounterMax'), 'iterations': 3834, 'one_hot_max_size': 0, 'bagging_temperature': 0.5591407616034347, 'gradient_iterations': 10, 'random_strength': 20, 'depth': 6, 'loss_function': 'Logloss', 'l2_leaf_reg': 1.2090961094676416, 'border_count': 128, 'used_ram_limit': 100000000000}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tuned CatBoost result on test:
For seed=0 Test's logloss : 0.194552
For seed=1 Test's logloss : 0.194744
For seed=2 Test's logloss : 0.194694
For seed=3 Test's logloss : 0.194605
For seed=4 Test's logloss : 0.194747
Test's logloss mean: 0.194668, Test's logloss std: 0.000077
###Markdown
Comparing resultsThe final table with metric values on the test dataset.
###Code
import pandas as pd
import numpy as np
from IPython.display import HTML, display
%pylab inline --no-import-all
test_results = np.array([
(xgb_default_test_result, xgb_tuned_test_result),
(lgb_default_test_result, lgb_tuned_test_result),
(cab_default_test_result, cab_tuned_test_result)
])
diff = 100 * test_results / test_results[2,1] - 100
res = [['{:.6f} ({:+.2f}%)'.format(test_results[i, j], diff[i, j]) for j in range(2)] for i in range(3)]
display(HTML(pd.DataFrame(res, columns=['default', 'tuned'], index=['xgboost', 'lightgbm', 'catboost']).to_html()))
results = [
('Tuned CatBoost', cab_tuned_test_result),
('Default CatBoost', cab_default_test_result),
('Tuned XGBoost', xgb_tuned_test_result),
('Default XGBoost', xgb_default_test_result),
('Tuned LightGBM', lgb_tuned_test_result),
('Default LightGBM', lgb_default_test_result),
]
names = ['%s\n%.5f' % (name, loss) for name, loss in results]
plt.figure(figsize=(20, 7))
plt.scatter(range(6), [loss for _, loss in results], s=150)
plt.xticks(range(6), names, fontsize=15)
plt.yticks(fontsize=12)
plt.title('Comparison', fontsize=20)
plt.ylabel(experiment.metric, fontsize=16);
###Output
Populating the interactive namespace from numpy and matplotlib
|
ml/Investigate inception v3 different vectors.ipynb | ###Markdown
Investigate InceptionV3 feature extraction. Here we compare the tensorflow and keras implementations of the trained inception_v3 feature extractor. The result of both will be the 2048-dimensional feature vector from the last conv layer. Questions: * Why does Keras give different results in different sessions? E.g. a bug? * Why do the tf.hub and keras.applications feature vectors differ? * What is the pooling used in the tensorflow model? In Keras we use max pooling.
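One way to probe the pooling question empirically (a hedged sketch, assuming the hub feature vector is some global pooling of the final 8x8x2048 convolutional map; run it inside a session in the same way as the cells below):
###Code
import tensorflow as tf
import tensorflow.keras as keras

# Build the Keras backbone without any pooling so we get the raw 8x8x2048 map,
# then compute both candidate poolings and compare them to the tf.hub output.
probe_images = tf.placeholder(shape=(None, 299, 299, 3), dtype=tf.float32)
backbone = keras.applications.inception_v3.InceptionV3(
    include_top=False, weights='imagenet', input_shape=(299, 299, 3), pooling=None)
conv_map = backbone(probe_images)                    # (batch, 8, 8, 2048)
max_pooled = tf.reduce_max(conv_map, axis=[1, 2])    # what pooling='max' would give
avg_pooled = tf.reduce_mean(conv_map, axis=[1, 2])   # what pooling='avg' would give
# Evaluating max_pooled / avg_pooled alongside the hub module's features for the
# same batch would show which pooling the tensorflow model actually uses.
###Output
_____no_output_____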
###Code
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow_hub as hub
from tensorflow.python.client import device_lib
tf.reset_default_graph()
#tf.set_random_seed(1234)
print(tf.__version__)
print(keras.__version__)
devices = [x.name for x in device_lib.list_local_devices()]
print(devices)
batch_size = 5
batch = np.zeros(shape=(batch_size,299,299,3),dtype=np.float32)
for i in range(batch_size):
batch[i] = batch[i] + i
with tf.name_scope("Inputs"):
images = tf.placeholder(shape=(None,299,299,3),dtype=tf.float32)
with tf.name_scope("Tensorflow"):
tf_inception_v3 = hub.Module("https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1")
tf_features = tf_inception_v3(images)
with tf.name_scope("Keras"):
keras_inception_v3 = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=(299,299,3), pooling='max')
keras_features = keras_inception_v3(images)
for device in devices:
with tf.device(device):
print('Using device: ',device)
for i in range(3):
print('Session ',i)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print('first run')
tf_val,keras_val = sess.run([tf_features, keras_features],feed_dict={images:batch})
for b in range(batch_size):
print('image{}: tf={} keras={}'.format(b,np.sum(tf_val[b]),np.sum(keras_val[b])))
print('second run')
tf_val,keras_val = sess.run([tf_features, keras_features],feed_dict={images:batch})
for b in range(batch_size):
print('image{}: tf={} keras={}'.format(b,np.sum(tf_val[b]),np.sum(keras_val[b])))
print('')
###Output
WARNING:tensorflow:From c:\users\i040924\git\notebooks\.venv\lib\site-packages\tensorflow\python\ops\control_flow_ops.py:3632: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
###Markdown
How to fix it? You can load the Keras model weights within the scope of an existing session. It appears that the Keras model weights are materialized into the existing session as soon as the model is created.
###Code
for device in devices:
with tf.device(device):
print('Using device: ',device)
for i in range(3):
print('Session ',i)
with tf.Session() as sess:
tf.keras.backend.set_session(sess)
keras_inception_v3 = keras.applications.inception_v3.InceptionV3(include_top=False, weights='imagenet', input_shape=(299,299,3), pooling='max')
keras_features = keras_inception_v3(images)
#sess.run(tf.global_variables_initializer())
print('first run')
keras_val = sess.run(keras_features,feed_dict={images:batch})
for b in range(batch_size):
print('image{}: keras={}'.format(b,np.sum(keras_val[b])))
print('second run')
keras_val = sess.run(keras_features,feed_dict={images:batch})
for b in range(batch_size):
print('image{}: keras={}'.format(b,np.sum(keras_val[b])))
print('')
###Output
Using device: /device:CPU:0
Session 0
first run
image0: keras=1059.464599609375
image1: keras=1179.5281982421875
image2: keras=1332.2630615234375
image3: keras=1777.465087890625
image4: keras=2174.394775390625
second run
image0: keras=1059.464599609375
image1: keras=1179.5281982421875
image2: keras=1332.2630615234375
image3: keras=1777.465087890625
image4: keras=2174.394775390625
Session 1
first run
image0: keras=1059.464599609375
image1: keras=1179.5281982421875
image2: keras=1332.2630615234375
image3: keras=1777.465087890625
image4: keras=2174.394775390625
second run
image0: keras=1059.464599609375
image1: keras=1179.5281982421875
image2: keras=1332.2630615234375
image3: keras=1777.465087890625
image4: keras=2174.394775390625
Session 2
first run
image0: keras=1059.464599609375
image1: keras=1179.5281982421875
image2: keras=1332.2630615234375
image3: keras=1777.465087890625
image4: keras=2174.394775390625
second run
image0: keras=1059.464599609375
image1: keras=1179.5281982421875
image2: keras=1332.2630615234375
image3: keras=1777.465087890625
image4: keras=2174.394775390625
Using device: /device:GPU:0
Session 0
first run
image0: keras=1059.4658203125
image1: keras=1179.5284423828125
image2: keras=1332.263671875
image3: keras=1777.4642333984375
image4: keras=2174.392578125
second run
image0: keras=1059.4658203125
image1: keras=1179.5284423828125
image2: keras=1332.263671875
image3: keras=1777.4642333984375
image4: keras=2174.392578125
Session 1
first run
image0: keras=1059.4658203125
image1: keras=1179.5284423828125
image2: keras=1332.263671875
image3: keras=1777.4642333984375
image4: keras=2174.392578125
second run
image0: keras=1059.4658203125
image1: keras=1179.5284423828125
image2: keras=1332.263671875
image3: keras=1777.4642333984375
image4: keras=2174.392578125
Session 2
first run
image0: keras=1059.4658203125
image1: keras=1179.5284423828125
image2: keras=1332.263671875
image3: keras=1777.4642333984375
image4: keras=2174.392578125
second run
image0: keras=1059.4658203125
image1: keras=1179.5284423828125
image2: keras=1332.263671875
image3: keras=1777.4642333984375
image4: keras=2174.392578125
|
src/movieLensDataExploration/LowestRatedPopularMovieSpark.ipynb | ###Markdown
Or you can look for movies that had been rated at least ten times
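The helpers `loadMovieNames` and `parseInput` used below are not defined in this notebook; a hedged sketch of what they presumably look like, based on the MovieLens 100k file formats (u.item is pipe-separated, u.data is tab-separated):
###Code
# Assumed implementations of the helpers referenced in the main script below.
def loadMovieNames():
    movieNames = {}
    with open("ml-100k/u.item", encoding="ISO-8859-1") as f:
        for line in f:
            fields = line.split('|')
            movieNames[int(fields[0])] = fields[1]
    return movieNames

def parseInput(line):
    fields = line.split()
    # (movieID, (rating, 1.0)) so ratings can later be summed and counted per movie
    return (int(fields[1]), (float(fields[2]), 1.0))
###Output
_____no_output_____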
###Code
from pyspark import SparkConf, SparkContext

# The main script - create our SparkContext
conf = SparkConf().setAppName("WorstMovies")
sc = SparkContext(conf = conf)
# Load up our movie ID -> movie name lookup table
movieNames = loadMovieNames()
# Load up the raw u.data file
lines = sc.textFile("ml-100k/u.data")
# Convert to (movieID, (rating, 1.0))
movieRatings = lines.map(parseInput)
# Reduce to (movieID, (sumOfRatings, totalRatings))
ratingTotalsAndCount = movieRatings.reduceByKey(lambda movie1, movie2: ( movie1[0] + movie2[0], movie1[1] + movie2[1] ) )
# Filter out movies rated 10 or fewer times
popularTotalsAndCount = ratingTotalsAndCount.filter(lambda x: x[1][1] > 10)
# Map to (rating, averageRating)
averageRatings = popularTotalsAndCount.mapValues(lambda totalAndCount : totalAndCount[0] / totalAndCount[1])
# Sort by average rating
sortedMovies = averageRatings.sortBy(lambda x: x[1])
# Take the top 10 results
results = sortedMovies.take(10)
# Print them out:
for result in results:
print(movieNames[result[0]], result[1])
###Output
Children of the Corn: The Gathering (1996) 1.3157894736842106
Body Parts (1991) 1.6153846153846154
Amityville II: The Possession (1982) 1.6428571428571428
Lawnmower Man 2: Beyond Cyberspace (1996) 1.7142857142857142
Robocop 3 (1993) 1.7272727272727273
Free Willy 3: The Rescue (1997) 1.7407407407407407
Gone Fishin' (1997) 1.8181818181818181
Ready to Wear (Pret-A-Porter) (1994) 1.8333333333333333
Solo (1996) 1.8333333333333333
Vampire in Brooklyn (1995) 1.8333333333333333
|
tree/invertTree.ipynb | ###Markdown
Invert the tree left-to-right (mirror its left and right subtrees).
###Code
class TreeNode:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
from collections import deque
def treeLevelPrint(node):
if node is None:
return
q = deque()
q.append(node)
while 0<len(q):
level_count = len(q)
for _ in range(level_count):
crnt_node = q.popleft()
print(crnt_node.val, end = ' ')
if crnt_node.left:
q.append(crnt_node.left)
if crnt_node.right:
q.append(crnt_node.right)
print('')
root = TreeNode(1)
node3 = TreeNode(3)
node5 = TreeNode(5)
node7 = TreeNode(7)
node2 = TreeNode(2)
node4 = TreeNode(4)
node6 = TreeNode(6)
root.left = node3
root.right = node5
node3.left = node7
node3.right = node2
node5.left = node4
node5.right = node6
treeLevelPrint(root)
def recurInvertTree(node: TreeNode) -> TreeNode:
if node is None:
return
tmp_node = node.left
node.left = node.right
node.right = tmp_node
recurInvertTree(node.left)
recurInvertTree(node.right)
return node
root = recurInvertTree(root)
treeLevelPrint(root)
def iterInvertTree(node: TreeNode) -> TreeNode:
if node is None:
return
stack = []
stack.append(node)
while 0<len(stack):
crnt_node = stack.pop()
if crnt_node is None:
continue
tmp_node = crnt_node.left
crnt_node.left = crnt_node.right
crnt_node.right = tmp_node
stack.append(crnt_node.right)
stack.append(crnt_node.left)
    return node
root = iterInvertTree(root) #re-invert to original
treeLevelPrint(root)
###Output
_____no_output_____ |
MultiResUNetMain.ipynb | ###Markdown
Importing Modules. The necessary modules are: os, opencv, numpy, tqdm, matplotlib, keras and sklearn.
###Code
import os
import cv2
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from keras.layers import Input, Conv2D, MaxPooling2D, Conv2DTranspose, concatenate, BatchNormalization, Activation, add
from keras.models import Model, model_from_json
from keras.optimizers import Adam
from keras.layers.advanced_activations import ELU, LeakyReLU
from keras.utils.vis_utils import plot_model
from keras import backend as K
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
###Output
Using TensorFlow backend.
###Markdown
Constructing Training and Test Datasets. We first load our images from Google Drive, and change directories for future operations.
###Code
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
os.chdir("/content/drive/My Drive/CelebAMaskHQ/")
!ls
###Output
_____no_output_____
###Markdown
Loading the Images. We first load all the images and the corresponding segmentation masks. **Kindly store your data in the following format inside your CelebAMaskHQ folder:** all the original images in the 'images' folder, the left eye masks in the 'l_eye' folder, the right eye masks in the 'r_eye' folder, the lower lip masks in the 'l_lip' folder, and the upper lip masks in the 'u_lip' folder. For image '0.jpg', the left eye mask should be '0_l_eye.png' and stored in the l_eye folder, the right eye mask should be '0_r_eye.png' and stored in the r_eye folder, the lower lip mask should be '0_l_lip.png' and stored in the l_lip folder, and the upper lip mask should be '0_u_lip.png' and stored in the u_lip folder. In order to format the images in this manner, just paste the images into their corresponding folders and run the "EXTRA : Pre-processing of input files" section at the bottom of the notebook; the code snippet will do the rest. The images and the combined masks are stored in two lists, X and Y, respectively. Moreover, the images are resized to 256x192. NOTE: In Colab, StopIteration errors will occur at the start. The errors are caused by Drive timeouts, and will resolve themselves after repeated runs as more data is cached.
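A small helper illustrating the naming convention described above (a sketch; the folder layout is the one assumed throughout this notebook):
###Code
import os

def mask_paths(i, root="/content/drive/My Drive/CelebAMaskHQ"):
    """Expected image and mask paths for image index i under the layout described above."""
    return {
        "image": os.path.join(root, "images", "{}.jpg".format(i)),
        "l_eye": os.path.join(root, "l_eye", "{}_l_eye.png".format(i)),
        "r_eye": os.path.join(root, "r_eye", "{}_r_eye.png".format(i)),
        "l_lip": os.path.join(root, "l_lip", "{}_l_lip.png".format(i)),
        "u_lip": os.path.join(root, "u_lip", "{}_u_lip.png".format(i)),
    }

# Example: does sample 0 have its image and all four masks in place?
print(all(os.path.exists(p) for p in mask_paths(0).values()))
###Output
_____no_output_____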
###Code
img_files = next(os.walk('images/'))[2]
leye_files = next(os.walk('l_eye/'))[2]
reye_files = next(os.walk('r_eye/'))[2]
llip_files = next(os.walk('l_lip/'))[2]
ulip_files = next(os.walk('u_lip/'))[2]
# Used to display the differences between the number of masks available
# For each folder
print(len(img_files))
print(len(leye_files))
print(len(reye_files))
print(len(llip_files))
print(len(ulip_files))
# We create a checkpoint mechanism in order to compensate for
# Google's 12 hour runtime restrictions. It stores the arrays after
# every 2000 iterations, and will load them each time we run our data
try:
X = np.load('X.npy')
Y = np.load('Y.npy')
i = np.load('i.npy')
count = np.load('count.npy')
except IOError:
X = []
Y = []
i = 0
count = 0
pbar = tqdm(total = 30000-i)
while(i < 30000):
img_f1 = "/content/drive/My Drive/CelebAMaskHQ/images/"+str(i)+".jpg"
if(os.path.exists(img_f1)):
print("Image "+img_f1+" exists")
if(os.path.exists('/content/drive/My Drive/CelebAMaskHQ/l_eye/'+str(i)+'_l_eye.png') and os.path.exists('/content/drive/My Drive/CelebAMaskHQ/r_eye/'+str(i)+'_r_eye.png') and os.path.exists('/content/drive/My Drive/CelebAMaskHQ/l_lip/'+str(i)+'_l_lip.png') and os.path.exists('/content/drive/My Drive/CelebAMaskHQ/u_lip/'+str(i)+'_u_lip.png') ):
# NOTE : The above if case is made in order to compensate for incomplete data
# i.e there is a lack of masks for certain images, as noted by the
# difference of number of file objects per mask in the dataset
# Taking main image as input and resizing
img = cv2.imread('images/'+str(i)+'.jpg', cv2.IMREAD_COLOR)
resized_img = cv2.resize(img,(256, 192), interpolation = cv2.INTER_CUBIC)
X.append(resized_img)
# We are reading each and every mask available
# In accordance to the dataset provided
leye = cv2.imread('l_eye/{}'.format(str(i)+'_l_eye.png'), cv2.IMREAD_GRAYSCALE)
reye = cv2.imread('r_eye/{}'.format(str(i)+'_r_eye.png'), cv2.IMREAD_GRAYSCALE)
llip = cv2.imread('l_lip/{}'.format(str(i)+'_l_lip.png'), cv2.IMREAD_GRAYSCALE)
ulip = cv2.imread('u_lip/{}'.format(str(i)+'_u_lip.png'), cv2.IMREAD_GRAYSCALE)
# We resize our data to fit params of MultiResUNet
resized_leye = cv2.resize(leye,(256, 192), interpolation = cv2.INTER_CUBIC)
resized_reye = cv2.resize(reye,(256, 192), interpolation = cv2.INTER_CUBIC)
resized_llip = cv2.resize(llip,(256, 192), interpolation = cv2.INTER_CUBIC)
resized_ulip = cv2.resize(ulip,(256, 192), interpolation = cv2.INTER_CUBIC)
# The below statements combine all the four masks into one picture
# In order to satisfy question requirements(preprocessing)
op = np.maximum(np.array(resized_leye),np.array(resized_reye))
op = np.maximum(op, np.array(resized_llip))
op = np.maximum(op,np.array(resized_ulip))
count += 1
Y.append(op)
pbar.update(1)
print("Picture "+str(i)+" is valid")
i += 1
print("Iteration "+str(i)+" complete")
# Aids in checkpointing data, overwrites after every 2000 iters
# To protect against runtime issues
if(i % 2000 == 0):
np.save('X',X)
np.save('Y',Y)
np.save('i',i)
np.save('count',count)
print("The number of valid images(images entered) : "+str(count))
# Saving arrays after completion of loop
np.save('X',X)
np.save('Y',Y)
np.save('i',i)
np.save('count',count)
###Output
_____no_output_____
###Markdown
Run in order to derive data from the original parsing method. NOTE - In order to save time and handle the parsing process within Colab, I had originally divided the dataset into 3 parts and ran 3 different notebooks simultaneously across the splits for faster pre-processing. The links to the original (workaround) notebooks are provided below: https://colab.research.google.com/drive/1xz2f6bkIe7HQG5TAKSxdThYj7DrquULF https://colab.research.google.com/drive/19Q_rWBzq8Jy7zdJCiz1xIVbaw8JVAyq9 https://colab.research.google.com/drive/13zogbjdIvnSFygFAxuosUsl0yFrgS_Yr Or, alternatively, check the /workaround section of the repository. **In order to attain the split, just run the notebooks mentioned above; the resulting numpy arrays will be stored in your drive automatically.** PS: This section may require a high-RAM runtime for the notebook; Colab may crash and reallocate resources automatically. In order to accommodate the size of the dataset, we only use the first 20,000 images extracted. In order to use the whole extracted dataset, just uncomment the sections involving X3 and Y3. To further ease the use of resources, do not run the following code and check out the 'improved' sub-section of our 'Define Model, Train, Evaluate' part of the notebook.
###Code
# Loading our arrays
X1 = np.load('X1.npy')
X2 = np.load('X2.npy')
#X3 = np.load('X3.npy')
Y1 = np.load('Y1.npy')
Y2 = np.load('Y2.npy')
#Y3 = np.load('Y3.npy')
# Printing array shapes for confirmation
print(X1.shape)
print(X2.shape)
#print(X3.shape)
print(Y1.shape)
print(Y2.shape)
#print(Y3.shape)
# We use the append function with axis=0 in order to preserve the
# original shape of the arrays
X = np.append(X1,X2,axis=0)
#X = np.append(X,X3,axis=0)
Y = np.append(Y1,Y2,axis=0)
#Y = np.append(Y,Y3,axis=0)
# NOTE : The first dimension of both the arrays should be the same
# Else data may be corrupted/contaminated
print(X.shape)
print(Y.shape)
#Saving arrays for future use
np.save('X',X)
np.save('Y',Y)
###Output
_____no_output_____
###Markdown
Train-Test Split. The X, Y lists are converted to numpy arrays for convenience. Furthermore, the images are divided by 255 to bring the pixel values down to the [0...1] range. On the other hand, the segmentation masks are converted to binary (0 or 1) values. Using Sklearn *train_test_split* we split the data randomly into 80% training and 20% testing data. NOTE: Due to Colab's constraints, we are using the first ten thousand images only. In order to use the first twenty thousand processed images, run the above code snippet and comment out the lines involving X3 and Y3 (if they are not commented out already); then, for the snippet below, replace 'X1.npy' with 'X.npy' in the first line, and replace 'Y1.npy' with 'Y.npy'. In order to use ALL the processed images: if you are using the pre-processed data from the notebooks run in parallel, follow the above step and make the changes required in the above code snippet; otherwise, simply replace 'X1.npy' with 'X.npy' in the first line, and replace 'Y1.npy' with 'Y.npy'.
###Code
def LoadData(xloc='X1.npy', yloc='Y1.npy'):
X = np.load(xloc)
Y = np.load(yloc)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=3)
Y_train = Y_train.reshape((Y_train.shape[0],Y_train.shape[1],Y_train.shape[2],1))
Y_test = Y_test.reshape((Y_test.shape[0],Y_test.shape[1],Y_test.shape[2],1))
X_train = X_train / 255
X_test = X_test / 255
Y_train = Y_train / 255
Y_test = Y_test / 255
Y_train = np.round(Y_train,0)
Y_test = np.round(Y_test,0)
print(X_train.shape)
print(Y_train.shape)
print(X_test.shape)
print(Y_test.shape)
return X_train,Y_train,X_test,Y_test
###Output
_____no_output_____
###Markdown
MultiResUNet Model: Model Definition. The MultiResUNet model as described in the [paper](https://arxiv.org/abs/1902.04049) can be found [here](https://github.com/nibtehaz/MultiResUNet/blob/master/MultiResUNet.py)
###Code
def conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(1, 1), activation='relu', name=None):
'''
2D Convolutional layers
Arguments:
x {keras layer} -- input layer
filters {int} -- number of filters
num_row {int} -- number of rows in filters
num_col {int} -- number of columns in filters
Keyword Arguments:
padding {str} -- mode of padding (default: {'same'})
strides {tuple} -- stride of convolution operation (default: {(1, 1)})
activation {str} -- activation function (default: {'relu'})
name {str} -- name of the layer (default: {None})
Returns:
[keras layer] -- [output layer]
'''
x = Conv2D(filters, (num_row, num_col), strides=strides, padding=padding, use_bias=False)(x)
x = BatchNormalization(axis=3, scale=False)(x)
if(activation == None):
return x
x = Activation(activation, name=name)(x)
return x
def trans_conv2d_bn(x, filters, num_row, num_col, padding='same', strides=(2, 2), name=None):
'''
2D Transposed Convolutional layers
Arguments:
x {keras layer} -- input layer
filters {int} -- number of filters
num_row {int} -- number of rows in filters
num_col {int} -- number of columns in filters
Keyword Arguments:
padding {str} -- mode of padding (default: {'same'})
strides {tuple} -- stride of convolution operation (default: {(2, 2)})
name {str} -- name of the layer (default: {None})
Returns:
[keras layer] -- [output layer]
'''
x = Conv2DTranspose(filters, (num_row, num_col), strides=strides, padding=padding)(x)
x = BatchNormalization(axis=3, scale=False)(x)
return x
def MultiResBlock(U, inp, alpha = 1.67):
'''
MultiRes Block
Arguments:
U {int} -- Number of filters in a corrsponding UNet stage
inp {keras layer} -- input layer
Returns:
[keras layer] -- [output layer]
'''
W = alpha * U
shortcut = inp
shortcut = conv2d_bn(shortcut, int(W*0.167) + int(W*0.333) +
int(W*0.5), 1, 1, activation=None, padding='same')
conv3x3 = conv2d_bn(inp, int(W*0.167), 3, 3,
activation='relu', padding='same')
conv5x5 = conv2d_bn(conv3x3, int(W*0.333), 3, 3,
activation='relu', padding='same')
conv7x7 = conv2d_bn(conv5x5, int(W*0.5), 3, 3,
activation='relu', padding='same')
out = concatenate([conv3x3, conv5x5, conv7x7], axis=3)
out = BatchNormalization(axis=3)(out)
out = add([shortcut, out])
out = Activation('relu')(out)
out = BatchNormalization(axis=3)(out)
return out
def ResPath(filters, length, inp):
'''
ResPath
Arguments:
filters {int} -- [description]
length {int} -- length of ResPath
inp {keras layer} -- input layer
Returns:
[keras layer] -- [output layer]
'''
shortcut = inp
shortcut = conv2d_bn(shortcut, filters, 1, 1,
activation=None, padding='same')
out = conv2d_bn(inp, filters, 3, 3, activation='relu', padding='same')
out = add([shortcut, out])
out = Activation('relu')(out)
out = BatchNormalization(axis=3)(out)
for i in range(length-1):
shortcut = out
shortcut = conv2d_bn(shortcut, filters, 1, 1,
activation=None, padding='same')
out = conv2d_bn(out, filters, 3, 3, activation='relu', padding='same')
out = add([shortcut, out])
out = Activation('relu')(out)
out = BatchNormalization(axis=3)(out)
return out
def MultiResUnet(height, width, n_channels):
'''
MultiResUNet
Arguments:
height {int} -- height of image
width {int} -- width of image
n_channels {int} -- number of channels in image
Returns:
[keras model] -- MultiResUNet model
'''
inputs = Input((height, width, n_channels))
mresblock1 = MultiResBlock(32, inputs)
pool1 = MaxPooling2D(pool_size=(2, 2))(mresblock1)
mresblock1 = ResPath(32, 4, mresblock1)
mresblock2 = MultiResBlock(32*2, pool1)
pool2 = MaxPooling2D(pool_size=(2, 2))(mresblock2)
mresblock2 = ResPath(32*2, 3, mresblock2)
mresblock3 = MultiResBlock(32*4, pool2)
pool3 = MaxPooling2D(pool_size=(2, 2))(mresblock3)
mresblock3 = ResPath(32*4, 2, mresblock3)
mresblock4 = MultiResBlock(32*8, pool3)
pool4 = MaxPooling2D(pool_size=(2, 2))(mresblock4)
mresblock4 = ResPath(32*8, 1, mresblock4)
mresblock5 = MultiResBlock(32*16, pool4)
up6 = concatenate([Conv2DTranspose(
32*8, (2, 2), strides=(2, 2), padding='same')(mresblock5), mresblock4], axis=3)
mresblock6 = MultiResBlock(32*8, up6)
up7 = concatenate([Conv2DTranspose(
32*4, (2, 2), strides=(2, 2), padding='same')(mresblock6), mresblock3], axis=3)
mresblock7 = MultiResBlock(32*4, up7)
up8 = concatenate([Conv2DTranspose(
32*2, (2, 2), strides=(2, 2), padding='same')(mresblock7), mresblock2], axis=3)
mresblock8 = MultiResBlock(32*2, up8)
up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(
2, 2), padding='same')(mresblock8), mresblock1], axis=3)
mresblock9 = MultiResBlock(32, up9)
conv10 = conv2d_bn(mresblock9, 1, 1, 1, activation='sigmoid')
model = Model(inputs=[inputs], outputs=[conv10])
return model
###Output
_____no_output_____
###Markdown
Auxiliary Functions: Custom Metrics. Since Keras does not have built-in support for computing the Dice Coefficient or Jaccard Index (at the time of writing), the following functions are declared.
###Code
def dice_coef(y_true, y_pred):
smooth = 0.0
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def jacard(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum ( y_true_f * y_pred_f)
union = K.sum ( y_true_f + y_pred_f - y_true_f * y_pred_f)
return intersection/union
###Output
_____no_output_____
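###Markdown
A quick numeric sanity check of the two metrics on toy binary masks (plain NumPy, independent of the Keras versions above); it also illustrates the identity Dice = 2J / (1 + J):
###Code
import numpy as np

a = np.array([1, 1, 0, 0, 1], dtype=float)  # toy ground truth
b = np.array([1, 0, 0, 1, 1], dtype=float)  # toy prediction
intersection = np.sum(a * b)
union = np.sum(a + b - a * b)
j = intersection / union
d = 2. * intersection / (np.sum(a) + np.sum(b))
print(j, d, 2 * j / (1 + j))  # the last two values coincide
###Output
_____no_output_____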
###Markdown
Saving Model. A function to save the model.
###Code
def saveModel(model):
model_json = model.to_json()
try:
os.makedirs('models')
except:
pass
fp = open('models/modelP5A.json','w')
fp.write(model_json)
model.save_weights('models/modelWA.h5')
###Output
_____no_output_____
###Markdown
Evaluate the Model. We evaluate the model on the test data (X_test, Y_test). We compute the values of the Jaccard Index and Dice Coefficient, and save the predicted segmentations of the first 10 images. The best model is also saved (this could have been done using Keras callbacks as well). **NOTE: The files saved using this method do not follow the format specified in the task. For proper input and output specifications, kindly refer to the Demonstrations linked at the bottom of this notebook.**
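The parenthetical above refers to Keras callbacks; a hedged sketch of that alternative (it assumes the model is compiled with the custom `jacard` metric and that validation data is passed to `fit`, so Keras exposes 'val_jacard'; the checkpoint filename is hypothetical):
###Code
from keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint('models/modelW_best.h5',
                             monitor='val_jacard', mode='max',
                             save_best_only=True, save_weights_only=True,
                             verbose=1)
# model.fit(X_train, Y_train, validation_data=(X_test, Y_test),
#           batch_size=10, epochs=20, callbacks=[checkpoint])
###Output
_____no_output_____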
###Code
def evaluateModel(model, X_test, Y_test, batchSize):
try:
os.makedirs('results')
except:
pass
yp = model.predict(x=X_test, batch_size=batchSize, verbose=1)
yp = np.round(yp,0)
for i in range(10):
plt.figure(figsize=(20,10))
plt.subplot(1,3,1)
plt.imshow(np.array(X_test[i]))
plt.title('Input')
plt.subplot(1,3,2)
plt.imshow(Y_test[i].reshape(Y_test[i].shape[0],Y_test[i].shape[1]))
plt.title('Ground Truth')
plt.subplot(1,3,3)
plt.imshow(yp[i].reshape(yp[i].shape[0],yp[i].shape[1]))
plt.title('Prediction')
intersection = yp[i].ravel() * Y_test[i].ravel()
union = yp[i].ravel() + Y_test[i].ravel() - intersection
jacard = (np.sum(intersection)/np.sum(union))
plt.suptitle('Jacard Index'+ str(np.sum(intersection)) +'/'+ str(np.sum(union)) +'='+str(jacard))
plt.savefig('results/'+str(i)+'fdbl.png',format='png')
plt.close()
jacard = 0
dice = 0
for i in range(len(Y_test)):
yp_2 = yp[i].ravel()
y2 = Y_test[i].ravel()
intersection = yp_2 * y2
union = yp_2 + y2 - intersection
jacard += (np.sum(intersection)/np.sum(union))
dice += (2. * np.sum(intersection) ) / (np.sum(yp_2) + np.sum(y2))
jacard /= len(Y_test)
dice /= len(Y_test)
print('Jacard Index : '+str(jacard))
print('Dice Coefficient : '+str(dice))
fp = open('models/logfdbl.txt','a')
fp.write(str(jacard)+'\n')
fp.close()
fp = open('models/bestfdbl.txt','r')
best = fp.read()
fp.close()
if(jacard>float(best)):
print('***********************************************')
print('Jacard Index improved from '+str(best)+' to '+str(jacard))
print('***********************************************')
fp = open('models/bestfdbl.txt','w')
fp.write(str(jacard))
fp.close()
saveModel(model)
###Output
_____no_output_____
###Markdown
Training the Model. The model is trained and evaluated after each epoch.
###Code
def trainStep(model, X_train, Y_train, X_test, Y_test, epochs, batchSize):
for epoch in tqdm(range(epochs)):
print('Epoch : {}'.format(epoch+1))
model.fit(x=X_train, y=Y_train, batch_size=batchSize, epochs=1, verbose=1)
evaluateModel(model,X_test, Y_test,batchSize)
return model
###Output
_____no_output_____
###Markdown
Define Model, Train and Evaluate. The basic version: loads the entire dataset and runs it for the given number of epochs. Kindly change the names of the files as per requirements.
###Code
os.chdir("/content/drive/My Drive/CelebAMaskHQ/")
!ls
model = MultiResUnet(height=192, width=256, n_channels=3)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[dice_coef, jacard, 'accuracy'])
saveModel(model)
fp = open('models/logfdbl.txt','w')
fp.close()
fp = open('models/bestfdbl.txt','w')
fp.write('-1.0')
fp.close()
X_train,Y_train,X_test,Y_test = LoadData('X.npy','Y.npy')
# NOTE : BATCH SIZE MAY NEED TO BE REDUCED IN ORDER FOR IT TO WORK ACROSS DEVICES
trainStep(model, X_train, Y_train, X_test, Y_test, epochs=20, batchSize=10)
###Output
_____no_output_____
###Markdown
Improved Version. This section specializes in using the split dataset mentioned above (it currently accommodates the 3*10000 split used for training). It runs each split for the required number of epochs, then deletes the variables from memory in order to avoid memory overload.
###Code
#fd - full dataset
os.chdir("/content/drive/My Drive/CelebAMaskHQ/")
!ls
model = MultiResUnet(height=192, width=256, n_channels=3)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[dice_coef, jacard, 'accuracy'])
saveModel(model)
fp = open('models/logfdbl.txt','w')
fp.close()
fp = open('models/bestfdbl.txt','w')
fp.write('-1.0')
fp.close()
for i in tqdm(range(1,4)):
print("Using dataset number :"+str(i))
X_train,Y_train,X_test,Y_test = LoadData('X'+str(i)+'.npy','Y'+str(i)+'.npy')
# NOTE : BATCH SIZE MAY NEED TO BE REDUCED IN ORDER FOR IT TO WORK ACROSS DEVICES
trainStep(model, X_train, Y_train, X_test, Y_test, epochs=20, batchSize=10)
# Done in order to accomodate for RAM overload
del X_train
del Y_train
del X_test
del Y_test
###Output
_____no_output_____
###Markdown
EXTRA : Pre-processing of input files. We pre-process our input files in order to make them work with our current implementation. Note: This may cause Google Drive timeouts due to the large size of the files, and show an I/O error. In this case just re-run the snippet.
###Code
import os
# Function to rename multiple files
def renameFiles(folderName):
for filename in os.listdir(folderName):
filenamenew = filename.split("_"+folderName)
ipstring = filenamenew[0]
opstring = ipstring.lstrip('0')
if(opstring == ''):
opstring = '0'
dst =opstring + "_" + folderName + ".png"
src =folderName +'/' + filename
dst =folderName + '/' + dst
print("Original "+src)
print("Final "+dst)
# rename() function will
# rename all the files
os.rename(src, dst)
print("Done")
# Driver Code
if __name__ == '__main__':
os.chdir("/content/drive/My Drive/CelebAMaskHQ/")
!ls
# Calling main() function
# Uncomment the code below to run it, with specific features you want modified
# renameFiles("r_eye")
###Output
_____no_output_____ |
notebooks/Mass function.ipynb | ###Markdown
Read node masses
###Code
import numpy as np
import h5py as h5
import matplotlib.pyplot as plt
from scipy.optimize import least_squares

file = h5.File('../output/lcdm.h5', 'r')
nodes = [file[str(s)]["nodes"][:] for s in range(3)]
t = [file[str(s)].attrs["time"] for s in range(3)]
L = file.attrs["L"]
file.close()
###Output
_____no_output_____
###Markdown
Define log-normal function
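For reference, the curve built in the next cell is $f(x) = A \exp\left(-\frac{\ln^2(x/\mu)}{2\sigma^2}\right)$, i.e. a Gaussian in $\ln x$ centred at $\ln\mu$ with width $\sigma$ and amplitude $A$.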
###Code
def log_normal(A, mu, sigma):
def f(x):
y = np.log(x / mu)
return A * np.exp(-y**2 / (2 * sigma**2))
return f
###Output
_____no_output_____
###Markdown
Set parameters
###Code
lower_bound = -1.3
upper_bound = 2.7
N = 52
V = L**3
###Output
_____no_output_____
###Markdown
Produce plot
###Code
bins = np.logspace(lower_bound, upper_bound, N)
bin_width = bins[1:] - bins[:-1]
logbin_width = np.log(bins[1:] / bins[:-1])
x = np.sqrt(bins[:-1]) * np.sqrt(bins[1:])
fig, axes = plt.subplots(1, 2, figsize=(12, 6))
axes[1].set_yscale("log")
for ax in axes:
ax.set_xscale("log")
ax.set_xlim(10**lower_bound, 10**upper_bound)
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.set_xlabel("m [$h^{-3}{\\rm Mpc}^3\\ \\rho_u$]")
ax.set_ylabel("N/(V $\Delta$ log m) [$h^3{\\rm Mpc}^{-3}$]")
for i in range(3):
masses = nodes[i][np.where(nodes[i]['node_type'] == 4)]['mass']
hist = np.histogram(masses, bins=bins)
y = hist[0] / logbin_width / V
with np.errstate(invalid='ignore'):
fit = least_squares(
lambda args: log_normal(*args)(x) - y,
np.ones(3))
for ax in axes:
ax.bar(x, y, width=bin_width,
label="D = {}".format(t[i]),
alpha=0.5, edgecolor='k', zorder=-i)
ax.plot(x, log_normal(*fit.x)(x), c='k', lw=0.8, zorder=10)
axes[1].legend()
axes[1].set_ylim(1e-5, 1e-1)
fig.savefig('mass-functions.svg', bbox_inches='tight')
###Output
_____no_output_____ |
locale/examples/00-load/create-point-cloud.ipynb | ###Markdown
Create Point Cloud. Create a :class:`pyvista.PolyData` object from a point cloud of vertices and scalar arrays for those points.
###Code
import numpy as np
import pyvista as pv
from pyvista import examples
###Output
_____no_output_____
###Markdown
Point clouds are generally constructed in the :class:`pyvista.PolyData` class and can easily have scalar/vector data arrays associated with the point cloud. In this example, we'll work a bit backwards using a point cloud that is available from our ``examples`` module. This however is no different than creating a PyVista mesh with your own NumPy arrays of vertex locations.
###Code
# Define some helpers - ignore these and use your own data!
def generate_points(subset=0.02):
"""A helper to make a 3D NumPy array of points (n_points by 3)"""
dataset = examples.download_lidar()
ids = np.random.randint(low=0, high=dataset.n_points-1,
size=int(dataset.n_points * subset))
return dataset.points[ids]
points = generate_points()
# Print first 5 rows to prove its a numpy array (n_points by 3)
# Columns are (X Y Z)
points[0:5, :]
###Output
_____no_output_____
###Markdown
Now that you have a NumPy array of points/vertices either from our sample data or your own project, creating a PyVista mesh of those points is simply:
###Code
point_cloud = pv.PolyData(points)
point_cloud
###Output
_____no_output_____
###Markdown
And we can even do a sanity check
###Code
np.allclose(points, point_cloud.points)
###Output
_____no_output_____
###Markdown
And now that we have a PyVista mesh, we can plot it. Note that we add an option to use eye dome lighting - this is a shading technique to improve depth perception with point clouds (learn more in `ref_edl`).
###Code
point_cloud.plot(eye_dome_lighting=True)
###Output
_____no_output_____
###Markdown
Now what if you have data attributes (scalar/vector arrays) that you'd like to associate with every node of your mesh? You can easily add NumPy data arrays that have a length equal to the number of points in the mesh along the first axis. For example, let's add a few arrays to this new ``point_cloud`` mesh. Make an array of scalar values with the same length as the points array. Each element in this array will correspond to points at the same index:
###Code
# Make data array using z-component of points array
data = points[:,-1]
# Add that data to the mesh with the name "uniform dist"
point_cloud["elevation"] = data
###Output
_____no_output_____
###Markdown
And now we can plot the point cloud with that random data. PyVista is smart enough to plot the scalar array you added by default. Note that this time, we specify to render every point as its own sphere.
###Code
point_cloud.plot(render_points_as_spheres=True)
###Output
_____no_output_____
###Markdown
That data is kind of boring, right? You can also add data arrays with more than one scalar value - perhaps a vector with three elements? Let's make a little function that will compute vectors for every node in the point cloud and add those vectors to the mesh. This time, we're going to create a totally new, random point cloud.
###Code
# Create random XYZ points
points = np.random.rand(100, 3)
# Make PolyData
point_cloud = pv.PolyData(points)
def compute_vectors(mesh):
origin = mesh.center
vectors = mesh.points - origin
vectors = vectors / np.linalg.norm(vectors, axis=1)[:, None]
return vectors
vectors = compute_vectors(point_cloud)
vectors[0:5, :]
point_cloud['vectors'] = vectors
###Output
_____no_output_____
###Markdown
Now we can make arrows using those vectors using the glyph filter (see `glyph_example` for more details).
###Code
arrows = point_cloud.glyph(orient='vectors', scale=False, factor=0.15,)
# Display the arrows
plotter = pv.Plotter()
plotter.add_mesh(point_cloud, color='maroon', point_size=10.,
render_points_as_spheres=True)
plotter.add_mesh(arrows, color='lightblue')
# plotter.add_point_labels([point_cloud.center,], ['Center',],
# point_color='yellow', point_size=20)
plotter.show_grid()
plotter.show()
###Output
_____no_output_____ |
vlp/CLIP_retrieval_experiments/CLIP_encodings.ipynb | ###Markdown
A list of jobs in this notebook: prepare question encodings (Qimg x 512, Qtxt x 512); prepare snippet encodings (540K x 512); prepare image encodings (390K x 512); matrix multiplications: Qimg x 540K, Qimg x 390K, Qtxt x 540K, Qtxt x 390K; select and rank topk: Qimgx540K_top2000_i, Qimgx390K_top2000_i, Qtxtx540K_top2000_i, Qtxtx390K_top2000_i; some plots showing that question-img similarities are systematically lower than question-snippet similarities; compute the Recall curve (recall@[2, 10, 20, 50, 100, ..., 2000]) for zero-shot full-scale CLIP dense retrieval; compute retrieval F1 for CLIP's top2; write to eval.ai submission files for CLIP's top2; write to pred_dataset files for 1. having VLP filter rerank CLIP's top20, 2. having VLP QA run on CLIP's top2; after getting VLP's reranking of CLIP's top20, 1. compute retrieval F1 for CLIP(20)-->VLP(2), 2. write to pred_dataset files for QA.
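The later items in this list (similarity matrices, top-k selection, recall) are not captured in the cells that follow; a minimal sketch of how they can be computed from the encodings saved in this notebook (Q and S stand for the question and source matrices produced below; `positives` is a hypothetical list of gold source-id sets per question):
###Code
import torch
import torch.nn.functional as F

def topk_retrieval(Q, S, k=2000):
    # Cosine similarity: L2-normalize both sides, then a single matrix multiply.
    Qn = F.normalize(Q.float(), dim=-1)
    Sn = F.normalize(S.float(), dim=-1)
    scores = Qn @ Sn.t()                 # (num_questions, num_sources)
    topk = scores.topk(k, dim=-1)
    return topk.values, topk.indices     # ranked scores and source ids

def recall_at(indices, positives, k):
    # indices: (num_questions, >=k) ranked source ids; positives: list of sets of gold ids
    hits = [len(set(idx[:k].tolist()) & pos) / max(len(pos), 1)
            for idx, pos in zip(indices, positives)]
    return sum(hits) / len(hits)
###Output
_____no_output_____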
###Code
from tqdm import tqdm
import json, random, time, os, base64, copy
import clip, pickle
import numpy as np
from pprint import pprint
from io import BytesIO
from collections import Counter, defaultdict
import matplotlib.pyplot as plt
%matplotlib inline
np.set_printoptions(precision=4)
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torch
data_dir = "/home/yingshac/CYS/WebQnA/WebQnA_data_new/"
large_matrices_dir = "/data/yingshac/WebQA/large_matrices/CLIP_retrieval"
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/16", device=device)
clip.available_models()
#dataset = json.load(open("/home/yingshac/CYS/WebQnA/WebQnA_data_new/WebQA_train_val.json", "r"))
dataset = json.load(open("/home/yingshac/CYS/WebQnA/WebQnA_data_new/WebQA_0904_concat_newimgid_newguid.json", "r"))
print(Counter([dataset[k]['split'] for k in dataset]))
print(len(set([dataset[k]['Guid'] for k in dataset])))
print(Counter([dataset[k]['Qcate'] for k in dataset]))
# Read val_imgguid2qid, val_txtguid2qid
val_imgguid2qid = pickle.load(open(os.path.join(data_dir, "CLIP_retrieval_experiments/val_imgguid2qid.pkl"), "rb"))
val_txtguid2qid = pickle.load(open(os.path.join(data_dir, "CLIP_retrieval_experiments/val_txtguid2qid.pkl"), "rb"))
val_txtqid2guid = {i:guid for guid, i in val_txtguid2qid.items()}
val_imgqid2guid = {i:guid for guid, i in val_imgguid2qid.items()}
print(len(val_imgqid2guid), len(val_txtqid2guid), val_imgqid2guid[999], val_txtqid2guid[888])
### Generate question encodings for val
### Generate Qimg x 512 matrix
rows = []
bs = 512
batched_ids = []
num_bs = len(val_imgqid2guid)//bs
for j in tqdm(range(num_bs)):
batched_ids = list(range(j*bs, j*bs+bs))
text_input = clip.tokenize([dataset[val_imgqid2guid[i]]['Q'] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
if not len(val_imgqid2guid) % bs == 0:
batched_ids = list(range(num_bs*bs, len(val_imgqid2guid)))
text_input = clip.tokenize([dataset[val_imgqid2guid[i]]['Q'] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
Qimgx512 = torch.cat(rows)
print(Qimgx512.size())
torch.save(Qimgx512, os.path.join(data_dir, "CLIP_retrieval_experiments/Qimgx512_val.pt"))
### Generate Qtxt x 512 matrix
rows = []
bs = 512
batched_ids = []
num_bs = len(val_txtqid2guid)//bs
for j in tqdm(range(num_bs)):
batched_ids = list(range(j*bs, j*bs+bs))
text_input = clip.tokenize([dataset[val_txtqid2guid[i]]['Q'] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
if not len(val_txtqid2guid) % bs == 0:
batched_ids = list(range(num_bs*bs, len(val_txtqid2guid)))
text_input = clip.tokenize([dataset[val_txtqid2guid[i]]['Q'] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
Qtxtx512 = torch.cat(rows)
print(Qtxtx512.size())
torch.save(Qtxtx512, os.path.join(data_dir, "CLIP_retrieval_experiments/Qtxtx512_val.pt"))
# Read test_imgguid2qid, test_txtguid2qid
test_imgguid2qid = pickle.load(open(os.path.join(data_dir, "CLIP_retrieval_experiments/test_imgguid2qid.pkl"), "rb"))
test_txtguid2qid = pickle.load(open(os.path.join(data_dir, "CLIP_retrieval_experiments/test_txtguid2qid.pkl"), "rb"))
test_txtqid2guid = {i:guid for guid, i in test_txtguid2qid.items()}
test_imgqid2guid = {i:guid for guid, i in test_imgguid2qid.items()}
print(len(test_imgqid2guid), len(test_txtqid2guid), test_imgqid2guid[999], test_txtqid2guid[888])
### Generate question encodings for test
### Generate Qimg x 512 matrix
rows = []
bs = 512
batched_ids = []
num_bs = len(test_imgqid2guid)//bs
for j in tqdm(range(num_bs)):
batched_ids = list(range(j*bs, j*bs+bs))
text_input = clip.tokenize([dataset[test_imgqid2guid[i]]['Q'] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
if not len(test_imgqid2guid) % bs == 0:
batched_ids = list(range(num_bs*bs, len(test_imgqid2guid)))
text_input = clip.tokenize([dataset[test_imgqid2guid[i]]['Q'] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
Qimgx512 = torch.cat(rows)
print(Qimgx512.size())
torch.save(Qimgx512, os.path.join(data_dir, "CLIP_retrieval_experiments/Qimgx512_test.pt"))
### Generate Qtxt x 512 matrix
rows = []
bs = 512
batched_ids = []
num_bs = len(test_txtqid2guid)//bs
for j in tqdm(range(num_bs)):
batched_ids = list(range(j*bs, j*bs+bs))
text_input = clip.tokenize([dataset[test_txtqid2guid[i]]['Q'] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
if not len(test_txtqid2guid) % bs == 0:
batched_ids = list(range(num_bs*bs, len(test_txtqid2guid)))
text_input = clip.tokenize([dataset[test_txtqid2guid[i]]['Q'] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
Qtxtx512 = torch.cat(rows)
print(Qtxtx512.size())
torch.save(Qtxtx512, os.path.join(large_matrices_dir, "Qtxtx512_test.pt"))
# Read fact2uniid
fact2uniid = pickle.load(open(os.path.join(data_dir, "CLIP_retrieval_experiments/fact2uniid.pkl"), "rb"))
uniid2fact = {i:fact for fact, i in fact2uniid.items()}
print(len(uniid2fact), uniid2fact[199999])
### Generate 540k x 512 matrix
rows = []
bs = 128
batched_ids = []
num_bs = len(uniid2fact)//bs
for j in tqdm(range(num_bs)):
batched_ids = list(range(j*bs, j*bs+bs))
text_input = clip.tokenize([uniid2fact[i] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
if not len(uniid2fact) % bs == 0:
batched_ids = list(range(num_bs*bs, len(uniid2fact)))
text_input = clip.tokenize([uniid2fact[i] for i in batched_ids], truncate=True).to(device)
with torch.no_grad():
text_features = model.encode_text(text_input)
rows.append(text_features)
T540Kx512 = torch.cat(rows)
print(T540Kx512.size())
torch.save(T540Kx512, os.path.join(data_dir, "CLIP_retrieval_experiments/T540Kx512.pt"))
with open("/data/yingshac/WebQA/base64_0904/imgs.lineidx", "r") as fp_img:
img_lineidx = [int(i.strip()) for i in fp_img.readlines()]
print(len(img_lineidx))
def get_image_input(i):
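# Seek to image i's byte offset in the TSV, decode its base64 payload into a PIL image,
# and apply CLIP's preprocess transform so it can be fed to model.encode_image.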
with open("/data/yingshac/WebQA/base64_0904/imgs.tsv", "r") as fp_img:
fp_img.seek(img_lineidx[i])
imgid, img_base64 = fp_img.readline().strip().split('\t')
image = Image.open(BytesIO(base64.b64decode(img_base64)))
image_input = preprocess(image)
return image_input
### Generate 390k x 512 matrix
rows = []
bs = 512
num_bs = 389750//bs
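# 389750 is the total number of images; starting the loop at batch 111 presumably resumes an earlier, partially completed encoding run.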
for j in tqdm(range(111,num_bs)):
batched_ids = list(range(j*bs, j*bs+bs))
image_input = torch.tensor(np.stack([get_image_input(i) for i in batched_ids])).to(device)
with torch.no_grad():
image_features = model.encode_image(image_input)
rows.append(image_features)
if not 389750 % bs == 0:
batched_ids = list(range(num_bs*bs, 389750))
image_input = torch.tensor(np.stack([get_image_input(i) for i in batched_ids])).to(device)
with torch.no_grad():
image_features = model.encode_image(image_input)
rows.append(image_features)
I390Kx512 = torch.cat(rows)
print(I390Kx512.size())
torch.save(I390Kx512, os.path.join(large_matrices_dir, "I390Kx512.pt"))
###Output
_____no_output_____
###Markdown
Matrix Multiplication
###Code
split = 'test'
### Load CLIP encodings for 390K images, 540K snippets and 5K val/test Queries
I390Kx512 = torch.load(os.path.join(large_matrices_dir, "I390Kx512.pt"))
T540Kx512 = torch.load(os.path.join(large_matrices_dir, "T540Kx512.pt"))
Qtxtx512 = torch.load(os.path.join(large_matrices_dir, "Qtxtx512_{}.pt".format(split)))
Qimgx512 = torch.load(os.path.join(large_matrices_dir, "Qimgx512_{}.pt".format(split)))
print(I390Kx512.size(), T540Kx512.size())
print(Qimgx512.size(), Qtxtx512.size())
### Normalize encodings
I390Kx512 = I390Kx512 / I390Kx512.norm(dim=1)[:, None]
T540Kx512 = T540Kx512 / T540Kx512.norm(dim=1)[:, None]
Qtxtx512 = Qtxtx512 / Qtxtx512.norm(dim=1)[:, None]
Qimgx512 = Qimgx512 / Qimgx512.norm(dim=1)[:, None]
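# After L2-normalization, the matrix products below are cosine similarities between queries and sources.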
### Compute 2.5K x 540K
torch.cuda.synchronize()
%time Qtxtx540K = torch.matmul(Qtxtx512.to("cuda"), T540Kx512.to("cuda").t()); torch.cuda.synchronize()
print(Qtxtx540K.size())
torch.save(Qtxtx540K.cpu(), os.path.join(large_matrices_dir, "Qtxtx540K_{}.pt".format(split)))
torch.cuda.synchronize()
%time Qtxtx540K_top2000_i = torch.topk(Qtxtx540K, 2000).indices.cpu(); torch.cuda.synchronize()
%time Qtxtx540K_top2000_v = torch.topk(Qtxtx540K, 2000).values.cpu(); torch.cuda.synchronize()
print(Qtxtx540K_top2000_i.size(), Qtxtx540K_top2000_v.size())
torch.save(Qtxtx540K_top2000_i, os.path.join(large_matrices_dir, "Qtxtx540K_{}_top2000_i.pt".format(split)))
torch.save(Qtxtx540K_top2000_v, os.path.join(large_matrices_dir, "Qtxtx540K_{}_top2000_v.pt".format(split)))
# ------------------------- #
torch.cuda.synchronize()
%time Qimgx540K = torch.matmul(Qimgx512.to("cuda"), T540Kx512.to("cuda").t()); torch.cuda.synchronize()
print(Qimgx540K.size())
torch.save(Qimgx540K.cpu(), os.path.join(large_matrices_dir, "Qimgx540K_{}.pt".format(split)))
torch.cuda.synchronize()
%time Qimgx540K_top2000_i = torch.topk(Qimgx540K, 2000).indices.cpu(); torch.cuda.synchronize()
%time Qimgx540K_top2000_v = torch.topk(Qimgx540K, 2000).values.cpu(); torch.cuda.synchronize()
print(Qimgx540K_top2000_i.size(), Qimgx540K_top2000_v.size())
torch.save(Qimgx540K_top2000_i, os.path.join(large_matrices_dir, "Qimgx540K_{}_top2000_i.pt".format(split)))
torch.save(Qimgx540K_top2000_v, os.path.join(large_matrices_dir, "Qimgx540K_{}_top2000_v.pt".format(split)))
### Compute 2.5K x 390K
torch.cuda.synchronize()
%time Qtxtx390K = torch.matmul(Qtxtx512.to("cuda"), I390Kx512.to("cuda").t()); torch.cuda.synchronize()
print(Qtxtx390K.size())
torch.save(Qtxtx390K.cpu(), os.path.join(large_matrices_dir, "Qtxtx390K_{}.pt".format(split)))
torch.cuda.synchronize()
%time Qtxtx390K_top2000_i = torch.topk(Qtxtx390K, 2000).indices.cpu(); torch.cuda.synchronize()
%time Qtxtx390K_top2000_v = torch.topk(Qtxtx390K, 2000).values.cpu(); torch.cuda.synchronize()
print(Qtxtx390K_top2000_i.size(), Qtxtx390K_top2000_v.size())
torch.save(Qtxtx390K_top2000_i, os.path.join(large_matrices_dir, "Qtxtx390K_{}_top2000_i.pt".format(split)))
torch.save(Qtxtx390K_top2000_v, os.path.join(large_matrices_dir, "Qtxtx390K_{}_top2000_v.pt".format(split)))
# ------------------------- #
torch.cuda.synchronize()
%time Qimgx390K = torch.matmul(Qimgx512.to("cuda"), I390Kx512.to("cuda").t()); torch.cuda.synchronize()
print(Qimgx390K.size())
torch.save(Qimgx390K.cpu(), os.path.join(large_matrices_dir, "Qimgx390K_{}.pt".format(split)))
torch.cuda.synchronize()
%time Qimgx390K_top2000_i = torch.topk(Qimgx390K, 2000).indices.cpu(); torch.cuda.synchronize()
%time Qimgx390K_top2000_v = torch.topk(Qimgx390K, 2000).values.cpu(); torch.cuda.synchronize()
print(Qimgx390K_top2000_i.size(), Qimgx390K_top2000_v.size())
torch.save(Qimgx390K_top2000_i, os.path.join(large_matrices_dir, "Qimgx390K_{}_top2000_i.pt".format(split)))
torch.save(Qimgx390K_top2000_v, os.path.join(large_matrices_dir, "Qimgx390K_{}_top2000_v.pt".format(split)))
## Load top2000 indices and values
Qimgx390K_top2000_i = torch.load(os.path.join(large_matrices_dir, "Qimgx390K_{}_top2000_i.pt".format(split)))
Qimgx390K_top2000_v = torch.load(os.path.join(large_matrices_dir, "Qimgx390K_{}_top2000_v.pt".format(split)))
Qimgx540K_top2000_i = torch.load(os.path.join(large_matrices_dir, "Qimgx540K_{}_top2000_i.pt".format(split)))
Qimgx540K_top2000_v = torch.load(os.path.join(large_matrices_dir, "Qimgx540K_{}_top2000_v.pt".format(split)))
Qtxtx390K_top2000_i = torch.load(os.path.join(large_matrices_dir, "Qtxtx390K_{}_top2000_i.pt".format(split)))
Qtxtx390K_top2000_v = torch.load(os.path.join(large_matrices_dir, "Qtxtx390K_{}_top2000_v.pt".format(split)))
Qtxtx540K_top2000_i = torch.load(os.path.join(large_matrices_dir, "Qtxtx540K_{}_top2000_i.pt".format(split)))
Qtxtx540K_top2000_v = torch.load(os.path.join(large_matrices_dir, "Qtxtx540K_{}_top2000_v.pt".format(split)))
j = random.choice(list(range(2511+2455)))
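# 2511 and 2455 are presumably the numbers of image and text queries in this split; indices below 2511 are image queries.
# The top-2000 similarity tensors are sorted, so their first 100 columns are the top-100 similarities plotted here.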
if j >= 2511:
print("This is a text query")
y1 = Qtxtx390K_top2000_v[j-2511][:100]
y2 = Qtxtx540K_top2000_v[j-2511][:100]
else:
print("This is an img query")
y1 = Qimgx390K_top2000_v[j][:100]
y2 = Qimgx540K_top2000_v[j][:100]
x = list(range(100))
plt.scatter(x, y1, c ="gold", label="similarities with img sources")
plt.scatter(x, y2, c ="lightskyblue", label="similarities with text sources")
plt.legend()
plt.show()
avg_sim_w_img = torch.cat([torch.mean(Qimgx390K_top2000_v[:, :100], 1), torch.mean(Qtxtx390K_top2000_v[:, :100], 1)]).cpu()
print(avg_sim_w_img.size())
avg_sim_w_txt = torch.cat([torch.mean(Qimgx540K_top2000_v[:, :100], 1), torch.mean(Qtxtx540K_top2000_v[:, :100], 1)]).cpu()
print(avg_sim_w_txt.size())
fig = plt.figure(figsize=(8, 5))
plt.hist(list(avg_sim_w_txt), bins=50, color='lightskyblue')
plt.hist(list(avg_sim_w_img), bins=50, color='gold')
plt.show()
avg_sim_Qimg_w_img = torch.mean(Qimgx390K_top2000_v, 1).cpu()
avg_sim_Qtxt_w_img = torch.mean(Qtxtx390K_top2000_v, 1).cpu()
print(avg_sim_Qimg_w_img.size(), avg_sim_Qtxt_w_img.size())
avg_sim_Qimg_w_txt = torch.mean(Qimgx540K_top2000_v, 1).cpu()
avg_sim_Qtxt_w_txt = torch.mean(Qtxtx540K_top2000_v, 1).cpu()
print(avg_sim_Qimg_w_txt.size(), avg_sim_Qtxt_w_txt.size())
fig = plt.figure(figsize=(8, 5))
plt.hist(list(avg_sim_Qtxt_w_txt), bins=50, alpha = 0.5, color='lightskyblue')
plt.hist(list(avg_sim_Qimg_w_txt), bins=50, alpha = 0.5, color='powderblue')
plt.hist(list(avg_sim_Qimg_w_img), bins=50, alpha = 0.5, color='orange')
plt.hist(list(avg_sim_Qtxt_w_img), bins=50, alpha = 0.5, color='khaki')
plt.show()
fig = plt.figure(figsize=(8, 8))
plt.plot([0, 1], [0, 1], '-')
plt.scatter(list(avg_sim_Qtxt_w_txt), list(avg_sim_Qtxt_w_img), c ="lightgreen", label="Txt queries", alpha=0.3, s=5)
plt.scatter(list(avg_sim_Qimg_w_txt), list(avg_sim_Qimg_w_img), c ="pink", label="Img queries", alpha=0.3, s=5)
plt.legend()
plt.ylim(0.1, 0.5)
plt.xlim(0.2, 1)
plt.show()
###Output
_____no_output_____
###Markdown
Recall @[2, 10, 20, 50, 100, ..., 2000], assuming the answer modality is known
###Code
def recall(candidate_list, ans_list):
# Arguments are sets of integers
intersection = ans_list.intersection(candidate_list)
return len(intersection)/len(ans_list)
def compute_retrieval_metrics(pred, gth):
common = len(set(pred).intersection(gth))
RE = common / (len(gth))
PR = common / (len(pred)) # No protection against division by zero because it's assumed that CLIP never gives empty output
F1 = 2*PR*RE / (PR + RE + 1e-5)
return F1, RE, PR
### Load val/test Retrieval_answers
imgRetrievalAns = pickle.load(open(os.path.join(data_dir, "CLIP_retrieval_experiments/{}_imgRetrievalAns.pkl".format(split)), "rb"))
txtRetrievalAns = pickle.load(open(os.path.join(data_dir, "CLIP_retrieval_experiments/{}_txtRetrievalAns.pkl".format(split)), "rb"))
K = [2, 10, 20, 50, 100, 200, 500, 1000, 2000]
Qimg_scores = defaultdict(float)
Qtxt_scores = defaultdict(float)
for k in K:
Qimg_scores[k] = np.mean([recall(set(Qimgx390K_top2000_i[i][:k].numpy()), set(imgRetrievalAns[i])) for i in range(len(imgRetrievalAns))])
Qtxt_scores[k] = np.mean([recall(set(Qtxtx540K_top2000_i[i][:k].numpy()), set(txtRetrievalAns[i])) for i in range(len(txtRetrievalAns))])
print("Recall@k img queries:")
pprint(Qimg_scores)
print("Recall@k txt queries:")
pprint(Qtxt_scores)
K = [2, 10, 20, 50, 100, 200, 500, 1000, 2000]
Qimg_scores = defaultdict(float)
Qtxt_scores = defaultdict(float)
for k in K:
Qimg_scores[k] = np.mean([compute_retrieval_metrics(set(Qimgx390K_top2000_i[i][:k].numpy()), set(imgRetrievalAns[i]))[1] for i in range(len(imgRetrievalAns))])
Qtxt_scores[k] = np.mean([compute_retrieval_metrics(set(Qtxtx540K_top2000_i[i][:k].numpy()), set(txtRetrievalAns[i]))[1] for i in range(len(txtRetrievalAns))])
print("Recall@k img queries:")
top2_perf = [compute_retrieval_metrics(set(Qimgx390K_top2000_i[i][:2].numpy()), set(imgRetrievalAns[i])) for i in range(len(imgRetrievalAns))]
pprint(Qimg_scores)
print("Top2: F1={:.4f}, RE={:.4f}, PR={:.4f}".format(np.mean([P[0] for P in top2_perf]), np.mean([P[1] for P in top2_perf]), np.mean([P[2] for P in top2_perf]) ))
print("Recall@k txt queries:")
top2_perf = [compute_retrieval_metrics(set(Qtxtx540K_top2000_i[i][:2].numpy()), set(txtRetrievalAns[i])) for i in range(len(txtRetrievalAns))]
pprint(Qtxt_scores)
print("Top2: F1={:.4f}, RE={:.4f}, PR={:.4f}".format(np.mean([P[0] for P in top2_perf]), np.mean([P[1] for P in top2_perf]), np.mean([P[2] for P in top2_perf]) ))
fig = plt.figure(figsize=(6, 4))
plt.plot(list(Qimg_scores.keys()), list(Qimg_scores.values()), '-', marker='.', color='salmon', label='Img queries', linewidth=2)
plt.plot(list(Qtxt_scores.keys()), list(Qtxt_scores.values()), '-', marker='.', color='green', label='Txt queries', linewidth=2)
plt.legend(fontsize=14)
plt.ylim(0, 1)
plt.ylabel('Recall@k', fontsize=14)
plt.xscale('log')
plt.xlabel('k', fontsize=14)
plt.savefig("CLIP_recall_curve.jpg", dpi=250)
plt.show()
fluency_avg = 0.4695
acc_avg = 0.4533
mul_avg = 0.2479
###Output
_____no_output_____
###Markdown
Write to pred_dataset_val: CLIP top2 as positive and the rest of the top20 as negative
###Code
### Create imgid2caption
imgid2caption = {}
for k in tqdm(list(dataset.keys())):
if dataset[k]['Qcate'] == 'text':
for im in dataset[k]['img_negFacts']:
imgid2caption[im['image_id']] = im['caption']
else:
for im in dataset[k]['img_posFacts']:
imgid2caption[im['image_id']] = im['caption']
for im in dataset[k]['img_negFacts']:
imgid2caption[im['image_id']] = im['caption']
print(len(imgid2caption))
pickle.dump(imgid2caption, open(os.path.join(data_dir, "imgid2caption.pkl"), "wb"))
### load imgid2caption and image_id_map_0904
imgid2caption = pickle.load(open(os.path.join(data_dir, "imgid2caption.pkl"), "rb"))
image_id_map_0904 = pickle.load(open(os.path.join(data_dir, "image_id_map_0904.pkl"), "rb"))
r_image_id_map_0904 = {newid:oldid for oldid, newid in image_id_map_0904.items()}
print(len(imgid2caption), len(image_id_map_0904))
### Write dataset with CLIP retrieval results for val (top2 in the posFacts field, the remaining top20 in the negFacts field)
pred_data = {}
for g in dataset:
if dataset[g]['split'] == 'val' and not dataset[g]['Qcate'] == 'text':
pred_data[g] = copy.deepcopy(dataset[g])
top20_clip_image_ids = Qimgx390K_top2000_i[val_imgguid2qid[g]][:20].numpy().tolist()
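# Row index i of the 390K image matrix corresponds to the remapped image id i+30000000; r_image_id_map_0904 maps it back to the original image_id used in the dataset.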
pred_data[g]['img_posFacts'] = []
for i in top20_clip_image_ids[:2]:
pred_data[g]['img_posFacts'].append({
'image_id': r_image_id_map_0904[i+30000000],
'caption': imgid2caption[i+30000000]})
pred_data[g]['img_negFacts'] = []
for i in top20_clip_image_ids[2:]:
pred_data[g]['img_negFacts'].append({
'image_id': r_image_id_map_0904[i+30000000],
'caption': imgid2caption[i+30000000]})
top20_clip_text_ids = Qimgx540K_top2000_i[val_imgguid2qid[g]][:20].numpy().tolist()
pred_data[g]['txt_negFacts'] = []
for i in top20_clip_text_ids:
pred_data[g]['txt_negFacts'].append({
'fact': uniid2fact[i]})
print(len(pred_data))
json.dump(pred_data, open(os.path.join(data_dir, "CLIP_retrieval_experiments/Qimg_clip_retrieval_val.json"), "w"))
### Write dataset with CLIP retrieval results for test (top2 in the posFacts field, the remaining top20 in the negFacts field)
# pred_data for Qimg
pred_data = {}
for g in dataset:
if dataset[g]['split'] == 'test' and not dataset[g]['Qcate'] == 'text':
pred_data[g] = copy.deepcopy(dataset[g])
top20_clip_image_ids = Qimgx390K_top2000_i[test_imgguid2qid[g]][:20].numpy().tolist()
pred_data[g]['img_posFacts'] = []
for i in top20_clip_image_ids[:2]:
pred_data[g]['img_posFacts'].append({
'image_id': r_image_id_map_0904[i+30000000],
'caption': imgid2caption[i+30000000]})
pred_data[g]['img_negFacts'] = []
for i in top20_clip_image_ids[2:]:
pred_data[g]['img_negFacts'].append({
'image_id': r_image_id_map_0904[i+30000000],
'caption': imgid2caption[i+30000000]})
top20_clip_text_ids = Qimgx540K_top2000_i[test_imgguid2qid[g]][:20].numpy().tolist()
pred_data[g]['txt_negFacts'] = []
for i in top20_clip_text_ids:
pred_data[g]['txt_negFacts'].append({
'fact': uniid2fact[i]})
print(len(pred_data))
json.dump(pred_data, open(os.path.join(data_dir, "CLIP_retrieval_experiments/Qimg_clip_retrieval_test.json"), "w"))
# pred_data for Qtxt
pred_data = {}
for g in dataset:
if dataset[g]['split'] == 'test' and dataset[g]['Qcate'] == 'text':
pred_data[g] = copy.deepcopy(dataset[g])
top20_clip_text_ids = Qtxtx540K_top2000_i[test_txtguid2qid[g]][:20].numpy().tolist()
# For each txt fact I need to add a 'snippet_id' field for the filter infr to run and calculate scores
# Because I need to run VLP filter infr on this predicted dataset for coarse-to-fine filter: CLIP Top20, VLP Top2
# Note that this snippet_id is different from the one in original dataset
pred_data[g]['txt_posFacts'] = []
for i in top20_clip_text_ids[:2]:
pred_data[g]['txt_posFacts'].append({
'fact': uniid2fact[i],
'snippet_id': i})
pred_data[g]['txt_negFacts'] = []
for i in top20_clip_text_ids[2:]:
pred_data[g]['txt_negFacts'].append({
'fact': uniid2fact[i],
'snippet_id': i})
top20_clip_image_ids = Qtxtx390K_top2000_i[test_txtguid2qid[g]][:20].numpy().tolist()
pred_data[g]['img_negFacts'] = []
for i in top20_clip_image_ids:
pred_data[g]['img_negFacts'].append({
'image_id': r_image_id_map_0904[i+30000000],
'caption': imgid2caption[i+30000000]})
print(len(pred_data))
json.dump(pred_data, open(os.path.join(data_dir, "CLIP_retrieval_experiments/Qtxt_clip_retrieval_test.json"), "w"))
## Generate submission files to eval.ai (CLIP Top2 full-scale retrieval)
evalai_submission_CLIPtop2 = {}
for g in dataset:
if not dataset[g]['split'] == 'test': continue
if dataset[g]['Qcate'] == 'text':
retrieved_snippet_ids = []
retrieved_facts = []
for x in Qtxtx540K_top2000_i[test_txtguid2qid[g]][:2].tolist():
retrieved_facts.append(uniid2fact[x])
for x in dataset[g]['txt_posFacts']:
if x['fact'] in retrieved_facts:
retrieved_snippet_ids.append(x['snippet_id'])
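# Retrieved facts that are not among the annotated positives have no known snippet_id, so pad the source list to exactly two entries with a placeholder.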
retrieved_snippet_ids.extend((2-len(retrieved_snippet_ids))*["dummy"])
evalai_submission_CLIPtop2[g] = {'sources': retrieved_snippet_ids, 'answer': ""}
else:
evalai_submission_CLIPtop2[g] = \
{'sources': [ x+30000000 for x in Qimgx390K_top2000_i[test_imgguid2qid[g]][:2].tolist() ],
'answer': ""}
json.dump(evalai_submission_CLIPtop2, open("evalai_submission_CLIPtop2.json", "w"))
###Output
_____no_output_____
###Markdown
Decode commands

CUDA_VISIBLE_DEVICES=2 python decode_webqa.py --new_segment_ids --batch_size 32 --answer_provided_by "img" --beam_size 5 --split "test" --num_workers 4 --output_dir light_output/detectron_both_qa_upd --ckpts_dir /data/yingshac/WebQA/ckpts/detectron_both_qa_upd --no_eval --recover_step 11 --img_dataset_json_path /home/yingshac/CYS/WebQnA/WebQnA_data_new/CLIP_retrieval_experiments/Qimg_clip_retrieval_test.json

CUDA_VISIBLE_DEVICES=1 python decode_webqa.py --new_segment_ids --batch_size 32 --answer_provided_by "txt" --beam_size 5 --split "test" --num_workers 4 --output_dir light_output/detectron_both_qa_upd --ckpts_dir /data/yingshac/WebQA/ckpts/detectron_both_qa_upd --no_eval --recover_step 11 --txt_dataset_json_path /home/yingshac/CYS/WebQnA/WebQnA_data_new/CLIP_retrieval_experiments/Qtxt_clip_retrieval_test.json

VLP filter on CLIP's top20 (assume known answer modality) commands

CUDA_VISIBLE_DEVICES=3 python run_webqa.py --new_segment_ids --train_batch_size 20 --split test --answer_provided_by 'img' --task_to_learn 'filter' --num_workers 4 --max_pred 10 --mask_prob 1.0 --learning_rate 3e-5 --gradient_accumulation_steps 1 --save_loss_curve --output_dir light_output/filter_both_x_detectron_upd --ckpts_dir /data/yingshac/WebQA/ckpts/filter_both_x_detectron_upd --recover_step 3 --img_filter_max_choices 20 --img_dataset_json_path /home/yingshac/CYS/WebQnA/WebQnA_data_new/CLIP_retrieval_experiments/Qimg_clip_retrieval_test.json

CUDA_VISIBLE_DEVICES=2 python run_webqa.py --new_segment_ids --train_batch_size 20 --split test --answer_provided_by 'txt' --task_to_learn 'filter' --num_workers 4 --max_pred 10 --mask_prob 1.0 --learning_rate 3e-5 --gradient_accumulation_steps 1 --save_loss_curve --output_dir light_output/filter_both_x_detectron_upd --ckpts_dir /data/yingshac/WebQA/ckpts/filter_both_x_detectron_upd --recover_step 3 --txt_filter_max_choices 20 --txt_dataset_json_path /home/yingshac/CYS/WebQnA/WebQnA_data_new/CLIP_retrieval_experiments/Qtxt_clip_retrieval_test.json
###Code
### Write dataset with CLIP retrieval Top20 --> VLP Top2 results for test (top2 in the posFacts field, the remaining top20 in the negFacts field)
TH = 0.2
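# Reselection rule: the highest-scoring VLP choice is always kept; the runner-up is kept only if its filter score clears the threshold TH.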
# pred_data for Qimg
VLP_filter_output = json.load(open("/home/yingshac/CYS/WebQnA/VLP/vlp/light_output/filter_both_x_detectron_upd/test_-1_step3_img_20_True_True_Qimg_clip_retrieval_{}.json".format(split), "r"))
print(len(VLP_filter_output))
VLP_top2_reselection = np.full((len(imgRetrievalAns), 2), -1, dtype=np.int32)
for i in range(len(imgRetrievalAns)):
g = test_imgqid2guid[i]
vlp_filter_scores = np.array([float(x) for x in VLP_filter_output[g]['pred_scores']])
top2_idx = np.argsort(vlp_filter_scores)[-2:]
for j in [0, 1]:
if j==1 or vlp_filter_scores[top2_idx[j]] >= TH:
VLP_top2_reselection[i][j] = image_id_map_0904[VLP_filter_output[g]['choices'][top2_idx[j]]] - 30000000
# Score: VLP_top2_reselection vs. retrievalAns
top2_perf = [compute_retrieval_metrics(set(VLP_top2_reselection[i]), set(imgRetrievalAns[i])) for i in range(len(imgRetrievalAns))]
print("VLP reselected top2 from CLIP's top20, image queries: F1={:.4f}, RE={:.4f}, PR={:.4f}".format(np.mean([P[0] for P in top2_perf]), np.mean([P[1] for P in top2_perf]), np.mean([P[2] for P in top2_perf]) ))
# Write dataset (only contain predicted top2 sources in the posFacts field, assuming known answer modality)
pred_data = {}
for g in dataset:
if dataset[g]['split'] == 'test' and not dataset[g]['Qcate'] == 'text':
pred_data[g] = copy.deepcopy(dataset[g])
top20_clip_image_ids = VLP_top2_reselection[test_imgguid2qid[g]].tolist()
pred_data[g]['img_posFacts'] = []
for i in top20_clip_image_ids:
if i < 0: continue
pred_data[g]['img_posFacts'].append({
'image_id': r_image_id_map_0904[i+30000000],
'caption': imgid2caption[i+30000000]})
pred_data[g]['img_negFacts'] = []
pred_data[g]['txt_negFacts'] = []
print(len(pred_data))
json.dump(pred_data, open(os.path.join(data_dir, "CLIP_retrieval_experiments/Qimg_clipvlp_top2_test.json"), "w"))
### ------------------------------------------------------------------------------ ###
# pred_data for Qtxt
VLP_filter_output = json.load(open("/home/yingshac/CYS/WebQnA/VLP/vlp/light_output/filter_both_x_detectron_upd/test_-1_step3_txt_20_True_Qtxt_clip_retrieval_{}.json".format(split), "r"))
print(len(VLP_filter_output))
VLP_top2_reselection = np.full((len(txtRetrievalAns), 2), -1, dtype=np.int32)
for i in range(len(txtRetrievalAns)):
g = test_txtqid2guid[i]
vlp_filter_scores = np.array([float(x) for x in VLP_filter_output[g]['pred_scores']])
top2_idx = np.argsort(vlp_filter_scores)[-2:]
for j in [0, 1]:
if j==1 or vlp_filter_scores[top2_idx[j]] >= TH:
VLP_top2_reselection[i][j] = VLP_filter_output[g]['choices'][top2_idx[j]]
# Score: VLP_top2_reselection vs. retrievalAns
top2_perf = [compute_retrieval_metrics(set(VLP_top2_reselection[i]), set(txtRetrievalAns[i])) for i in range(len(txtRetrievalAns))]
print("VLP reselected top2 from CLIP's top20, text queries: F1={:.4f}, RE={:.4f}, PR={:.4f}".format(np.mean([P[0] for P in top2_perf]), np.mean([P[1] for P in top2_perf]), np.mean([P[2] for P in top2_perf]) ))
# Write dataset (only contain predicted top2 sources in the posFacts field, assuming known answer modality)
pred_data = {}
for g in dataset:
if dataset[g]['split'] == 'test' and dataset[g]['Qcate'] == 'text':
pred_data[g] = copy.deepcopy(dataset[g])
top2_clipvlp_text_ids = VLP_top2_reselection[test_txtguid2qid[g]].tolist()
pred_data[g]['txt_posFacts'] = []
for i in top2_clipvlp_text_ids:
if i < 0: continue
pred_data[g]['txt_posFacts'].append({
'fact': uniid2fact[i]})
pred_data[g]['txt_negFacts'] = []
pred_data[g]['img_negFacts'] = []
print(len(pred_data))
json.dump(pred_data, open(os.path.join(data_dir, "CLIP_retrieval_experiments/Qtxt_clipvlp_top2_test.json"), "w"))
###Output
3464
VLP reselected top2 from CLIP's top20, image queries: F1=0.2168, RE=0.2823, PR=0.1840
3464
4076
VLP reselected top2 from CLIP's top20, text queries: F1=0.2601, RE=0.2593, PR=0.2615
4076
###Markdown
Decode commands on pred_datasets with coarse-to-fine retrieval (CLIP top20 --> VLP top2)

CUDA_VISIBLE_DEVICES=2 python decode_webqa.py --new_segment_ids --batch_size 32 --answer_provided_by "img" --beam_size 5 --split "test" --num_workers 4 --output_dir light_output/detectron_both_qa_upd --ckpts_dir /data/yingshac/WebQA/ckpts/detectron_both_qa_upd --no_eval --recover_step 11 --img_dataset_json_path /home/yingshac/CYS/WebQnA/WebQnA_data_new/CLIP_retrieval_experiments/Qimg_clipvlp_top2_test.json

CUDA_VISIBLE_DEVICES=1 python decode_webqa.py --new_segment_ids --batch_size 32 --answer_provided_by "txt" --beam_size 5 --split "test" --num_workers 4 --output_dir light_output/detectron_both_qa_upd --ckpts_dir /data/yingshac/WebQA/ckpts/detectron_both_qa_upd --no_eval --recover_step 11 --txt_dataset_json_path /home/yingshac/CYS/WebQnA/WebQnA_data_new/CLIP_retrieval_experiments/Qtxt_clipvlp_top2_test.json
###Code
import numpy as np
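# Presumably a weighted average of per-modality scores, weighted by the numbers of image (3464) and text (4076) test queries.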
x = np.array([5878, 2565, 1896])
y = np.array([3565, 5250, 2942])
(x * 3464 + y * 4076) / (3464+4076)
###Output
_____no_output_____
###Markdown
A List of jobs in this notebook
- Prepare question encodings Qimg x 512, Qtxt x 512
- Prepare snippet encodings 540K x 512
- Prepare image encodings 390K x 512
- Matrix multiplications: Qimg x 540K, Qimg x 390K, Qtxt x 540K, Qtxt x 390K
- Select and rank topk: Qimgx540K_top2000_i, Qimgx390K_top2000_i, Qtxtx540K_top2000_i, Qtxtx390K_top2000_i
- Some plots showing that question-img similarities are systematically lower than question-snippet similarities
- Compute Recall curve (recall@[2, 10, 20, 50, 100, ... 2000]) for zero-shot full-scale CLIP dense retrieval
- Compute retrieval F1 for CLIP's top2
- Write to eval.ai submission files for CLIP's top2
- Write to pred_dataset files for 1. having VLP filter rerank CLIP's top20, 2. having VLP QA run on CLIP's top2
- After getting VLP's reranking of CLIP's top20, 1. compute retrieval F1 for CLIP(20)-->VLP(2), 2. write to pred_dataset files for QA
###Code
from tqdm import tqdm
import json, random, time, os, base64, copy
import clip, pickle
import numpy as np
from pprint import pprint
from io import BytesIO
from collections import Counter, defaultdict
x = np.array([2043, 5703, 2817, 2050, 2168, 5734, 2793, 2037])
y = np.array([2815, 1483, 2071, 961, 2601, 1796, 2658, 1254])
(x * 3464 + y * 4076) / (3464+4076)
###Output
_____no_output_____ |
pyfund/Cap02/Notebooks/DSA-Python-Cap02-05-Dicionarios.ipynb | ###Markdown
Data Science Academy - Python Fundamentos - Chapter 2 Download: http://github.com/dsacademybr
###Code
# Python language version
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
###Output
Python version used in this Jupyter Notebook: 3.7.6
###Markdown
Dictionaries
###Code
# This is a list
estudantes_lst = ["Mateus", 24, "Fernanda", 22, "Tamires", 26, "Cristiano", 25]
estudantes_lst
# This is a dictionary
estudantes_dict = {"Mateus":24, "Fernanda":22, "Tamires":26, "Cristiano":25}
estudantes_dict
estudantes_dict["Mateus"]
estudantes_dict["Pedro"] = 23
estudantes_dict["Pedro"]
estudantes_dict["Tamires"]
estudantes_dict.clear()
estudantes_dict
del estudantes_dict
estudantes_dict
estudantes = {"Mateus":24, "Fernanda":22, "Tamires":26, "Cristiano":25}
estudantes
len(estudantes)
estudantes.keys()
estudantes.values()
estudantes.items()
estudantes2 = {"Maria":27, "Erika":28, "Milton":26}
estudantes2
estudantes.update(estudantes2)
estudantes
dic1 = {}
dic1
dic1["key_one"] = 2
print(dic1)
dic1[10] = 5
dic1
dic1[8.2] = "Python"
dic1
dic1["teste"] = 5
dic1
dict1 = {}
dict1
dict1["teste"] = 10
dict1["key"] = "teste"
# Note that a key and a value can be equal, but they represent different things.
dict1
dict2 = {}
dict2["key1"] = "Big Data"
dict2["key2"] = 10
dict2["key3"] = 5.6
dict2
a = dict2["key1"]
b = dict2["key2"]
c = dict2["key3"]
a, b, c
# Dictionary of lists
dict3 = {'key1':1230,'key2':[22,453,73.4],'key3':['leite','maça','batata']}
dict3
dict3['key2']
# Accessing a list item inside the dictionary
dict3['key3'][0].upper()
# Operations on list items inside the dictionary
var1 = dict3['key2'][0] - 2
var1
# Two operations in the same statement, to update an item inside the list
dict3['key2'][0] -= 2
dict3
###Output
_____no_output_____
###Markdown
Creating nested dictionaries
###Code
# Creating nested dictionaries
dict_aninhado = {'key1':{'key2_aninhada':{'key3_aninhada':'Dict aninhado em Python'}}}
dict_aninhado
dict_aninhado['key1']['key2_aninhada']['key3_aninhada']
###Output
_____no_output_____ |
crowd_path.ipynb | ###Markdown
Settings: for the first run, install the needed modules
###Code
!pip install lwcc
!pip install Dijkstar
###Output
_____no_output_____
###Markdown
set the path to the image
###Code
path = "/content/drive/MyDrive/Colab_Notebooks/test/video_scale2.jpg"
###Output
_____no_output_____
###Markdown
provide the coordinates of the start and end position
###Code
start_position = (2,2)
end_position = (75,75)
###Output
_____no_output_____
###Markdown
AI for getting the crowd density: creating the density map of the picture
###Code
from lwcc import LWCC
import matplotlib.pyplot as plt
img = path
count, density = LWCC.get_count(img, return_density = True, model_weights = "SHA", resize_img= False)
plt.imshow(density)
import sys
import numpy
numpy.set_printoptions(threshold=sys.maxsize)
i = 0
with open('/content/density_map.data', 'w') as f:
print(density, file=f)
###Output
_____no_output_____
###Markdown
Path calculation: to install the path finding module, uncomment the next line
###Code
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
import dijkstar
from dijkstar.graph import Graph
from dijkstar.algorithm import find_path
import json
density_array = density
density_array = [[j*100 for j in i] for i in density_array]
density_array = np.array(density_array)
#print(density_array)
def create_edges(my_graph, density_array):
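# Build a 4-connected grid graph over the density map; edge weights are taken from the density of cell (column, row), with a direction tag used by cost_func.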
rows = density_array.shape[0]
columns = density_array.shape[1]
#print("debug %s, %s" %(rows, columns))
for row in range(rows):
for column in range(columns):
my_graph.add_edge((column, row), (column + 1, row), (density_array[row, column], 'right'))
my_graph.add_edge((column, row), (column, row + 1), (density_array[row, column], 'bottom'))
my_graph.add_edge((column + 1, row), (column, row), (density_array[row, column], 'left'))
my_graph.add_edge((column, row + 1), (column, row), (density_array[row, column], 'up'))
def cost_func(u, v, edge, prev_edge):
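# Cost of traversing an edge = the crowd density stored on it, plus a +1 penalty whenever the direction changes from the previous step (discourages zig-zag paths).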
length, name = edge
if prev_edge:
prev_name = prev_edge[1]
else:
prev_name = None
cost = length
if name != prev_name:
cost += 1
return cost
#defining going to east as 0 degree and a positive angle in clock direction
def direction(cur_pos, path, phone_orient, lookahead):
x_offset = path[lookahead][0] - cur_pos[0]
y_offset = path[lookahead][1] - cur_pos[1]
print(x_offset, y_offset)
angle = math.tan(y_offset/(x_offset + 0.00001))
print(angle)
def save_output(output):
with open('output.json', 'w') as f:
json.dump(output, f)
my_graph = Graph()
create_edges(my_graph, density_array)
my_path = find_path(my_graph, start_position, end_position, cost_func=cost_func)
my_path = my_path.nodes
x_len = density_array.shape[1]
y_len = density_array.shape[0]
save_output(my_path)
print(my_path)
direction((0,0), my_path, 5, 4)
###Output
[(2, 2), (3, 2), (4, 2), (4, 3), (5, 3), (6, 3), (7, 3), (8, 3), (9, 3), (9, 4), (9, 5), (9, 6), (9, 7), (9, 8), (9, 9), (9, 10), (9, 11), (9, 12), (9, 13), (9, 14), (9, 15), (9, 16), (9, 17), (9, 18), (10, 18), (11, 18), (12, 18), (13, 18), (13, 19), (13, 20), (13, 21), (13, 22), (13, 23), (13, 24), (13, 25), (14, 25), (14, 26), (14, 27), (14, 28), (14, 29), (14, 30), (14, 31), (14, 32), (13, 32), (13, 33), (13, 34), (13, 35), (13, 36), (13, 37), (13, 38), (13, 39), (13, 40), (13, 41), (13, 42), (13, 43), (13, 44), (13, 45), (13, 46), (13, 47), (13, 48), (14, 48), (15, 48), (16, 48), (17, 48), (18, 48), (19, 48), (19, 49), (19, 50), (19, 51), (19, 52), (19, 53), (19, 54), (19, 55), (19, 56), (19, 57), (20, 57), (20, 58), (20, 59), (20, 60), (20, 61), (20, 62), (20, 63), (20, 64), (20, 65), (20, 66), (20, 67), (20, 68), (20, 69), (21, 69), (22, 69), (23, 69), (24, 69), (25, 69), (26, 69), (27, 69), (28, 69), (29, 69), (30, 69), (31, 69), (32, 69), (33, 69), (34, 69), (35, 69), (35, 68), (36, 68), (37, 68), (38, 68), (39, 68), (40, 68), (41, 68), (42, 68), (43, 68), (44, 68), (45, 68), (46, 68), (47, 68), (48, 68), (49, 68), (50, 68), (51, 68), (52, 68), (53, 68), (53, 69), (53, 70), (53, 71), (53, 72), (53, 73), (53, 74), (53, 75), (53, 76), (54, 76), (55, 76), (56, 76), (57, 76), (58, 76), (59, 76), (60, 76), (61, 76), (61, 77), (61, 78), (62, 78), (63, 78), (63, 79), (63, 80), (64, 80), (65, 80), (66, 80), (67, 80), (68, 80), (68, 79), (69, 79), (70, 79), (71, 79), (72, 79), (73, 79), (74, 79), (74, 78), (74, 77), (74, 76), (74, 75), (75, 75)]
5 3
0.6841350466948549
###Markdown
just showing the path in a plot for debugging
###Code
#output = my_path
#scale_factor = 680
#xmin, xmax = plt.xlim()
#ymin, ymax = plt.ylim()
#plt.xlim(xmin * scale_factor, xmax * scale_factor)
#plt.ylim(ymin * scale_factor, ymax * scale_factor)
#output = [[j*9 for j in i] for i in output]
#xy2=zip(*output)
#plt.plot(*xy2, color='red', linewidth=2.0)
#plt.grid()
#plt.gca().invert_yaxis()
#plt.show()
###Output
_____no_output_____
###Markdown
Visualisation: calculating the scaling factor for the path to fit to the original image
###Code
from PIL import Image
image_data = Image.open(path)
width, height = image_data.size
x_scale = width / x_len
y_scale = height / y_len
print(x_scale, y_scale)
###Output
8.0 8.0
###Markdown
creating the output
###Code
import numpy as np
import cv2
import argparse
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from PIL import Image, ImageOps
import plotly.graph_objects as go
import json
im = Image.open(path)
im_flip = ImageOps.flip(im)
with open('output.json') as json_file:
data = json.load(json_file)
plt.rcParams['xtick.bottom'] = plt.rcParams['xtick.labelbottom'] = False
plt.rcParams['xtick.top'] = plt.rcParams['xtick.labeltop'] = True
data = [[j*x_scale for j in i] for i in data]
xy=zip(*data)
image_data = Image.open(path)
width, height = image_data.size
img = plt.imread(path)
#im_flip = cv2.applyColorMap(im_flip, cv2.COLORMAP_OCEAN)
fig, ax = plt.subplots(figsize=(12, 7), dpi=100)
ax.imshow(im_flip, extent=[0, width, 0, height])
plt.plot(*xy, color='red', linewidth=3.0, linestyle='-')
plt.grid(color='white', linestyle='-', linewidth=0.5)
ax.set_title('Emergency Path From A to B in real time')
plt.gca().invert_yaxis()
plt.show()
###Output
_____no_output_____ |
17 - Deep Learning with TensorFlow 2.0/13_Business case/4_Preprocessing the data (11:32)/TensorFlow_Audiobooks_Preprocessing_with_comments.ipynb | ###Markdown
Audiobooks business case

Problem

You are given data from an Audiobook App. Logically, it relates to the audio versions of books ONLY. Each customer in the database has made a purchase at least once; that's why he/she is in the database. We want to create a machine learning algorithm based on our available data that can predict if a customer will buy again from the Audiobook company.

The main idea is that if a customer has a low probability of coming back, there is no reason to spend any money on advertising to him/her. If we can focus our efforts SOLELY on customers that are likely to convert again, we can make great savings. Moreover, this model can identify the most important metrics for a customer to come back again. Identifying new customers creates value and growth opportunities.

You have a .csv summarizing the data. There are several variables: Customer ID, Book length overall (sum of the minute length of all purchases), Book length avg (average length in minutes of all purchases), Price paid overall (sum of all purchases), Price paid avg (average of all purchases), Review (a Boolean variable whether the customer left a review), Review out of 10 (if the customer left a review, his/her review out of 10), Total minutes listened, Completion (from 0 to 1), Support requests (number of support requests; everything from forgotten password to assistance for using the App), and Last visited minus purchase date (in days).

These are the inputs (excluding customer ID, as it is completely arbitrary; it's more like a name than a number).

The targets are a Boolean variable (0 or 1). We are taking a period of 2 years in our inputs, and the next 6 months as targets. So, in fact, we are predicting if, based on the last 2 years of activity and engagement, a customer will convert in the next 6 months. 6 months sounds like a reasonable time. If they don't convert after 6 months, chances are they've gone to a competitor or didn't like the Audiobook way of digesting information.

The task is simple: create a machine learning algorithm, which is able to predict if a customer will buy again. This is a classification problem with two classes: won't buy and will buy, represented by 0s and 1s. Good luck!

Preprocess the data. Balance the dataset. Create 3 datasets: training, validation, and test. Save the newly created sets in a tensor-friendly format (e.g. *.npz)

Since we are dealing with real life data, we will need to preprocess it a bit. This is the relevant code, which is not that hard, but is crucial to creating a good model. If you want to know how to do that, go through the code with comments. In any case, this should do the trick for most datasets organized in the way: many inputs, and then 1 cell containing the targets (supervised learning datasets). Keep in mind that a specific problem may require additional preprocessing. Note that we have removed the header row, which contains the names of the categories. We simply want the data.

Extract the data from the csv
###Code
import numpy as np
# We will use the sklearn preprocessing library, as it will be easier to standardize the data.
from sklearn import preprocessing
# Load the data
raw_csv_data = np.loadtxt('Audiobooks_data.csv',delimiter=',')
# The inputs are all columns in the csv, except for the first one [:,0]
# (which is just the arbitrary customer IDs that bear no useful information),
# and the last one [:,-1] (which is our targets)
unscaled_inputs_all = raw_csv_data[:,1:-1]
# The targets are in the last column. That's how datasets are conventionally organized.
targets_all = raw_csv_data[:,-1]
###Output
_____no_output_____
###Markdown
Balance the dataset
###Code
# Count how many targets are 1 (meaning that the customer did convert)
num_one_targets = int(np.sum(targets_all))
# Set a counter for targets that are 0 (meaning that the customer did not convert)
zero_targets_counter = 0
# We want to create a "balanced" dataset, so we will have to remove some input/target pairs.
# Declare a variable that will do that:
indices_to_remove = []
# Count the number of targets that are 0.
# Once there are as many 0s as 1s, mark entries where the target is 0.
for i in range(targets_all.shape[0]):
if targets_all[i] == 0:
zero_targets_counter += 1
if zero_targets_counter > num_one_targets:
indices_to_remove.append(i)
# Create two new variables, one that will contain the inputs, and one that will contain the targets.
# We delete all indices that we marked "to remove" in the loop above.
unscaled_inputs_equal_priors = np.delete(unscaled_inputs_all, indices_to_remove, axis=0)
targets_equal_priors = np.delete(targets_all, indices_to_remove, axis=0)
###Output
_____no_output_____
###Markdown
Standardize the inputs
###Code
# That's the only place we use sklearn functionality. We will take advantage of its preprocessing capabilities
# It's a simple line of code, which standardizes the inputs, as we explained in one of the lectures.
# At the end of the business case, you can try to run the algorithm WITHOUT this line of code.
# The result will be interesting.
scaled_inputs = preprocessing.scale(unscaled_inputs_equal_priors)
###Output
_____no_output_____
###Markdown
Shuffle the data
###Code
# When the data was collected it was actually arranged by date
# Shuffle the indices of the data, so the data is not arranged in any way when we feed it.
# Since we will be batching, we want the data to be as randomly spread out as possible
shuffled_indices = np.arange(scaled_inputs.shape[0])
np.random.shuffle(shuffled_indices)
# Use the shuffled indices to shuffle the inputs and targets.
shuffled_inputs = scaled_inputs[shuffled_indices]
shuffled_targets = targets_equal_priors[shuffled_indices]
###Output
_____no_output_____
###Markdown
Split the dataset into train, validation, and test
###Code
# Count the total number of samples
samples_count = shuffled_inputs.shape[0]
# Count the samples in each subset, assuming we want 80-10-10 distribution of training, validation, and test.
# Naturally, the numbers are integers.
train_samples_count = int(0.8 * samples_count)
validation_samples_count = int(0.1 * samples_count)
# The 'test' dataset contains all remaining data.
test_samples_count = samples_count - train_samples_count - validation_samples_count
# Create variables that record the inputs and targets for training
# In our shuffled dataset, they are the first "train_samples_count" observations
train_inputs = shuffled_inputs[:train_samples_count]
train_targets = shuffled_targets[:train_samples_count]
# Create variables that record the inputs and targets for validation.
# They are the next "validation_samples_count" observations, folllowing the "train_samples_count" we already assigned
validation_inputs = shuffled_inputs[train_samples_count:train_samples_count+validation_samples_count]
validation_targets = shuffled_targets[train_samples_count:train_samples_count+validation_samples_count]
# Create variables that record the inputs and targets for test.
# They are everything that is remaining.
test_inputs = shuffled_inputs[train_samples_count+validation_samples_count:]
test_targets = shuffled_targets[train_samples_count+validation_samples_count:]
# We balanced our dataset to be 50-50 (for targets 0 and 1), but the training, validation, and test were
# taken from a shuffled dataset. Check if they are balanced, too. Note that each time you rerun this code,
# you will get different values, as each time they are shuffled randomly.
# Normally you preprocess ONCE, so you need not rerun this code once it is done.
# If you rerun this whole sheet, the npzs will be overwritten with your newly preprocessed data.
# Print the number of targets that are 1s, the total number of samples, and the proportion for training, validation, and test.
print(np.sum(train_targets), train_samples_count, np.sum(train_targets) / train_samples_count)
print(np.sum(validation_targets), validation_samples_count, np.sum(validation_targets) / validation_samples_count)
print(np.sum(test_targets), test_samples_count, np.sum(test_targets) / test_samples_count)
###Output
_____no_output_____
###Markdown
Save the three datasets in *.npz
###Code
# Save the three datasets in *.npz.
# In the next lesson, you will see that it is extremely valuable to name them in such a coherent way!
np.savez('Audiobooks_data_train', inputs=train_inputs, targets=train_targets)
np.savez('Audiobooks_data_validation', inputs=validation_inputs, targets=validation_targets)
np.savez('Audiobooks_data_test', inputs=test_inputs, targets=test_targets)
###Output
_____no_output_____ |
examples/api_request_deckgl_enan.ipynb | ###Markdown
Package loading and basic configurations
###Code
%load_ext autoreload
%autoreload 2
# load dependencies
import pandas as pd
import geopandas as gpd
from envirocar import TrackAPI, DownloadClient, BboxSelector, ECConfig
# create an initial but optional config and an api client
config = ECConfig()
track_api = TrackAPI(api_client=DownloadClient(config=config))
###Output
_____no_output_____
###Markdown
Querying enviroCar Tracks The following cell queries tracks from the enviroCar API. It defines a bbox for the area of Münster (Germany) and requests 30 tracks. The result is a GeoDataFrame, which is a geo-extended Pandas dataframe from the GeoPandas library. It contains all the information of the tracks in a flat dataframe format including a specific geometry column.
###Code
bbox = BboxSelector([
7.601165771484375, # min_x
51.94807412325402, # min_y
7.648200988769531, # max_x
51.97261482608728 # max_y
])
# issue a query
track_df = track_api.get_tracks(bbox=bbox, num_results=30) # requesting 30 tracks inside the bbox
track_df
track_df.plot(figsize=(5, 5))
###Output
_____no_output_____
###Markdown
Inspecting a single Track (unique id index 1)
###Code
some_track_id = track_df['track.id'].unique()[1]
some_track = track_df[track_df['track.id'] == some_track_id]
some_track.plot()
ax = some_track['GPS Altitude.value'].plot()
ax.set_title("GPS Altitude")
ax.set_ylabel(some_track['GPS Altitude.unit'][1])
ax
###Output
_____no_output_____
###Markdown
Interactive Map The following map-based visualization makes use of folium. It allows us to visualize geospatial data on an interactive Leaflet map. Since the data in the GeoDataFrame is modelled as a set of Points instead of a LineString, we have to manually create a polyline.
###Code
import folium
lats = list(some_track['geometry'].apply(lambda coord: coord.y))
lngs = list(some_track['geometry'].apply(lambda coord: coord.x))
avg_lat = sum(lats) / len(lats)
avg_lngs = sum(lngs) / len(lngs)
m = folium.Map(location=[avg_lat, avg_lngs], zoom_start=13)
folium.PolyLine([coords for coords in zip(lats, lngs)], color='Cyan').add_to(m)
m
###Output
_____no_output_____
###Markdown
Example: Visualization with pydeck (deck.gl) The pydeck library makes use of the basemap tiles from Mapbox. In case you want to visualize the map with basemap tiles, you need to register with MapBox, and configure a specific access token. The service is free until a certain level of traffic is exceeded. You can either configure it via your terminal (i.e. `export MAPBOX_API_KEY=`), which pydeck will automatically read, or you can pass it as a variable to the generation of pydeck (i.e. `pdk.Deck(mapbox_key=, ...)`).
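As a small aside, a hedged sketch of reading the token from the environment instead of hard-coding it in the notebook (it assumes `MAPBOX_API_KEY` was exported in the shell, as described above):

```python
import os
import pydeck as pdk

# None if the variable is not set
mapbox_key = os.environ.get("MAPBOX_API_KEY")

deck = pdk.Deck(
    layers=[],
    initial_view_state=pdk.ViewState(latitude=51.96, longitude=7.63, zoom=11),
    mapbox_key=mapbox_key,
)
```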
###Code
import pydeck as pdk
# for pydeck the attributes have to be flat
track_df['lat'] = track_df['geometry'].apply(lambda coord: coord.y)
track_df['lng'] = track_df['geometry'].apply(lambda coord: coord.x)
vis_df = pd.DataFrame(track_df)
vis_df['speed'] = vis_df['Speed.value']
# omit unit columns
vis_df_cols = [col for col in vis_df.columns if col.lower()[len(col)-4:len(col)] != 'unit']
vis_df = vis_df[vis_df_cols]
layer = pdk.Layer(
'ScatterplotLayer',
data=vis_df,
get_position='[lng, lat]',
auto_highlight=True,
get_radius=10, # Radius is given in meters
get_fill_color='[speed < 20 ? 0 : (speed - 20)*8.5, speed < 50 ? 255 : 255 - (speed-50)*8.5, 0, 140]', # Set an RGBA value for fill
pickable=True
)
# Set the viewport location
view_state = pdk.ViewState(
longitude=7.7063592529296875,
latitude=51.96246168188569,
zoom=11,
min_zoom=10,
max_zoom=20,
pitch=40.5,
bearing=-27.36)
r = pdk.Deck(
width=200,
layers=[layer],
initial_view_state=view_state, mapbox_key='pk.eyJ1IjoiZXNtYXRlbmFuIiwiYSI6ImNrNmsxMzR4bzAxdzAzbG5zaTkyaWJrcmwifQ.V8Nd3GGomopO8m9cwRddlg'#, mapbox_key=<mapbox-key-here>
)
r.to_html('tracks_muenster.html', iframe_width=1050)
###Output
_____no_output_____ |
SLR-estimation/SLR_estimation.ipynb | ###Markdown
Introduction

This in-class example demonstrates how you approach a new data set and conduct simple data analysis.

What you need to know:
- Basic Python operations
- Theoretical concepts on statistical moments
- Theoretical concepts on simple linear regression model

The list of [references](References) for detailed concepts and techniques used in this exercise.

*** Content
- [Load the required modules](Load-the-required-modules)
- [Data check and summary statistics](Data-check-and-summary-statistics)
- [Simple Linear Regression Model](Simple-Linear-Regression-Model)
- [References](References)

*** Data Description

```
-------------------------------------------------------------------------------
               storage  display    value
variable name  type     format     label    variable label
-------------------------------------------------------------------------------
price          float    %9.0g               house price, $1000s
assess         float    %9.0g               assessed value, $1000s
bdrms          byte     %9.0g               number of bdrms
lotsize        float    %9.0g               size of lot in square feet
sqrft          int      %9.0g               size of house in square feet
colonial       byte     %9.0g               =1 if home is colonial style
lprice         float    %9.0g               log(price)
lassess        float    %9.0g               log(assess)
llotsize       float    %9.0g               log(lotsize)
lsqrft         float    %9.0g               log(sqrft)
-------------------------------------------------------------------------------
```

*** Load the required modules
###Code
import math
import numpy as np
import pandas as pd
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
###Output
_____no_output_____
###Markdown
Introduction

This in-class example demonstrates how you approach a new data set and conduct simple data analysis.

What you need to know:
- Statsmodels and pandas modules in Python
- Theoretical concepts on statistical moments
- Theoretical concepts on simple linear regression model

The list of [references](References) for detailed concepts and techniques used in this exercise.

*** Content
- [Load the required modules](Load-the-required-modules)
- [Data check and summary statistics](Data-check-and-summary-statistics)
- [Simple Linear Regression Model](Simple-Linear-Regression-Model)
- [References](References)

*** Data Description

The house price data with file name ```hprice1.csv``` contains the following variables

| Name | Description |
| :--- | :--- |
| price | house price, \$1000s |
| assess | assessed value, \$1000s |
| bdrms | number of bdrms |
| lotsize | size of lot in square feet |
| sqrft | size of house in square feet |
| colonial | =1 if home is colonial style |
| lprice | log(price) |
| lassess | log(assess) |
| llotsize | log(lotsize) |
| lsqrft | log(sqrft) |

*** Load the required modules
###Code
import math
import numpy as np
import pandas as pd
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
###Output
_____no_output_____ |
quantum_annealing/Dwave_MinimumVertexCoverProblem.ipynb | ###Markdown
Basic Demo for D-Wave on Braket: The Minimum Vertex Cover Problem In this tutorial we discuss both the ```BraketSampler``` and ```BraketDWaveSampler```. In essence, they are both doing the same thing; each one just accepts different parameter names. Specifically, the ```BraketDWaveSampler``` allows users familiar with D-Wave to use D-Wave parameter names, e.g., ```answer_mode```, whereas the ```BraketSampler``` parameter names are consistent with the rest of the Braket experience. __Minimum Vertex Cover problem__: Here we consider a well-known combinatorial optimization problem on graphs called the Minimum Vertex Cover problem. Given an undirected graph with a vertex set $V$ and an edge set $E$, a vertex cover is a subset of the vertices (nodes) such that each edge in the graph is incident to at least one vertex in the subset. The Minimum Vertex Cover problem seeks to find a cover with a minimum number of vertices in the subset.
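For intuition, the covering constraint can be encoded as a QUBO penalty that the annealer minimizes; `dnx.min_vertex_cover` builds an equivalent model internally, so the sketch below (the helper name and penalty weight are illustrative assumptions) is only meant to show the objective:

```python
import networkx as nx

def mvc_qubo(graph, penalty=2.0):
    """Toy QUBO for Minimum Vertex Cover:
    minimize sum_i x_i + penalty * sum_{(u,v) in E} (1 - x_u)(1 - x_v),
    where x_i = 1 means vertex i is in the cover."""
    Q = {}
    for node in graph.nodes:
        Q[(node, node)] = Q.get((node, node), 0.0) + 1.0  # unit cost per vertex in the cover
    for u, v in graph.edges:
        # penalty * (1 - x_u)(1 - x_v) expands to -penalty*x_u - penalty*x_v + penalty*x_u*x_v (+ constant)
        Q[(u, u)] = Q.get((u, u), 0.0) - penalty
        Q[(v, v)] = Q.get((v, v), 0.0) - penalty
        Q[(u, v)] = Q.get((u, v), 0.0) + penalty
    return Q

Q = mvc_qubo(nx.star_graph(5))  # the 6-node star graph used later in this notebook
```

Any penalty larger than 1 makes violating an edge constraint more expensive than simply adding the missing vertex to the cover.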
###Code
import json
from braket.aws import AwsDevice
from braket.ocean_plugin import BraketSampler, BraketDWaveSampler
import matplotlib.pyplot as plt
import networkx as nx
import dwave_networkx as dnx
from dimod.binary_quadratic_model import BinaryQuadraticModel
from dwave.system.composites import EmbeddingComposite
###Output
_____no_output_____
###Markdown
__NOTE__: Please enter your S3 bucket and key below.
###Code
# Please enter the S3 bucket you created during onboarding in the code below
my_bucket = f"amazon-braket-Your-Bucket-Name" # the name of the bucket
my_prefix = "Your-Folder-Name" # the name of the folder in the bucket
s3_folder = (my_bucket, my_prefix)
# session and device
# use the DW_2000Q device
device = AwsDevice("arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6")
print('Device:', device)
# or use the Advantage_system1 device with over 5000 qubits to solve larger problems
device = AwsDevice("arn:aws:braket:::device/qpu/d-wave/Advantage_system1")
print('Device:', device)
###Output
Device: Device('name': DW_2000Q_6, 'arn': arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6)
Device: Device('name': Advantage_system1.1, 'arn': arn:aws:braket:::device/qpu/d-wave/Advantage_system1)
###Markdown
HELPER FUNCTIONS
###Code
# helper function to get colors for every node
def get_colors(result, n):
"""
return list of colors=[0, 0, 1, 0, 1, 1, ...] for graph
"""
# Obtain colors of each vertex
colors = [0 for _ in range(n)]
for ii in range(n):
if ii in result:
colors[ii] = 1
return colors
# helper function to plot graph
def get_graph(graph, pos):
"""
plot graph with labels
"""
# positions for all nodes
# pos = nx.spring_layout(graph)
# nodes
nx.draw_networkx_nodes(graph, pos, node_size=400)
# edges
nx.draw_networkx_edges(graph, pos)
# labels
nx.draw_networkx_labels(graph, pos, font_weight='bold', font_color='w')
# plot the graph
plt.axis('off')
#plt.savefig("figures/random_graph.png") # save as png
plt.show();
# helper function to plot graph
def get_colored_graph(graph, pos, colors):
"""
plot colored graph for given solution
"""
# positions for all nodes
# pos = nx.spring_layout(graph)
colorlist = ['#377eb8', '#e41a1c']
nx.draw_networkx(graph, pos, node_color=[colorlist[colors[int(node)]] for node in graph.nodes],
node_size=400, font_weight='bold', font_color='w')
# plot the graph
plt.axis('off');
# plt.savefig("./figures/weighted_graph.png") # save as png
###Output
_____no_output_____
###Markdown
BRAKET SAMPLER: Minimum Vertex Cover Problem A ```sampler```, as defined [here](https://docs.ocean.dwavesys.com/en/latest/glossary.htmlterm-sampler) and [here](https://docs.ocean.dwavesys.com/projects/system/en/stable/reference/samplers.html), accepts a binary quadratic model (```BQM```) and returns variable assignments. Samplers generally try to find minimizing values but can also sample from distributions defined by the ```BQM```: Samplers are processes that sample from low energy states of a problem’s objective function, which is a mathematical expression of the energy of a system. A binary quadratic model (BQM) sampler samples from low energy states in models such as those defined by an Ising equation or a QUBO problem and returns an iterable of samples, in order of increasing energy. ```BraketSampler``` is a structured sampler that uses Braket-formatted parameters and properties. For example, instead of ```answer_mode```, which is used for D-Wave QPU samplers, Braket uses ```resultFormat```. Below is an example of solving the minimum vertex cover problem using ```BraketSampler```.
###Code
# set sampler
sampler = BraketSampler(s3_folder, 'arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6')
# EmbeddingComposite automatically maps the problem to the structure of the solver.
embedded_sampler = EmbeddingComposite(sampler)
###Output
_____no_output_____
###Markdown
Minimum Vertex Cover Problem: Star Graph
###Code
# set graph
n = 6
graph = nx.star_graph(n-1)
# positions for all nodes
pos = nx.spring_layout(graph)
# plot graph with labels
get_graph(graph, pos)
# run problem on D-Wave
result = dnx.min_vertex_cover(graph, embedded_sampler, resultFormat="HISTOGRAM")
print('Result to MVC problem:', result)
print('Size of the vertex cover:', len(result))
# get coloring
colors = get_colors(result, n)
# plot result
get_colored_graph(graph, pos, colors)
###Output
Result to MVC problem: [0]
Size of the vertex cover: 1
###Markdown
__Discussion__: For this specific star graph we just need one single vertex, the one at the center, to cover the entire set of edges. Clearly, this solution is the minimal vertex cover. Minimum Vertex Cover Problem: Erdos Renyi graph Let us consider a more complicated graph, from the family of random Erdoes-Renyi graphs. Such a graph can be readily generated using the ```networkx``` library. As input we set the desired number of vertices and edges connecting pairs of vertices.
###Code
# setup Erdos Renyi graph
n = 10 # 10 nodes
m = 20 # 20 edges
# set graph
graph = nx.gnm_random_graph(n, m, seed=42)
# positions for all nodes
pos = nx.spring_layout(graph)
# plot graph with labels
get_graph(graph, pos)
# run problem on D-Wave
result = dnx.min_vertex_cover(graph, embedded_sampler, resultFormat="HISTOGRAM")
print('Result to MVC problem:', result)
print('Size of the vertex cover:', len(result))
# get coloring
colors = get_colors(result, n)
# plot result
get_colored_graph(graph, pos, colors)
###Output
Result to MVC problem: [1, 3, 4, 6, 8, 9]
Size of the vertex cover: 6
###Markdown
__Discussion__: By inspection, we can check that with the subset of blue-colored vertices we can reach every edge in the graph. This vertex cover is a subset of the vertices such that each edge in the graph is incident to at least one vertex in the subset. We have used the ```BraketSampler``` so far. Alternatively, we can use the ```BraketDWaveSampler```; this is just a matter of syntactic preferences. BRAKET D-WAVE SAMPLER: Minimum Vertex Cover Problem ```BraketDWaveSampler``` is a structured sampler that uses D-Wave-formatted parameters and properties. It is interchangeable with D-Wave's ```DWaveSampler```. Only the parameter inputs to the solver need to be changed to be D-Wave formatted (e.g. ```answer_mode``` instead of ```resultFormat```).Below is the same example as above of solving the minimum vertex cover problem. We now consider a larger problem with more nodes and edges, and solve the problem with the DWave advantage system.
###Code
# setup Erdos Renyi graph
n = 100 # 100 nodes
m = 400 # 400 edges
# set graph
graph = nx.gnm_random_graph(n, m, seed=42)
# positions for all nodes
pos = nx.spring_layout(graph)
# plot graph with labels
get_graph(graph, pos)
# set sampler
sampler = BraketDWaveSampler(s3_folder,'arn:aws:braket:::device/qpu/d-wave/Advantage_system1')
# EmbeddingComposite automatically maps the problem to the structure of the solver.
embedded_sampler = EmbeddingComposite(sampler)
# run problem on D-Wave: note replacement of 'resultFormat' with 'answer_mode'
result = dnx.min_vertex_cover(graph, embedded_sampler, answer_mode="histogram")
print('Result to MVC problem:', result)
print('Size of the vertex cover:', len(result))
# get coloring
colors = get_colors(result, n)
# plot result
get_colored_graph(graph, pos, colors)
###Output
Result to MVC problem: [1, 3, 5, 6, 8, 9, 10, 11, 14, 15, 19, 20, 24, 26, 27, 28, 29, 30, 31, 33, 35, 36, 39, 40, 42, 44, 45, 48, 49, 50, 51, 52, 54, 55, 58, 59, 60, 62, 64, 65, 66, 68, 69, 70, 71, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 88, 89, 92, 93, 94, 95, 96, 97, 98, 99]
Size of the vertex cover: 67
###Markdown
--- APPENDIX
###Code
# set sampler
sampler = BraketSampler(s3_folder,'arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6')
# EmbeddingComposite automatically maps the problem to the structure of the solver.
embedded_sampler = EmbeddingComposite(sampler)
# setup Erdos Renyi graph
n = 15 # 10 nodes
m = 30 # 20 edges
# set graph
graph = nx.gnm_random_graph(n, m, seed=42)
# positions for all nodes
pos = nx.spring_layout(graph)
# plot graph with labels
get_graph(graph, pos)
# Solve the MVC problem on the random graph defined above
result = dnx.min_vertex_cover(graph, embedded_sampler, resultFormat="HISTOGRAM")
print('Result to MVC problem:', result)
print('Size of the vertex cover:', len(result))
# Obtain colors of each vertex
colors = [0 for _ in range(n)]
for ii in range(n):
if ii in result:
colors[ii] = 1
print(colors)
# plot result
get_colored_graph(graph, pos, colors)
print(graph.edges)
colorlist = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf']
nx.draw_networkx(graph, pos, node_color=[colorlist[colors[int(node)]] for node in graph.nodes],
node_size=400, font_weight='bold', font_color='w')
# plot the graph
plt.axis('off');
# plt.savefig("./figures/weighted_graph.png") # save as png
# plt.show();
###Output
_____no_output_____
###Markdown
APPENDIX: Packages Display the environment used for this demo.
###Code
! pip freeze
###Output
# Editable install with no version control (amazon-braket-default-simulator==0.4.0)
-e /Users/xxiasos/braketvirtenv/amazon-braket-default-simulator-python/src
# Editable install with no version control (amazon-braket-ocean-plugin==0.2.0)
-e /Users/xxiasos/braketvirtenv/amazon-braket-ocean-plugin-python/src
# Editable install with no version control (amazon-braket-schemas==0.3.0)
-e /Users/xxiasos/braketvirtenv/amazon-braket-schemas-python/src
# Editable install with no version control (amazon-braket-sdk==0.6.0)
-e /Users/xxiasos/braketvirtenv/amazon-braket-sdk-python/src
appdirs==1.4.3
appnope==0.1.0
argon2-cffi==20.1.0
attrs==19.3.0
autograd==1.3
awscli==1.18.112
backcall==0.1.0
backoff==1.10.0
bleach==3.1.3
boltons==20.0.0
boto3==1.12.26
botocore==1.15.49
certifi==2019.11.28
cffi==1.14.1
chardet==3.0.4
click==7.1.1
colorama==0.4.3
cycler==0.10.0
decorator==4.4.2
defusedxml==0.6.0
dimod==0.9.0
distlib==0.3.0
docutils==0.15.2
dwave-cloud-client==0.7.0
dwave-hybrid==0.4.2
dwave-inspector==0.1.3
dwave-neal==0.5.3
dwave-networkx==0.8.4
dwave-ocean-sdk==2.0.1
dwave-qbsolv==0.3.0
dwave-system==0.9.1
dwave-tabu==0.2.2
dwavebinarycsp==0.1.1
entrypoints==0.3
filelock==3.0.12
Flask==1.1.1
future==0.18.2
h5py==2.10.0
homebase==1.0.1
idna==2.9
importlib-metadata==1.5.0
ipykernel==5.1.4
ipython==7.16.1
ipython-genutils==0.2.0
ipywidgets==7.5.1
itsdangerous==1.1.0
jedi==0.16.0
Jinja2==2.11.1
jmespath==0.9.5
json5==0.9.4
jsonschema==3.2.0
jupyter==1.0.0
jupyter-client==6.1.0
jupyter-console==6.1.0
jupyter-contrib-core==0.3.3
jupyter-contrib-nbextensions==0.5.1
jupyter-core==4.6.3
jupyter-highlight-selected-word==0.2.0
jupyter-latex-envs==1.4.6
jupyter-nbextensions-configurator==0.4.1
jupyterlab==2.1.2
jupyterlab-server==1.1.1
kiwisolver==1.1.0
lxml==4.5.2
MarkupSafe==1.1.1
matplotlib==3.2.2
minorminer==0.1.9
mistune==0.8.4
more-itertools==8.2.0
mpmath==1.1.0
nbconvert==5.6.1
nbformat==5.0.4
nest-asyncio==1.3.0
networkx==2.4
nose==1.3.7
notebook==6.0.3
numpy==1.19.0
openfermion==0.11.0
openfermionpsi4==0.4
openfermionpyscf==0.4
opt-einsum==3.2.1
ortools==7.5.7466
packaging==20.3
pandas==1.0.5
pandocfilters==1.4.2
parso==0.6.2
penaltymodel==0.16.3
penaltymodel-cache==0.4.1
penaltymodel-lp==0.1.3
penaltymodel-mip==0.2.2
PennyLane==0.10.0
PennyLane-Qchem==0.10.0
pexpect==4.8.0
pickleshare==0.7.5
Pillow==7.2.0
plucky==0.4.3
pluggy==0.13.1
prometheus-client==0.7.1
prompt-toolkit==3.0.4
protobuf==3.11.3
ptyprocess==0.6.0
PubChemPy==1.0.4
py==1.8.1
pyasn1==0.4.8
pycparser==2.20
pydantic==1.6.1
Pygments==2.6.1
pyparsing==2.4.6
pyqubo==0.4.0
pyrsistent==0.15.7
pyscf==1.7.1
PySocks==1.7.1
pytest==5.4.1
python-dateutil==2.8.1
pytz==2020.1
PyYAML==5.3.1
pyzmq==19.0.0
qtconsole==4.7.1
QtPy==1.9.0
requests==2.23.0
rsa==3.4.2
s3transfer==0.3.3
scipy==1.5.1
semantic-version==2.6.0
Send2Trash==1.5.0
six==1.14.0
sympy==1.6.1
terminado==0.8.3
testpath==0.4.4
toml==0.10.0
tornado==6.0.4
tox==3.15.0
traitlets==4.3.3
urllib3==1.25.8
virtualenv==20.0.20
wcwidth==0.1.8
webencodings==0.5.1
Werkzeug==1.0.0
widgetsnbextension==3.5.1
zipp==3.1.0
|
assignments/KoJaeMin/03.ipynb | ###Markdown
Assignment 1 Create the following **Histogram** of the happiness score using 1-point bins. The key for the happiness score is "Ladder score".
###Code
plt.hist(df['Ladder score'],range=(2,9),bins=7)
#plt.hist(df['Ladder score'],range(2,10))
plt.title('Happiness score')
plt.xlabel('score')
plt.ylabel('count')
###Output
_____no_output_____
###Markdown
Assignment 2 Make a scatter plot with the happiness score on the y-axis and GDP on the x-axis.
###Code
plt.scatter(df['Logged GDP per capita'],df['Ladder score'])
plt.title('Happiness Score per GDP')
plt.xlabel('GDP')
plt.ylabel('score')
###Output
_____no_output_____
###Markdown
Assignment 3 Select the top 10 and bottom 10 countries by happiness score, compute the mean of Social support, Logged GDP per capita, and Freedom to make life choices for each group, and present them as a grouped bar chart.
###Code
sorted_lst = df.sort_values(by='Ladder score',ascending=False).loc[:,['Social support','Logged GDP per capita','Freedom to make life choices']]
labels = ['Social','GDP Attribute','Freedom']
hg = []
up = []
width = 0.35
x = np.arange(len(labels))
for i in range(len(sorted_lst.head(10).mean())):
hg.append(sorted_lst.head(10).mean()[i])
up.append(sorted_lst.tail(10).mean()[i])
fig, ax = plt.subplots()
rects1 = ax.bar(x-width/2, hg, width, label='Happy')
rects2 = ax.bar(x+width/2, up, width, label='Unhappy')
ax.set_title('Happy Country and Unhappy Country')
ax.set_ylabel('Score')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
ax.bar_label(rects1)
ax.bar_label(rects2)
fig.tight_layout()
###Output
_____no_output_____ |
docs/source/example_notebooks/sensitivity_analysis_testing.ipynb | ###Markdown
Sensitivity Analysis for Linear Regression Models Sensitivity analysis helps us examine how sensitive a result is to the possibility of unobserved confounding. The current method only supports the linear regression estimator. The partial R^2 of the treatment with the outcome shows how strongly confounders explaining all the residual outcome variation would have to be associated with the treatment to eliminate the estimated effect. The robustness value measures the minimum strength of association unobserved confounding should have with both treatment and outcome in order to change the conclusions. A robustness value close to 1 means the treatment effect can handle strong confounders explaining almost all residual variation of the treatment and the outcome. A robustness value close to 0 means that even very weak confounders can change the results. Benchmarking examines the sensitivity of causal inferences to plausible strengths of the omitted confounders. This method is based on https://carloscinelli.com/files/Cinelli%20and%20Hazlett%20(2020)%20-%20Making%20Sense%20of%20Sensitivity.pdf Step 1: Load required packages
###Code
import os, sys
sys.path.append(os.path.abspath("../../../"))
import dowhy
from dowhy import CausalModel
import pandas as pd
import numpy as np
import dowhy.datasets
# Config dict to set the logging level
import logging.config
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'loggers': {
'': {
'level': 'ERROR',
},
}
}
logging.config.dictConfig(DEFAULT_LOGGING)
# Disabling warnings output
import warnings
from sklearn.exceptions import DataConversionWarning
#warnings.filterwarnings(action='ignore', category=DataConversionWarning)
###Output
_____no_output_____
###Markdown
Step 2: Load the dataset We create a dataset with linear relationships between common causes and treatment, and common causes and outcome. Beta is the true causal effect.
###Code
np.random.seed(100)
data = dowhy.datasets.linear_dataset( beta = 10,
num_common_causes = 7,
num_samples = 500,
num_treatments = 1,
stddev_treatment_noise =10,
stddev_outcome_noise = 5
)
model = CausalModel(
data=data["df"],
treatment=data["treatment_name"],
outcome=data["outcome_name"],
graph=data["gml_graph"],
test_significance=None,
)
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
data['df'].head()
###Output
_____no_output_____
###Markdown
Step 3: Create Causal Model Remove one of the common causes to simulate unobserved confounding
###Code
data["df"] = data["df"].drop("W4", axis = 1)
graph_str = 'graph[directed 1node[ id "y" label "y"]node[ id "W0" label "W0"] node[ id "W1" label "W1"] node[ id "W2" label "W2"] node[ id "W3" label "W3"] node[ id "W5" label "W5"] node[ id "W6" label "W6"]node[ id "v0" label "v0"]edge[source "v0" target "y"]edge[ source "W0" target "v0"] edge[ source "W1" target "v0"] edge[ source "W2" target "v0"] edge[ source "W3" target "v0"] edge[ source "W5" target "v0"] edge[ source "W6" target "v0"]edge[ source "W0" target "y"] edge[ source "W1" target "y"] edge[ source "W2" target "y"] edge[ source "W3" target "y"] edge[ source "W5" target "y"] edge[ source "W6" target "y"]]'
model = CausalModel(
data=data["df"],
treatment=data["treatment_name"],
outcome=data["outcome_name"],
graph=graph_str,
test_significance=None,
)
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
data['df'].head()
###Output
_____no_output_____
###Markdown
Step 4: Identification
###Code
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
###Output
_____no_output_____
###Markdown
Step 5: Estimation Currently only the Linear Regression estimator is supported for Linear Sensitivity Analysis
###Code
estimate = model.estimate_effect(identified_estimand,method_name="backdoor.linear_regression")
print(estimate)
###Output
_____no_output_____
###Markdown
Step 6: Refutation and Sensitivity Analysis
- identified_estimand: An instance of the IdentifiedEstimand class that provides the information with respect to which causal pathways are employed when the treatment affects the outcome
- estimate: An instance of the CausalEstimate class. The estimate obtained from the estimator for the original data.
- method_name: Refutation method name
- simulated_method_name: "linear-partial-R2" for Linear Sensitivity Analysis
- benchmark_common_causes: Name of the covariates used to bound the strengths of the unobserved confounder
- percent_change_estimate: The percentage of reduction of the treatment estimate that could alter the results (default = 1). If percent_change_estimate = 1, the robustness value describes the strength of association of confounders with treatment and outcome needed to reduce the estimate by 100%, i.e. bring it down to 0.
- confounder_increases_estimate: confounder_increases_estimate = True implies that the confounder increases the absolute value of the estimate and vice versa. Default is confounder_increases_estimate = False, i.e. the considered confounders pull the estimate towards zero
- effect_fraction_on_treatment: Strength of association between unobserved confounder and treatment compared to the benchmark covariate
- effect_fraction_on_outcome: Strength of association between unobserved confounder and outcome compared to the benchmark covariate
- null_hypothesis_effect: assumed effect under the null hypothesis (default = 0)
- plot_estimate: Generate a contour plot for the estimate while performing sensitivity analysis (default = True). To override the setting, set plot_estimate = False.
###Code
refute = model.refute_estimate(identified_estimand, estimate ,
method_name = "add_unobserved_common_cause",
simulated_method_name = "linear-partial-R2",
benchmark_common_causes = ["W3"],
effect_fraction_on_treatment = [ 1,2,3]
)
###Output
_____no_output_____
###Markdown
The x axis shows hypothetical partial R2 values of unobserved confounder(s) with the treatment. The y axis shows hypothetical partial R2 values of unobserved confounder(s) with the outcome. The contour levels represent adjusted t-values or estimates for unobserved confounders with hypothetical partial R2 values when these would be included in the full regression model. The red line is the critical threshold: confounders with such strength or stronger are sufficient to invalidate the research conclusions.
###Code
refute.stats
refute.benchmarking_results
###Output
_____no_output_____
###Markdown
Parameter List for plot function
- plot_type: "estimate" or "t-value"
- critical_value: special reference value of the estimate or t-value that will be highlighted in the plot
- x_limit: plot's maximum x_axis value (default = 0.8)
- y_limit: plot's minimum y_axis value (default = 0.8)
- num_points_per_contour: number of points to calculate and plot each contour line (default = 200)
- plot_size: tuple denoting the size of the plot (default = (7,7))
- contours_color: color of contour line (default = blue). String or array. If array, lines will be plotted with the specific color in ascending order.
- critical_contour_color: color of threshold line (default = red)
- label_fontsize: fontsize for labelling contours (default = 9)
- contour_linewidths: linewidths for contours (default = 0.75)
- contour_linestyles: linestyles for contours (default = "solid"). See: https://matplotlib.org/3.5.0/gallery/lines_bars_and_markers/linestyles.html
- contours_label_color: color of contour line label (default = black)
- critical_label_color: color of threshold line label (default = red)
- unadjusted_estimate_marker: marker type for unadjusted estimate in the plot (default = 'D'). See: https://matplotlib.org/stable/api/markers_api.html
- unadjusted_estimate_color: marker color for unadjusted estimate in the plot (default = "black")
- adjusted_estimate_marker: marker type for bias adjusted estimates in the plot (default = '^')
- adjusted_estimate_color: marker color for bias adjusted estimates in the plot (default = "red")
- legend_position: tuple denoting the position of the legend (default = (1.6, 0.6))
###Code
refute.plot(plot_type = 't-value')
###Output
_____no_output_____
###Markdown
The t statistic is the coefficient divided by its standard error. The higher the t-value, the greater the evidence to reject the null hypothesis. According to the above plot, at the 5% significance level, the null hypothesis of zero effect would be rejected given the above confounders.
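For reference, these quantities can be written compactly (following the Cinelli and Hazlett (2020) paper linked above), where $t_D$ is the treatment coefficient's t-value, $\mathrm{dof}$ the residual degrees of freedom, and $q$ the fraction of the estimate to be explained away (percent_change_estimate):

$$
R^2_{Y\sim D\mid \mathbf{X}} = \frac{t_D^2}{t_D^2 + \mathrm{dof}}, \qquad
RV_q = \frac{1}{2}\left(\sqrt{f_q^4 + 4 f_q^2} - f_q^2\right), \qquad
f_q = q\,\frac{|t_D|}{\sqrt{\mathrm{dof}}}
$$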
###Code
print(refute)
###Output
_____no_output_____
###Markdown
Sensitivity Analysis for a dataset with no confounders We now run the sensitivity analysis for the same dataset but without dropping any variable. The robustness value goes from 0.55 to 0.95, which means that the treatment effect can handle strong confounders explaining almost all residual variation of the treatment and the outcome.
###Code
np.random.seed(100)
data = dowhy.datasets.linear_dataset( beta = 10,
num_common_causes = 7,
num_samples = 500,
num_treatments = 1,
stddev_treatment_noise=10,
stddev_outcome_noise = 1
)
model = CausalModel(
data=data["df"],
treatment=data["treatment_name"],
outcome=data["outcome_name"],
graph=data["gml_graph"],
test_significance=None,
)
model.view_model()
from IPython.display import Image, display
display(Image(filename="causal_model.png"))
data['df'].head()
identified_estimand = model.identify_effect(proceed_when_unidentifiable=True)
print(identified_estimand)
estimate = model.estimate_effect(identified_estimand,method_name="backdoor.linear_regression")
print(estimate)
refute = model.refute_estimate(identified_estimand, estimate ,
method_name = "add_unobserved_common_cause",
simulated_method_name = "linear-partial-R2",
benchmark_common_causes = ["W3"],
effect_fraction_on_treatment = [ 1,2,3])
refute.plot(plot_type = 't-value')
print(refute)
refute.stats
refute.benchmarking_results
###Output
_____no_output_____ |
examples/Chachifuncs_Examples.ipynb | ###Markdown
This notebook demonstrates how to use Chachifuncs.py, which is used to clean raw battery data from the CALCE website (https://www.calce.umd.edu/batteries/data.htm). First we import everything we need:
###Code
import chachifuncs as ccf
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Then we apply the get_all_data function, which takes raw data and cleans it into forms other components of this package can use. The function 'get_all_data' will take all the raw data from the specified folder within the root directory, separate out cycles and put them in a folder called 'Separated_Cycles', then take those separated cycles, calculate dq/dv while simultaneously cleaning the data, and save those clean files in 'Clean_Separated_Cycles'. Three print messages should appear while this function is running: 1. All data separated into cycles and saved in folder (specified folder). 2. All cycles cleaned and saved in folder (specified folder). 3. All clean cycles recombined and saved in folder (specified folder).
###Code
ccf.get_all_data('data/example_files/', 'Raw_Data_Examples')
###Output
All data separated into cycles and saved in folder "data/example_files/Separated_Cycles/".
All cycles cleaned and saved in folder "data/example_files/Clean_Separated_Cycles/" .
All clean cycles recombined and saved in folder "data/example_files/Clean_Whole_Sets/" .
###Markdown
Now we can look at the data: If we want to look at the raw data's dq/dv plots, we can apply the calc_dv_dqdv function from ccf, as shown below with one example file of a CS2-type battery (a LiCoO2 cell).
###Code
raw_data_df = pd.read_excel('data/example_files/Separated_Cycles/CS2_33_10_04_10-Cycle12.xlsx')
# we load the file using pd.read_excel; by default the first sheet is read (a second argument could select a different sheet number)
###Output
_____no_output_____
###Markdown
Looking at the data below using raw_data_df.head(), we can see the columns do not contain any dq/dv data, so we need to calculate it.
###Code
raw_data_df.head(2)
###Output
_____no_output_____
###Markdown
Now we can calculate the dq/dv by applying the function calc_dv_dqdv to the dataframe. This function calculates the dV of row $i$ by $Voltage_i$ - $Voltage_{i-1}$. The same method is applied to calculate dQ, using the charge capacity for the charging part of the cycle, and the discharge capacity for the discharging part of the cycle. Then dQ is divided by dV, yielding dQ/dV.
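In other words, the calculation is just a row-to-row finite difference; a minimal pandas sketch of the same idea is below (the capacity column names are assumptions about the CALCE export — `ccf.calc_dv_dqdv` remains the authoritative implementation):

```python
# dV_i = V_i - V_{i-1}, dQ_i = Q_i - Q_{i-1}, then dQ/dV = dQ_i / dV_i
dv = raw_data_df['Voltage(V)'].diff()
dq_charge = raw_data_df['Charge_Capacity(Ah)'].diff()        # assumed column name
dq_discharge = raw_data_df['Discharge_Capacity(Ah)'].diff()  # assumed column name

dqdv_sketch = raw_data_df.assign(
    charge_dqdv=dq_charge / dv,
    discharge_dqdv=dq_discharge / dv,
)
```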
###Code
raw_data_dqdv = ccf.calc_dv_dqdv(raw_data_df)
###Output
_____no_output_____
###Markdown
Now we have dq/dv values in the dataframe. We can examine the data below:
###Code
raw_data_dqdv.tail(3)
#We use tail instead of head because the first few dq/dv's are NaN's or 0's. You can look at the head too if you'd like.
###Output
_____no_output_____
###Markdown
Now let's plot the raw dQ/dV data.
###Code
fig1 = plt.figure(figsize = (8,8), facecolor = 'w', edgecolor= 'k')
myplot = plt.plot(raw_data_dqdv['Voltage(V)'], raw_data_dqdv['Charge_dQ/dV'])
plt.plot(raw_data_dqdv['Voltage(V)'], raw_data_dqdv['Discharge_dQ/dV'], color = 'red')
plt.ylim(-20, 20)
plt.ylabel('dQ/dV (C/V)')
plt.xlabel('Voltage(V)')
###Output
_____no_output_____
###Markdown
Now let's look at the clean data:
###Code
clean_cycle_df = pd.read_excel('data/example_files/Clean_Separated_Cycles/CS2_33_10_04_10-Cycle12Clean.xlsx')
#We just picked a random cycle.
fig1 = plt.figure(figsize = (8,8), facecolor = 'w', edgecolor= 'k')
myplot = plt.plot(clean_cycle_df['Voltage(V)'], clean_cycle_df['dQ/dV'])
plt.plot(clean_cycle_df['Voltage(V)'], clean_cycle_df['Smoothed_dQ/dV'], c = 'red')
plt.ylim(-10, 10)
plt.ylabel('dQ/dV (C/V)')
plt.xlabel('Voltage(V)')
###Output
_____no_output_____
###Markdown
The recombined clean cycles, saved in 'Clean_Whole_Sets', are used in the DASH app. If we wanted to separate the charge cycles from the discharge cycles, we would use the sep_char_dis function.
###Code
whole_set_df = pd.read_excel('data/example_files/Clean_Whole_Sets/CS2_33_10_04_10CleanSet.xlsx')
charge, discharge = ccf.sep_char_dis(whole_set_df)
###Output
_____no_output_____
###Markdown
Now we have two dataframes, charge and discharge, that we can plot separately.
###Code
cm = plt.get_cmap('hsv')
fig1 = plt.figure(figsize = (8,8), facecolor = 'w', edgecolor= 'k')
plt.plot(charge['Voltage(V)'], charge['dQ/dV'])
plt.plot(charge['Voltage(V)'], charge['Smoothed_dQ/dV'] )
plt.ylim(-1, 10)
plt.ylabel('dQ/dV (C/V)')
plt.xlabel('Voltage(V)')
cm = plt.get_cmap('hsv')
fig1 = plt.figure(figsize = (8,8), facecolor = 'w', edgecolor= 'k')
plt.plot(discharge['Voltage(V)'], discharge['dQ/dV'])
plt.plot(discharge['Voltage(V)'], discharge['Smoothed_dQ/dV'] )
plt.ylim(-10, 1)
plt.ylabel('dQ/dV (C/V)')
plt.xlabel('Voltage(V)')
###Output
_____no_output_____ |
docs/tutorial.ipynb | ###Markdown
Getting started with Kinesis In this notebook, we will generate a mock cluster, observe it (add noise), and fit it to see if we recover the true parameters of its internal kinematics. Notes on dependencies:
- [gapipes](https://github.com/smoh/gapipes): for the custom pandas accessor `g` to get covariance matrices, astropy coordinate objects, etc.
- [arviz](https://github.com/arviz-devs/arviz): for visualization
###Code
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import colors
import pandas as pd
import numpy as np
import astropy.units as u
import astropy.coordinates as coords
import arviz as az
# project dependencies
import gapipes as gp
import kinesis as kn
np.random.seed(18324)
###Output
_____no_output_____
###Markdown
Making and observing a mock cluster First, let's make a cluster with some fiducial parameters.
###Code
N = 150 # number of sources
b0 = np.array([17.7, 41.2, 13.3]) # pc
v0 = np.array([-6.32, 45.24, 5.30]) # [vx, vy, vz] in km/s
sigv = 1. # dispersion, km/s
cl = kn.Cluster(v0, sigv, b0=b0)
###Output
_____no_output_____
###Markdown
The cluster currently has no members. We need to 'sample' the cluster in order to populate it. There are two methods for sampling a cluster:
- sample_sphere: sampling uniformly within a maximum radius
- sample_at: sample at exactly the positions you want by giving it astropy coordinates.

Let's do uniform sphere for now. All normal methods of `Cluster` will return itself with modified attributes for method chaining. Sampling will modify its `members` from `None` to an instance of `ClusterMembers`.
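(For comparison, a hedged sketch of what a `sample_at` call could look like — it takes an astropy coordinate object, and the handful of positions below are made up purely for illustration; the next cell sticks with `sample_sphere`.)

```python
pos = coords.ICRS(ra=[66.0, 67.5] * u.deg,
                  dec=[15.5, 16.2] * u.deg,
                  distance=[47.0, 48.3] * u.pc)
cl_at = kn.Cluster(v0, sigv, b0=b0).sample_at(pos)
```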
###Code
cl.sample_sphere(N=N, Rmax=5)
cl.members
###Output
_____no_output_____
###Markdown
There are two Gaia-like DataFrames associated with `ClusterMembers`:
- `truths`: the noise-free Gaia data consistent with the `Cluster`'s internal motions
- `observed`: the noise-added Gaia data. If the `ClusterMembers` has not been `observe`d, it will be `None`.

Now let's `observe` the cluster. In order to observe, you need to specify the 3x3 covariance matrix for (parallax, pmra, pmdec) with the `cov` keyword. If `cov` is a (3, 3) array, it will be assumed for all stars. You can also specify a different covariance matrix for each star by specifying `cov` to be an (N, 3, 3) array. The noise model is assumed to be Gaussian.

It is also possible to specify the covariance by giving another Gaia-like data frame using the `error_from` keyword. The following columns, needed to construct the covariance matrix, are expected: `parallax_error`, `pmra_error`, `pmdec_error`, `parallax_pmra_corr`, `parallax_pmdec_corr`, `pmra_pmdec_corr`. This is useful, for example, if you want to simulate data as noisy as the actual Gaia data you want to model.

Radial velocity errors should be specified separately with the `rv_error` keyword, which should be a 1-d array with `N` elements, each specifying the Gaussian standard deviation in km/s. If any element in `rv_error` is `np.nan`, it is assumed that the star does not have an RV measurement.

This will add noise to the astrometry and radial velocities of the cluster members, and the noise-added Gaia-like data frame is now accessible with `cl.members.observed`. Let's specify simply 0.01 mas or mas/yr errors for parallax and pmra/pmdec and no available RVs.
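(For reference, if we did want per-star RV errors, a hedged sketch of the call would look like the following — the values and the half/half split are illustrative only; the cell below sticks to astrometry-only noise.)

```python
rv_err = np.full(N, np.nan)   # NaN = no RV measurement for that star
rv_err[: N // 2] = 0.5        # give half of the stars a 0.5 km/s RV uncertainty
cl.observe(cov=np.eye(3) * 0.01, rv_error=rv_err)
```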
###Code
cl.observe(cov=np.eye(3)*0.01)
cl.members.observed.head()
###Output
_____no_output_____
###Markdown
Fitting the mock data Now let's try to fit the mock data that we generated. Under the hood, kinesis uses [stan](https://mc-stan.org/) through its Python interface [pystan](https://pystan.readthedocs.io/en/latest/).
###Code
fitter = kn.Fitter(include_T=False, recompile=True)
df = cl.members.observed.copy()
fit = fitter.fit(df, sample=False)
print(f"v0, sigv = {fit['v0']}, {fit['sigv']:.4f}")
print(f"diff from truth: {fit['v0']-v0}, {fit['sigv']-sigv:.4f}")
stanfit = fitter.fit(df)
stanfit.azfit
azfit=stanfit.azfit
az.summary(azfit, var_names=['v0','sigv'])
az.plot_forest(azfit, kind='ridgeplot', var_names=['sigv']);
az.plot_pair(azfit, var_names=['v0','sigv']);
v0_diff = stanfit['v0'] - cl.v0[None,:]
plt.figure(figsize=(4,3))
plt.hist(v0_diff, bins=64, histtype='step', density=True, label=['x','y','z']);
plt.xlabel("diff from truth [km/s]");
plt.legend(loc='upper left');
plt.axvline(0, c='gray', lw=.5);
az.plot_posterior(azfit, var_names=['sigv'], ref_val=cl.sigmav, figsize=(4,3));
# plt.figure(figsize=(4,3))
# g = np.einsum('ni,nij,nj->n', a-r['a_model'], np.linalg.inv(C), a-r['a_model'])
# plt.hist(g, 32, density=True);
# plt.hist(g, np.logspace(0, 4, 32), density=True);
# plt.xscale('log');
# plt.yscale('log')
###Output
_____no_output_____
###Markdown
Basic cluster model with partial RVs
###Code
N = 150
b0 = np.array([17.7, 41.2, 13.3]) # pc
v0 = np.array([-6.32, 45.24, 5.30])
sigv = 1.
cl = kn.Cluster(v0, sigv, b0=b0)\
.sample_sphere(N=N, Rmax=5)\
.observe(cov=np.eye(3)*0.01)
# Give random half of the stars RV with 0.5 km/s uncertainty
Nrv = int(N*0.5)
rv_error = 0.5
irand = np.random.choice(np.arange(N), size=Nrv, replace=False)
df = cl.members.observed.copy()
# assign RVs only to the randomly chosen half; the rest stay NaN (no RV measurement)
df.loc[irand, 'radial_velocity'] = np.random.normal(
    cl.members.truth['radial_velocity'].values[irand], rv_error)
df.loc[irand, 'radial_velocity_error'] = rv_error
fit = fitter.fit(df, sample=False)
print(f"v0, sigv = {fit['v0']}, {fit['sigv']:.4f}")
print(f"diff from truth: {fit['v0']-v0}, {fit['sigv']-sigv:.4f}")
stanfit_partialrv = fitter.fit(df)
# workaround for a bug in arviz instantiating InferenceData
d = stanfit_partialrv.extract(permuted=False, pars=['v0', 'sigv'])
d = {k:np.swapaxes(v, 0, 1) for k,v in d.items()}
azfit_partialrv = az.from_dict(d)
v0_diff0 = stanfit['v0'] - cl.v0[None,:]
v0_diff = stanfit_partialrv['v0'] - cl.v0[None,:]
plt.figure(figsize=(4,3))
color = ['tab:blue', 'tab:green', 'tab:orange']
plt.hist(v0_diff0, bins=64, histtype='step', density=True, color=color, lw=.5);
plt.hist(v0_diff, bins=64, histtype='step', density=True, color=color, label=['x','y','z'], lw=2);
plt.xlabel("diff from truth [km/s]");
plt.legend(loc='upper left');
plt.axvline(0, c='gray', lw=.5);
axs = az.plot_posterior(azfit, var_names=['sigv'], ref_val=cl.sigmav, figsize=(4,3))
az.plot_posterior(azfit_partialrv, var_names=['sigv'], ax=axs);
###Output
_____no_output_____
###Markdown
Cluster with rotation
###Code
N = 150
b0 = np.array([17.7, 41.2, 13.3]) # pc
v0 = np.array([-6.32, 45.24, 5.30])
sigv = 1.
omegas = [40., 20., 50.]
cl = kn.Cluster(v0, sigv, b0=b0, omegas=omegas)\
.sample_sphere(N=N, Rmax=15)\
.observe(cov=np.eye(3)*0.001)
cl0 = kn.Cluster(v0, sigv, b0=b0).sample_at(cl.members.truth.g.icrs)
# Give random half of the stars RV with 0.5 km/s uncertainty
Nrv = int(N*0.5)
rv_error = 0.5
irand = np.random.choice(np.arange(N), size=Nrv, replace=False)
df = cl.members.observed.copy()
# assign RVs only to the randomly chosen half; the rest stay NaN (no RV measurement)
df.loc[irand, 'radial_velocity'] = np.random.normal(
    cl.members.truth['radial_velocity'].values[irand], rv_error)
df.loc[irand, 'radial_velocity_error'] = rv_error
m = kn.Fitter(include_T=True)
fit = m.fit(df, sample=False, b0=b0)
print(f"v0, sigv = {fit['v0']}, {fit['sigv']:.4f}")
print(f"diff from truth: {fit['v0']-v0}, {fit['sigv']-sigv:.4f}")
print(f"{fit['T_param']}")
print(f"{cl.T}")
# omegax = 0.5*(r['T_param'][2, 1] - r['T_param'][1, 2])
# omegay = 0.5*(r['T_param'][0, 2] - r['T_param'][2, 0])
# omegaz = 0.5*(r['T_param'][1, 0] - r['T_param'][0, 1])
# w1 = 0.5*(r['T_param'][2, 1] + r['T_param'][1, 2])
# w2 = 0.5*(r['T_param'][0, 2] + r['T_param'][2, 0])
# w3 = 0.5*(r['T_param'][1, 0] + r['T_param'][0, 1])
# w4 = r['T_param'][0, 0]
# w5 = r['T_param'][1, 1]
# kappa = w4 + w5 + r['T_param'][2, 2]
# print(omegax, omegay, omegaz)
# print(w1, w2, w3)
# print(w4, w5)
# print(kappa)
stanfit = m.fit(df, b0=b0)
azfit = az.from_pystan(stanfit)
v0_diff = stanfit['v0'] - cl.v0[None,:]
plt.figure(figsize=(4,3))
color = ['tab:blue', 'tab:green', 'tab:orange']
plt.hist(v0_diff, bins=64, histtype='step', density=True, color=color, label=['x','y','z'], lw=2);
plt.xlabel("diff from truth [km/s]");
plt.legend(loc='upper left');
plt.axvline(0, c='gray', lw=.5);
fig, ax = plt.subplots(3, 3, figsize=(8, 8), sharex=True, sharey=True)
fig.subplots_adjust(bottom=0.1, top=0.95, right=0.95, left=0.15)
ax = ax.ravel()
for cax, cT, truth in zip(ax, stanfit["T_param"].reshape((-1, 9)).T, cl.T.ravel()):
cax.hist(cT, bins=32, density=True, histtype="step")
cax.axvline(truth, c="k")
cax.axvline(0, c="gray", lw=0.5)
fig.text(0.55, 0.05, "m/s/pc", ha="center", va="center", size=20)
fig.text(0.05, 0.55, "Density", ha="center", va="center", rotation=90, size=20)
fig.savefig("mock_posterior_T.png")
###Output
_____no_output_____
###Markdown
Cluster with rotation - ideal caseTest the most ideal case when the cluster has small dispersion and all velocities are measured to high precision.
###Code
N = 150
b0 = np.array([17.7, 41.2, 13.3]) # pc
v0 = np.array([-6.32, 45.24, 5.30])
sigv = 0.1 # small dispersion
omegas = [40., 20., 50.]
cl = kn.Cluster(v0, sigv, b0=b0, omegas=omegas)\
.sample_sphere(N=N, Rmax=15)\
.observe(cov=np.eye(3)*0.001)
cl0 = kn.Cluster(v0, sigv, b0=b0).sample_at(cl.members.truth.g.icrs)
# Give all stars observed RV with small uncertainty
Nrv = int(N)
rv_error = 0.1
irand = np.random.choice(np.arange(N), size=Nrv, replace=False)
df = cl.members.observed.copy()
df['radial_velocity'] = np.random.normal(cl.members.truth['radial_velocity'].values, rv_error)
df['radial_velocity_error'] = rv_error
m = kn.Fitter(include_T=True)
fit = m.fit(df, sample=False, b0=b0)
print(f"v0, sigv = {fit['v0']}, {fit['sigv']:.4f}")
print(f"diff from truth: {fit['v0']-v0}, {fit['sigv']-sigv:.4f}")
print(f"{fit['T_param']}")
print(f"{cl.T}")
stanfit = m.fit(df, b0=b0)
azfit = az.from_pystan(stanfit, coords={'x':['v1','v2','v3']}, dims={'v0':['x']})
azfit.posterior
azfit.sample_stats
v0_diff = stanfit['v0'] - cl.v0[None,:]
plt.figure(figsize=(4,3))
color = ['tab:blue', 'tab:green', 'tab:orange']
plt.hist(v0_diff, bins=64, histtype='step', density=True, color=color, label=['x','y','z'], lw=2);
plt.xlabel("diff from truth [km/s]");
plt.legend(loc='upper left');
plt.axvline(0, c='gray', lw=.5);
az.plot_posterior(azfit, var_names=['sigv']);
az.plot_posterior(azfit, var_names=['T_param'], coords={'T_param_dim_0':[0], 'T_param_dim_1':[0]});
fig, ax = plt.subplots(3, 3, figsize=(8, 8), sharex=True, sharey=True)
fig.subplots_adjust(bottom=0.1, top=0.95, right=0.95, left=0.15)
ax = ax.ravel()
for cax, cT, truth in zip(ax, stanfit["T_param"].reshape((-1, 9)).T, cl.T.ravel()):
cax.hist(cT, bins=32, density=True, histtype="step")
cax.axvline(truth, c="k")
cax.axvline(0, c="gray", lw=0.5)
fig.text(0.55, 0.05, "m/s/pc", ha="center", va="center", size=20)
fig.text(0.05, 0.55, "Density", ha="center", va="center", rotation=90, size=20)
fig.savefig("mock_posterior_T.png")
az.summary(azfit, var_names=['v0', 'sigv', 'T_param'])
###Output
_____no_output_____
###Markdown
Quick start `klifs_utils` - work with KLIFS data
###Code
%load_ext autoreload
%autoreload 2
from pathlib import Path
import pandas as pd
from rdkit.Chem import Draw
from rdkit.Chem.Draw import IPythonConsole
import klifs_utils as klifs
###Output
_____no_output_____
###Markdown
Remote data Kinases
###Code
klifs.remote.kinases.kinase_groups()
klifs.remote.kinases.kinase_families(kinase_group='CMGC')
# Or get all kinase families
klifs.remote.kinases.kinase_families()[:2]
len(klifs.remote.kinases.kinase_families())
print(len(klifs.remote.kinases.kinase_names()))
klifs.remote.kinases.kinase_names().head()
klifs.remote.kinases.kinases_from_kinase_names(kinase_names='Aak1')
klifs.remote.kinases.kinases_from_kinase_names(kinase_names=['Aak1', 'EGFR'])
klifs.remote.kinases.kinases_from_kinase_ids(kinase_ids=533)
klifs.remote.kinases.kinases_from_kinase_ids(kinase_ids=[533, 531])
###Output
_____no_output_____
###Markdown
Ligands
###Code
klifs.remote.ligands.ligands_from_kinase_ids(kinase_ids=[33, 40])
klifs.remote.ligands.structures_from_ligand_ids(ligand_ids=[101, 920]).columns
###Output
_____no_output_____
###Markdown
Interactions Interaction types
###Code
klifs.remote.interactions.interaction_types()
###Output
_____no_output_____
###Markdown
Interaction fingerprints by structure IDs
###Code
klifs.remote.interactions.interaction_fingerprint_from_structure_ids(structure_ids=[33, 34, 35])
###Output
_____no_output_____
###Markdown
KLIFS pocket numbering
###Code
klifs.remote.interactions.klifs_pocket_numbering_from_structure_id(structure_id=33)
###Output
_____no_output_____
###Markdown
Structures ... by KLIFS structure ID(s)
###Code
klifs.remote.structures.structures_from_structure_ids(structure_ids=33).columns
klifs.remote.structures.structures_from_structure_ids(structure_ids=33)
klifs.remote.structures.structures_from_structure_ids(structure_ids=[33, 34])
###Output
_____no_output_____
###Markdown
... by KLIFS kinase ID(s)
###Code
klifs.remote.structures.structures_from_kinase_ids(kinase_ids=22)
klifs.remote.structures.structures_from_kinase_ids(kinase_ids=[22, 23])
###Output
_____no_output_____
###Markdown
... by PDB ID
###Code
klifs.remote.structures.structures_from_pdb_ids(pdb_ids='3w32')
klifs.remote.structures.structures_from_pdb_ids(pdb_ids=['3w32', '3poz'])
###Output
_____no_output_____
###Markdown
CoordinatesLoad or download structural data from mol2 files. Complex
###Code
klifs.remote.coordinates.complex.mol2_to_dataframe(33).head()
klifs.remote.coordinates.complex.pdb_to_dataframe(33).keys()
###Output
Structural data keys: dict_keys(['ATOM', 'HETATM', 'ANISOU', 'OTHERS'])
###Markdown
Protein
###Code
klifs.remote.coordinates.protein.mol2_to_dataframe(33).head()
###Output
_____no_output_____
###Markdown
Pocket
###Code
klifs.remote.coordinates.pocket.mol2_to_dataframe(33).head()
###Output
_____no_output_____
###Markdown
Ligand
###Code
klifs.remote.coordinates.ligand.mol2_to_dataframe(33).head()
klifs.remote.coordinates.ligand.mol2_to_rdkit_mol(33, compute2d=False)
###Output
_____no_output_____
###Markdown
Local data Get your personal copy of the KLIFS dataset here: https://klifs.vu-compmedchem.nl/search.php. Set the path to the `KLIFS_download` folder:
###Code
PATH_TO_KLIFS_DOWNLOAD = Path('/home/dominique/Documents/Work/Data/KLIFS_download')
###Output
_____no_output_____
###Markdown
Initialize
###Code
# Add test files and update paths
klifs_overview_path = PATH_TO_KLIFS_DOWNLOAD / 'overview.csv'
klifs_export_path = PATH_TO_KLIFS_DOWNLOAD / 'KLIFS_export.csv'
klifs.local.initialize.from_files(
klifs_overview_path=klifs_overview_path,
klifs_export_path=klifs_export_path
).head()
klifs_metadata_path = Path(klifs_overview_path).parent / 'klifs_metadata.csv'
klifs_metadata = pd.read_csv(klifs_metadata_path)
###Output
_____no_output_____
###Markdown
Kinases
###Code
klifs.local.kinases.kinase_groups(klifs_metadata)
klifs.local.kinases.kinase_families(klifs_metadata, kinase_group='TK')
klifs.local.kinases.kinase_families(klifs_metadata)[:2]
len(klifs.local.kinases.kinase_families(klifs_metadata))
klifs.local.kinases.kinase_names(klifs_metadata).head()
klifs_metadata.head()
klifs.local.kinases.kinases_from_kinase_names(klifs_metadata, kinase_names=['Aak1', 'EGFR'])
klifs.local.kinases.kinases_from_kinase_ids(klifs_metadata, kinase_ids=[533, 531])
###Output
_____no_output_____
###Markdown
Ligands Interactions Structures Coordinates
###Code
species = 'human'
kinase_name = 'EGFR'
pdb_id = '3w2s'
alt = 'A'
chain = 'A'
###Output
_____no_output_____
###Markdown
Complex
###Code
klifs.local.coordinates.complex.mol2_to_dataframe(
PATH_TO_KLIFS_DOWNLOAD,
species,
kinase_name,
pdb_id,
alt,
chain
).head(2)
###Output
_____no_output_____
###Markdown
Protein
###Code
klifs.local.coordinates.protein.mol2_to_dataframe(
PATH_TO_KLIFS_DOWNLOAD,
species,
kinase_name,
pdb_id,
alt,
chain
).head(2)
###Output
_____no_output_____
###Markdown
Pocket
###Code
klifs.local.coordinates.pocket.mol2_to_dataframe(
PATH_TO_KLIFS_DOWNLOAD,
species,
kinase_name,
pdb_id,
alt,
chain
).head(2)
###Output
_____no_output_____
###Markdown
Water
###Code
klifs.local.coordinates.water.mol2_to_dataframe(
PATH_TO_KLIFS_DOWNLOAD,
species,
kinase_name,
pdb_id,
alt,
chain
).head(2)
###Output
_____no_output_____
###Markdown
Ligand
###Code
klifs.local.coordinates.ligand.mol2_to_dataframe(
PATH_TO_KLIFS_DOWNLOAD,
species,
kinase_name,
pdb_id,
alt,
chain
).head(2)
klifs.local.coordinates.ligand.mol2_to_rdkit_mol(
PATH_TO_KLIFS_DOWNLOAD,
species,
kinase_name,
pdb_id,
alt,
chain,
compute2d=False
)
###Output
_____no_output_____
###Markdown
InterLex Test Client init
###Code
from ontquery.interlex import interlex_client
ilx_cli = interlex_client('test3.scicrunch.org')
ilx_cli.apiEndpoint
###Output
_____no_output_____
###Markdown
InterLex Production Client init. Please test on Test3 server first!
###Code
ilx_cli_production = interlex_client('scicrunch.org')
ilx_cli_production.apiEndpoint
###Output
_____no_output_____
###Markdown
Check Your User Info
###Code
ilx_cli.ilx_cli._get('user/info').json()['data']
###Output
_____no_output_____
###Markdown
Query Exact Label & Synonym Match
###Code
# Exact Search with only 1 result back
list(ilx_cli.query(label='brain'))
###Output
_____no_output_____
###Markdown
Get OntTerm representation of query result
###Code
[qr.asTerm() for qr in ilx_cli.query(label='brain')]
###Output
_____no_output_____
###Markdown
Query All Fields
###Code
# General Search through elasticsearch & SciGraph
list(ilx_cli.query(term='brain'))
###Output
_____no_output_____
###Markdown
Get entity via fragment
###Code
ilx_cli.get_entity('ilx_0101431')
###Output
_____no_output_____
###Markdown
Add entity
###Code
ilx_cli.add_entity?
import string
import random
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
random_label = 'test_' + id_generator(size=12)
entity = {
'label': random_label,
'type': 'term', # broken at the moment NEEDS PDE HARDCODED
'definition': 'Part of the central nervous system',
# 'comment': 'Cannot live without it',
'subThingOf': 'http://uri.interlex.org/base/tmp_0738397', # ILX ID for Organ
'synonyms': ['Encephalon', 'Cerebro'],
'predicates': {
'http://uri.interlex.org/base/tmp_0738407': 'sample_value', # spont firing beta ID | annotation
'http://uri.interlex.org/base/tmp_0738408': 'http://uri.interlex.org/base/tmp_0738406', # relationship
}
}
result = ilx_cli.add_entity(**entity)
print(result)
###Output
[2020-08-12 13:37:20,684] - WARNING - ontquery - interlex_client.py:793 - Annotation: [test_DY0F652BGCZU -> test_K8KAGD9D0QOQ -> sample_value], already exists.
[2020-08-12 13:37:22,470] - WARNING - ontquery - interlex_client.py:904 - relationship: [test_DY0F652BGCZU -> test_67M6YEYODK3V -> troy_test_term], already exists.
###Markdown
Update
###Code
ilx_cli.update_entity?
entity = {
'ilx_id': result['curie'],
'label': 'label_update',
# 'type': 'term', # broken at the moment NEEDS PDE HARDCODED
'definition': 'Updated definition!',
# 'comment': 'Cannot live without it UPDATE',
'subThingOf': 'http://uri.interlex.org/base/tmp_0738406', # ILX ID for Organ
'add_synonyms': ['Encephalon', 'Cerebro_update'],
'predicates_to_add': {
        # DUPLICATE CHECK
'http://uri.interlex.org/base/tmp_0738407': 'sample_value', # spont firing beta ID | annotation
'http://uri.interlex.org/base/tmp_0738407': 'sample_value2', # spont firing beta ID | annotation
'http://uri.interlex.org/base/tmp_0738408': 'http://uri.interlex.org/base/tmp_0738409' # relationship
},
'predicates_to_delete': {
# DELETE ORIGINAL
'http://uri.interlex.org/base/tmp_0738407': 'sample_value', # spont firing beta ID | annotation
'http://uri.interlex.org/base/tmp_0738408': 'http://uri.interlex.org/base/tmp_0738409', # relationship
}
}
result = ilx_cli.update_entity(**entity)
print(result)
ilx_cli.ilx_cli.partial_update?
###Output
_____no_output_____
###Markdown
Partial Update; the passive update Will only update the field if it is null/none server side
###Code
partial_update = ilx_cli.ilx_cli.partial_update(curie=result['curie'], comment='updated definition!')
partial_update['comment']
partial_update = ilx_cli.ilx_cli.partial_update(curie=result['curie'], comment='updated definition please?!')
partial_update['comment']
###Output
_____no_output_____
###Markdown
TutorialThis tutorial will teach the basics of how to use cirq. This tutorial will walk through how to use qubits, gates, and operations to create and simulate your first quantum circuit using cirq. It will briefly introduce devices, unitary matrices, decompositions, and optimizers.Note that this tutorial isn’t a quantum computing 101 tutorial, we assume familiarity of quantum computing at about the level of the textbook “Quantum Computation and Quantum Information” by Nielsen and Chuang.For more in-depth examples closer to those found in current work, check out our case studies page. To begin, please follow the instructions for [installing Cirq](install.md).
###Code
!pip install cirq --quiet
###Output
_____no_output_____
###Markdown
QubitsThe first part of creating a quantum circuit is to define a set of qubits (also known as a quantum register) to act on.Cirq has three main ways of defining qubits:* `cirq.NamedQubit`: used to label qubits by an abstract name* `cirq.LineQubit`: qubits labelled by number in a linear array * `cirq.GridQubit`: qubits labelled by two numbers in a rectangular lattice.Here are some examples of defining each type of qubit.
###Code
import cirq
# Using named qubits can be useful for abstract algorithms
# as well as algorithms not yet mapped onto hardware.
q0 = cirq.NamedQubit('source')
q1 = cirq.NamedQubit('target')
# Line qubits can be created individually
q3 = cirq.LineQubit(3)
# Or created in a range
# This will create LineQubit(0), LineQubit(1), LineQubit(2)
q0, q1, q2 = cirq.LineQubit.range(3)
# Grid Qubits can also be referenced individually
q4_5 = cirq.GridQubit(4,5)
# Or created in bulk in a square
# This will create 16 qubits from (0,0) to (3,3)
qubits = cirq.GridQubit.square(4)
###Output
_____no_output_____
###Markdown
There are also pre-packaged sets of qubits called [Devices](devices.md). These are qubits along with a set of rules of how they can be used. A `cirq.Device` can be used to apply adjacency rules and other hardware constraints to a quantum circuit. For our example, we will use the `cirq.google.Foxtail` device that comes with cirq. It is a 2x11 grid that mimics early hardware released by Google.
###Code
print(cirq.google.Foxtail)
###Output
(0, 0)───(0, 1)───(0, 2)───(0, 3)───(0, 4)───(0, 5)───(0, 6)───(0, 7)───(0, 8)───(0, 9)───(0, 10)
│ │ │ │ │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ │ │ │
(1, 0)───(1, 1)───(1, 2)───(1, 3)───(1, 4)───(1, 5)───(1, 6)───(1, 7)───(1, 8)───(1, 9)───(1, 10)
###Markdown
Gates and OperationsThe next step is to use the qubits to create operations that can be used in our circuit. Cirq has two concepts that are important to understand here:* A `Gate` is an effect that can be applied to a set of qubits. * An `Operation` is a gate applied to a set of qubits.For instance, `cirq.H` is the quantum [Hadamard gate](https://en.wikipedia.org/wiki/Quantum_logic_gate#Hadamard_(H)_gate) and is a `Gate` object. `cirq.H(cirq.LineQubit(1))` is an `Operation` object and is the Hadamard gate applied to a specific qubit (line qubit number 1).Many textbook gates are included within cirq. `cirq.X`, `cirq.Y`, and `cirq.Z` refer to the single-qubit Pauli gates. `cirq.CZ`, `cirq.CNOT`, `cirq.SWAP` are a few of the common two-qubit gates. `cirq.measure` is a macro to apply a `MeasurementGate` to a set of qubits. You can find more, as well as instructions on how to create your own custom gates, on the [Gates documentation](gates.ipynb) page.Many arithmetic operations can also be applied to gates. Here are some examples:
###Code
# Example gates
not_gate = cirq.CNOT
pauli_z = cirq.Z
# Using exponentiation to get square root gates
sqrt_x_gate = cirq.X**0.5
sqrt_iswap = cirq.ISWAP**0.5
# Some gates can also take parameters
sqrt_sqrt_y = cirq.YPowGate(exponent=0.25)
# Example operations
q0, q1 = cirq.LineQubit.range(2)
z_op = cirq.Z(q0)
not_op = cirq.CNOT(q0, q1)
sqrt_iswap_op = sqrt_iswap(q0, q1)
###Output
_____no_output_____
###Markdown
Circuits and MomentsWe are now ready to construct a quantum circuit. A `Circuit` is a collection of `Moment`s. A `Moment` is a collection of `Operation`s that all act during the same abstract time slice. Each `Operation` must have a disjoint set of qubits from the other `Operation`s in the `Moment`. A `Moment` can be thought of as a vertical slice of a quantum circuit diagram.Circuits can be constructed in several different ways. By default, cirq will attempt to slide your operation into the earliest possible `Moment` when you insert it.
###Code
circuit = cirq.Circuit()
# You can create a circuit by appending to it
circuit.append(cirq.H(q) for q in cirq.LineQubit.range(3))
# All of the gates are put into the same Moment since none overlap
print(circuit)
# We can also create a circuit directly as well:
print(cirq.Circuit(cirq.SWAP(q, q+1) for q in cirq.LineQubit.range(3)))
###Output
0: ───×───────────
│
1: ───×───×───────
│
2: ───────×───×───
│
3: ───────────×───
###Markdown
Sometimes, you may not want cirq to automatically shift operations all the way to the left. To construct a circuit without doing this, you can create the circuit moment-by-moment or use a different `InsertStrategy`, explained more in the [Circuit documentation](circuits.ipynb).
###Code
# Creates each gate in a separate moment.
print(cirq.Circuit(cirq.Moment([cirq.H(q)]) for q in cirq.LineQubit.range(3)))
###Output
0: ───H───────────
1: ───────H───────
2: ───────────H───
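###Markdown
An `InsertStrategy` can also be passed to `append` to control placement explicitly. Here is a small sketch of the same three H gates using the `NEW` strategy, which opens a fresh `Moment` for every operation:
###Code
circuit = cirq.Circuit()
# NEW forces every appended operation into its own, new Moment
circuit.append([cirq.H(q) for q in cirq.LineQubit.range(3)],
               strategy=cirq.InsertStrategy.NEW)
print(circuit)
###Output
_____no_output_____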
###Markdown
Circuits and DevicesOne important consideration when using real quantum devices is that there are often hardware constraints on the circuit. Creating a circuit with a `Device` will allow you to capture some of these requirements. These `Device` objects will validate the operations you add to the circuit to make sure that no illegal operations are added.Let's look at an example using the Foxtail device.
###Code
q0 = cirq.GridQubit(0, 0)
q1 = cirq.GridQubit(0, 1)
q2 = cirq.GridQubit(0, 2)
adjacent_op = cirq.CZ(q0, q1)
nonadjacent_op = cirq.CZ(q0, q2)
# This is an unconstrained circuit with no device
free_circuit = cirq.Circuit()
# Both operations are allowed:
free_circuit.append(adjacent_op)
free_circuit.append(nonadjacent_op)
print('Unconstrained device:')
print(free_circuit)
print()
# This is a circuit on the Foxtail device
# only adjacent operations are allowed.
print('Foxtail device:')
foxtail_circuit = cirq.Circuit(device=cirq.google.Foxtail)
foxtail_circuit.append(adjacent_op)
try:
# Not allowed, will throw exception
foxtail_circuit.append(nonadjacent_op)
except ValueError as e:
print('Not allowed. %s' % e)
###Output
Unconstrained device:
(0, 0): ───@───@───
│ │
(0, 1): ───@───┼───
│
(0, 2): ───────@───
Foxtail device:
Not allowed. Non-local interaction: cirq.CZ.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 2)).
###Markdown
SimulationThe results of the application of a quantum circuit can be calculated by a `Simulator`. Cirq comes bundled with a simulator that can calculate the results of circuits of up to about 20 qubits. It can be initialized with `cirq.Simulator()`.There are two different approaches to using a simulator:* `simulate()`: Since we are classically simulating a circuit, a simulator can directly access and view the resulting wave function. This is useful for debugging, learning, and understanding how circuits will function. * `run()`: When using actual quantum devices, we can only access the end result of a computation and must sample the results to get a distribution of results. Running the simulator as a sampler mimics this behavior and only returns bit strings as output.Let's try to simulate a 2-qubit "Bell State":
###Code
# Create a circuit to generate a Bell State:
# 1/sqrt(2) * ( |00> + |11> )
bell_circuit = cirq.Circuit()
q0, q1 = cirq.LineQubit.range(2)
bell_circuit.append(cirq.H(q0))
bell_circuit.append(cirq.CNOT(q0,q1))
# Initialize Simulator
s=cirq.Simulator()
print('Simulate the circuit:')
results=s.simulate(bell_circuit)
print(results)
print()
# For sampling, we need to add a measurement at the end
bell_circuit.append(cirq.measure(q0, q1, key='result'))
print('Sample the circuit:')
samples=s.run(bell_circuit, repetitions=1000)
# Print a histogram of results
print(samples.histogram(key='result'))
###Output
Simulate the circuit:
measurements: (no measurements)
output vector: 0.707|00⟩ + 0.707|11⟩
Sample the circuit:
Counter({3: 537, 0: 463})
###Markdown
Using parameter sweepsCirq circuits allow for gates to have symbols as free parameters within the circuit. This is especially useful for variational algorithms, which vary parameters within the circuit in order to optimize a cost function, but it can be useful in a variety of circumstances.For parameters, cirq uses the library `sympy` to add `sympy.Symbol` as parameters to gates and operations. Once the circuit is complete, you can fill in the possible values of each of these parameters with a `Sweep`. There are several possibilities that can be used as a sweep:* `cirq.Points`: A list of manually specified values for one specific symbol as a sequence of floats* `cirq.Linspace`: A linear sweep from a starting value to an ending value.* `cirq.ListSweep`: A list of manually specified values for several different symbols, specified as a list of dictionaries.* `cirq.Zip` and `cirq.Product`: Sweeps can be combined list-wise by zipping them together or through their Cartesian product (a short sketch of this follows the example below).A parameterized circuit and sweep together can be run using the simulator or other sampler by changing `run()` to `run_sweep()` and adding the sweep as a parameter.Here is an example of sweeping an exponent of an X gate:
###Code
import matplotlib.pyplot as plt
import sympy
# Perform an X gate with variable exponent
q = cirq.GridQubit(1,1)
circuit = cirq.Circuit(cirq.X(q) ** sympy.Symbol('t'),
cirq.measure(q, key='m'))
# Sweep exponent from zero (off) to one (on) and back to two (off)
param_sweep = cirq.Linspace('t', start=0, stop=2, length=200)
# Simulate the sweep
s = cirq.Simulator()
trials = s.run_sweep(circuit, param_sweep, repetitions=1000)
# Plot all the results
x_data = [trial.params['t'] for trial in trials]
y_data = [trial.histogram(key='m')[1] / 1000.0 for trial in trials]
plt.scatter('t','p', data={'t': x_data, 'p': y_data})
###Output
_____no_output_____
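###Markdown
The other sweep types listed above compose in the same way. Here is a small sketch (with arbitrary values) combining a `Points` sweep and a `Linspace` sweep through their Cartesian product:
###Code
# 3 values for 'a' times 5 values for 'b' -> 15 parameter combinations
sweep = cirq.Product(cirq.Points('a', [0.0, 0.25, 0.5]),
                     cirq.Linspace('b', start=0, stop=1, length=5))
print(len(sweep))
print(list(sweep)[:2])
###Output
_____no_output_____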
###Markdown
Unitary matrices and decompositionsMost quantum operations have a unitary matrix representation. This matrix can be accessed by applying `cirq.unitary()`. This can be applied to gates, operations, and circuits that support this protocol and will return the unitary matrix that represents the object.
###Code
print('Unitary of the X gate')
print(cirq.unitary(cirq.X))
print('Unitary of SWAP operator on two qubits.')
q0, q1 = cirq.LineQubit.range(2)
print(cirq.unitary(cirq.SWAP(q0, q1)))
print('Unitary of a sample circuit')
print(cirq.unitary(cirq.Circuit(cirq.X(q0), cirq.SWAP(q0, q1))))
###Output
Unitary of the X gate
[[0.+0.j 1.+0.j]
[1.+0.j 0.+0.j]]
Unitary of SWAP operator on two qubits.
[[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 1.+0.j 0.+0.j]
[0.+0.j 1.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 1.+0.j]]
Unitary of a sample circuit
[[0.+0.j 0.+0.j 1.+0.j 0.+0.j]
[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 1.+0.j]
[0.+0.j 1.+0.j 0.+0.j 0.+0.j]]
###Markdown
Decompositions Many gates can be decomposed into an equivalent circuit with simpler operations and gates. This is called decomposition and can be accomplished with the `cirq.decompose` protocol. For instance, a Hadamard H gate can be decomposed into X and Y gates:
###Code
print(cirq.decompose(cirq.H(cirq.LineQubit(0))))
###Output
[(cirq.Y**0.5).on(cirq.LineQubit(0)), cirq.XPowGate(exponent=1.0, global_shift=-0.25).on(cirq.LineQubit(0))]
###Markdown
Another example is the 3-qubit Toffoli gate, which is equivalent to a controlled-controlled-X gate. Many devices do not support a three-qubit gate, so it is important to be able to decompose it into gates the hardware does support:
###Code
q0, q1, q2 = cirq.LineQubit.range(3)
print(cirq.Circuit(cirq.decompose(cirq.TOFFOLI(q0, q1, q2))))
###Output
0: ───T────────────────@─────────────────────────────────@─────────────────────────────@────────────────────────────@───────────────────────────────────────
│ │ │ │
1: ───T───────Y^-0.5───@───Y^0.5────@───T^-1────Y^-0.5───@────────Y^0.5───@───Y^-0.5───@──────Y^0.5────@───Y^-0.5───@──────Y^0.5────@───────────────────────
│ │ │ │
2: ───Y^0.5───X────────T───Y^-0.5───@───Y^0.5───T────────Y^-0.5───────────@───Y^0.5────T^-1───Y^-0.5───@───Y^0.5────T^-1───Y^-0.5───@───Y^0.5───Y^0.5───X───
###Markdown
The above decomposes the Toffoli into a simpler set of one-qubit gates and CZ gates at the cost of lengthening the circuit considerably.Some devices will automatically decompose gates that they do not support. For instance, if we use the `Foxtail` device from above, we can see this in action by adding an unsupported SWAP gate:
###Code
swap = cirq.SWAP(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1))
print(cirq.Circuit(swap, device=cirq.google.Foxtail))
###Output
(0, 0): ───S^-1───Y^-0.5───@───S^-1───Y^0.5───X^0.5───@───S^-1───X^-0.5───@───S^-1───Z───
│ │ │
(0, 1): ───Z──────Y^-0.5───@───S^-1───Y^0.5───X^0.5───@───S^-1───X^-0.5───@───S^-1───S───
###Markdown
OptimizersThe last concept in this tutorial is the optimizer. An optimizer can take a circuit and modify it. Usually, this will entail combining or modifying operations to make it more efficient and shorter, though an optimizer can, in theory, do any sort of circuit manipulation.For example, the `MergeSingleQubitGates` optimizer will take consecutive single-qubit operations and merge them into a single equivalent operation (shown below as one single-qubit matrix gate).
###Code
q=cirq.GridQubit(1, 1)
optimizer=cirq.MergeSingleQubitGates()
c=cirq.Circuit(cirq.X(q) ** 0.25, cirq.Y(q) ** 0.25, cirq.Z(q) ** 0.25)
print(c)
optimizer.optimize_circuit(c)
print(c)
###Output
(1, 1): ───X^0.25───Y^0.25───T───
┌ ┐
(1, 1): ───│ 0.5 +0.707j -0. -0.5j │───────────
│ 0.354+0.354j 0.146+0.854j│
└ ┘
###Markdown
TutorialThis repository contains several utility functions that enable easier analysis acrossCMIP6 model data.It offers solutions to the following problems:1. [Inconsistent naming of dimensions and coordinates](rename)2. [Inconsistent values,shape and dataset location of coordinates](coords)3. [Inconsistent longitude conventions](lon)4. [Inconsistent units](units)5. [Inconsistent longitude/latitude bounds](bounds)5. [TL;DR How to put it all together](combo)
###Code
import matplotlib.pyplot as plt
import intake
import dask
%matplotlib inline
url = "https://storage.googleapis.com/cmip6/pangeo-cmip6.json"
col = intake.open_esm_datastore(url)
###Output
_____no_output_____
###Markdown
Inconsistent naming of dimensions and coordinatesAll cmip6 models (except for the unstructured grid of the AWI model) have in principal the same datastructure and **should** have a consistent naming, such that the user can test an analysis on one model and then seamlessly apply it on another. In practice some models have alternate naming for e.g. the logical (x,y,z) dimensions. `cmip6_preprocessing.preprocessing.rename_cmip6` accesses an internal dictionary to rename all models consistently to the following scheme:- `x`, `y`,`lev` for the logical grid index in the x,y,z direction- `lon`, `lat` for geographical position coordinates- `bnds`, `vertex` for cell bounds or vertex indicies- `time_bounds`, `lev_bounds`, `lon_bounds`, `lat_bounds` for cell bounding values
###Code
# load a few models to illustrate the problem
query = dict(experiment_id=['piControl'], table_id='Oyr',
variable_id='o2', grid_label=['gn', 'gr'],
source_id=['IPSL-CM6A-LR', 'CanESM5', 'GFDL-ESM4']
)
cat = col.search(**query)
cat.df['source_id'].unique()
z_kwargs = {'consolidated': True, 'decode_times':False}
with dask.config.set(**{'array.slicing.split_large_chunks': True}):
    dset_dict = cat.to_dataset_dict(zarr_kwargs=z_kwargs)
# show coordinates
for k, ds in dset_dict.items():
print(k)
print(list(ds.dims))
###Output
CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn
['axis_nbounds', 'member_id', 'nvertex', 'olevel', 'time', 'x', 'y']
CMIP.NOAA-GFDL.GFDL-ESM4.piControl.Oyr.gr
['bnds', 'lat', 'lev', 'lon', 'member_id', 'time']
CMIP.CCCma.CanESM5.piControl.Oyr.gn
['bnds', 'i', 'j', 'lev', 'member_id', 'time', 'vertices']
###Markdown
You can see that, e.g., the x dimension is not consistently labelled: in one model it is called `i`, in the other `x`. We can fix this by passing `rename_cmip6` as the `preprocess` argument to `to_dataset_dict`:
###Code
from cmip6_preprocessing.preprocessing import rename_cmip6
# load a few models to illustrate the problem
cat = col.search(**query)
cat.df['source_id'].unique()
# pass the preprocessing directly
with dask.config.set(**{'array.slicing.split_large_chunks': True}):
dset_dict_renamed = cat.to_dataset_dict(zarr_kwargs=z_kwargs, preprocess=rename_cmip6)
for k, ds in dset_dict_renamed.items():
print(k)
print(list(ds.dims))
###Output
--> The keys in the returned dictionary of datasets are constructed as follows:
'activity_id.institution_id.source_id.experiment_id.table_id.grid_label'
###Markdown
Beautiful! They have exactly the same dimensions! > You can also always apply the utility functions after loading the data, but be aware that some models even have inconsistent naming between timesteps and ensemble members. This can cause problems with the concatenation that `intake_esm` does. Passing the function will apply it before concatenation, which works nicely above. Here is an example of how it causes problems when applied afterwards:
###Code
# IPSL data is a bit of a mess
ds = dset_dict['CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn']
ds = rename_cmip6(ds)
ds
###Output
_____no_output_____
###Markdown
~See how the data_variable `o2` has several depth variables `o2(member_id, time, olevel, y, x, lev)`~> This has recently been fixed in the pangeo google store, but does still apply if you are e.g. working with a local copy of the CMIP6 netcdf file.**I strongly recommend applying all preprocessing using the `preprocess` keyword, and it is crucial to do so for the initial renaming step** Inconsistent values, shape and dataset location of coordinatesThe naming of the dimensions/coordinates is only the beginning: Some datasets use only index values for the x,y dimensions, while others use nominal longitudes and latitudes (useful for rough region selection), or the longitudes and latitudes are only 1d arrays (e.g. for regridded outputs). Our goal is to work with all datasets in the same way, and hence we convert all datasets into this form:- `x`, `y` are given as 1D 'nominal' longitudes and latitudes. This means `y` is the zonal average latitude (can become difficult near the Arctic, but is otherwise very useful) and `x` is the longitude at the equator.- `lon` and `lat` are 2-dimensional coordinate arrays with the 'true' position of grid cells (if the values were initially given as 1d arrays, they are broadcasted appropriately)We achieve this by applying `promote_empty_dims` (give empty dimensions values), `broadcast_lonlat` (convert 1d lon and lat arrays to 2d arrays) and `replace_x_y_nominal_lat_lon` (calculate nominal lon and lat and replace `x` and `y` with them)
###Code
from cmip6_preprocessing.preprocessing import promote_empty_dims, broadcast_lonlat, replace_x_y_nominal_lat_lon
# check out the previous datasets
ds = dset_dict_renamed['CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn']
ds
###Output
_____no_output_____
###Markdown
> Note how the dimensions x and y don't have values (e.g. they are not listed as coords)
###Code
ds = promote_empty_dims(ds)
ds
dset_dict_renamed.keys()
###Output
_____no_output_____
###Markdown
Nice. Now check out the `GFDL` model...
###Code
ds = dset_dict_renamed['CMIP.NOAA-GFDL.GFDL-ESM4.piControl.Oyr.gr']
ds
###Output
_____no_output_____
###Markdown
This dataset is from regridded output and thus has only 1D longitude and latitude values (which are called `x` and `y` due to the previous renaming step). `broadcast_lonlat` adds the `lon` and `lat` arrays back as 2d arrays.
###Code
ds = broadcast_lonlat(ds)
ds
###Output
_____no_output_____
###Markdown
When you look back at the native-grid models (like the `CanESM5` model below) you notice that the `x` and `y` values are given just as indices, making rough selection of regions using xarray's `.sel` rather useless. To gain back this functionality, we replace `x` and `y` with nominal longitudes and latitudes using `replace_x_y_nominal_lat_lon`:> As of version '0.2' this is not part of the `combined_preprocessing` anymore due to performance issues when applied to a full dataset. You can still apply this function before plotting as shown in the final part of the tutorial
###Code
ds = dset_dict_renamed['CMIP.CCCma.CanESM5.piControl.Oyr.gn']
print(ds.y.data)
ds = replace_x_y_nominal_lat_lon(ds)
ds.y.data
###Output
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125
126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143
144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161
162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179
180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197
198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215
216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233
234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251
252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269
270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287
288 289 290]
###Markdown
We can put all of this together in a wrapper function and plot some data
###Code
def wrapper(ds):
ds = ds.copy()
ds = rename_cmip6(ds)
ds = promote_empty_dims(ds)
ds = broadcast_lonlat(ds)
ds = replace_x_y_nominal_lat_lon(ds)
return ds
# pass the preprocessing directly
with dask.config.set(**{'array.slicing.split_large_chunks': True}):
dset_dict_processed1 = cat.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=wrapper)
fig, axarr = plt.subplots(nrows=3, figsize=[10,15])
for ax, (k, ds) in zip(axarr.flat, dset_dict_processed1.items()):
if 'member_id' in ds.dims:
ds = ds.isel(member_id=-1)
ds.o2.isel(time=0, lev=0).sel(y=slice(-15,15)).plot(ax=ax)
ax.set_title(k)
ax.set_aspect(2)
###Output
_____no_output_____
###Markdown
The naming and units are still inconsistent (not implemented yet) and the longitude is not consistent (we will deal with this below), but this is a big step forward. With the 'unprocessed' datasets this would have needed a lot more logic in the plotting loop. Inconsistent longitude conventionsWe saw above that not all models have a '0-360' longitude convention. We can fix this very quickly using `correct_lon`:
###Code
from cmip6_preprocessing.preprocessing import correct_lon
# same as above
def wrapper(ds):
ds = ds.copy()
ds = rename_cmip6(ds)
ds = promote_empty_dims(ds)
ds = broadcast_lonlat(ds)
ds = correct_lon(ds)
ds = replace_x_y_nominal_lat_lon(ds)
return ds
# pass the preprocessing directly
with dask.config.set(**{'array.slicing.split_large_chunks': True}):
dset_dict_processed2 = cat.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=wrapper)
fig, axarr = plt.subplots(nrows=3, figsize=[10,15])
for ax, (k, ds) in zip(axarr.flat, dset_dict_processed2.items()):
if 'member_id' in ds.dims:
ds = ds.isel(member_id=-1)
ds.o2.isel(time=0, lev=0).sel(y=slice(-15,15)).plot(ax=ax)
ax.set_title(k)
ax.set_aspect(2)
###Output
_____no_output_____
###Markdown
Inconsistent unitsBut of course this is not all. Some models give the depth in centimeters (so far I have only seen this in the NCAR models). We can fix this with `correct_units`:
###Code
from cmip6_preprocessing.preprocessing import correct_units
query = dict(experiment_id = ['historical'],variable_id='thetao', grid_label=['gn'],source_id=['CESM2', 'CanESM5'], member_id='r1i1p1f1',
)
cat = col.search(**query)
# raw data read in
dset_dict = cat.to_dataset_dict(zarr_kwargs=z_kwargs)
# fixed units
dset_dict_fixed_unit = cat.to_dataset_dict(zarr_kwargs=z_kwargs, preprocess=correct_units)
dset_dict['CMIP.NCAR.CESM2.historical.Omon.gn'].lev.plot()
plt.figure()
dset_dict_fixed_unit['CMIP.NCAR.CESM2.historical.Omon.gn'].lev.plot()
###Output
_____no_output_____
###Markdown
This helps tremendously when you are trying to slice a common depth from a series of models
###Code
fig, axarr = plt.subplots(nrows=2, figsize=[10,10])
for ax, (k, ds) in zip(axarr.flat, dset_dict_fixed_unit.items()):
ds.thetao.isel(time=0).sel(lev=5000, method='nearest').plot(ax=ax, vmin=-1, vmax=5)
ax.set_title(k)
###Output
_____no_output_____
###Markdown
As a comparison, for the unprocessed data this would have picked the depth at 50m for the `CESM2` model instead of 5000m:
###Code
fig, axarr = plt.subplots(nrows=2, figsize=[10,10])
for ax, (k, ds) in zip(axarr.flat, dset_dict.items()):
ds.thetao.isel(time=0).sel(lev=5000, method='nearest').plot(ax=ax, vmin=-1, vmax=5)
ax.set_title(k)
###Output
_____no_output_____
###Markdown
Consistent CF boundsMany of the CMIP6 models come with 'bounds' dataarrays that describe the extent of the finite grid cells.For the longitude and latitude there are two conventions: 2-element 'bounds' (describing the width of a cell along the center) and 4-element 'vertices' (describing the 4 corner coordinates of the cell).`cmip6_preprocessing` automatically renames these variables consistently and converts them so that every dataset has both conventions available.
###Code
from cmip6_preprocessing.preprocessing import correct_coordinates,parse_lon_lat_bounds, maybe_convert_bounds_to_vertex, maybe_convert_vertex_to_bounds
# same as above
def wrapper(ds):
ds = ds.copy()
ds = rename_cmip6(ds)
ds = promote_empty_dims(ds)
ds = broadcast_lonlat(ds)
ds = replace_x_y_nominal_lat_lon(ds)
ds = correct_lon(ds)
ds = correct_coordinates(ds)
ds = parse_lon_lat_bounds(ds)
ds = maybe_convert_bounds_to_vertex(ds)
ds = maybe_convert_vertex_to_bounds(ds)
return ds
# pass the preprocessing directly
dset_dict_processed3 = cat.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=wrapper)
for k, ds in dset_dict_processed3.items():
print(ds)
###Output
<xarray.Dataset>
Dimensions: (bnds: 2, lev: 60, member_id: 1, time: 1980, vertex: 4, x: 320, y: 384)
Coordinates:
lat (y, x) float64 dask.array<chunksize=(384, 320), meta=np.ndarray>
lat_verticies (y, x, vertex) float32 dask.array<chunksize=(384, 320, 4), meta=np.ndarray>
* lev (lev) float64 500.0 1.5e+03 2.5e+03 ... 5.125e+05 5.375e+05
lev_bounds (lev, bnds) float32 dask.array<chunksize=(60, 2), meta=np.ndarray>
lon (y, x) float64 dask.array<chunksize=(384, 320), meta=np.ndarray>
lon_verticies (y, x, vertex) float32 dask.array<chunksize=(384, 320, 4), meta=np.ndarray>
* y (y) float64 -79.22 -78.69 -78.15 -77.62 ... 89.11 89.66 89.71
* x (x) float64 1.062 2.187 3.312 4.437 ... 357.7 358.8 359.9
* time (time) float64 6.749e+05 6.749e+05 ... 7.351e+05 7.351e+05
time_bounds (time, bnds) float64 dask.array<chunksize=(1980, 2), meta=np.ndarray>
* bnds (bnds) int64 0 1
* vertex (vertex) int64 0 1 2 3
lon_bounds (bnds, y, x) float32 dask.array<chunksize=(1, 384, 320), meta=np.ndarray>
lat_bounds (bnds, y, x) float32 dask.array<chunksize=(1, 384, 320), meta=np.ndarray>
* member_id (member_id) <U8 'r1i1p1f1'
Data variables:
thetao (member_id, time, lev, y, x) float32 dask.array<chunksize=(1, 8, 60, 384, 320), meta=np.ndarray>
Attributes:
Conventions: CF-1.7 CMIP-6.2
activity_id: CMIP
branch_method: standard
branch_time_in_child: 674885.0
branch_time_in_parent: 219000.0
case_id: 15
cesm_casename: b.e21.BHIST.f09_g17.CMIP6-historical.001
contact: [email protected]
creation_date: 2019-01-16T22:01:19Z
data_specs_version: 01.00.29
experiment: all-forcing simulation of the recent past
experiment_id: historical
external_variables: areacello volcello
forcing_index: 1
frequency: mon
further_info_url: https://furtherinfo.es-doc.org/CMIP6.NCAR.CESM2....
grid: native gx1v7 displaced pole grid (384x320 latxlon)
grid_label: gn
initialization_index: 1
institution: National Center for Atmospheric Research, Climat...
institution_id: NCAR
license: CMIP6 model data produced by <The National Cente...
mip_era: CMIP6
model_doi_url: https://doi.org/10.5065/D67H1H0V
nominal_resolution: 100 km
parent_activity_id: CMIP
parent_experiment_id: piControl
parent_mip_era: CMIP6
parent_source_id: CESM2
parent_time_units: days since 0001-01-01 00:00:00
parent_variant_label: r1i1p1f1
physics_index: 1
product: model-output
realization_index: 1
realm: ocean
source: CESM2 (2017): atmosphere: CAM6 (0.9x1.25 finite ...
source_id: CESM2
source_type: AOGCM BGC
sub_experiment: none
sub_experiment_id: none
table_id: Omon
tracking_id: hdl:21.14100/672d35cd-e662-4807-8dee-7d7d5e1d4d1c
variable_id: thetao
variant_info: CMIP6 20th century experiments (1850-2014) with ...
variant_label: r1i1p1f1
status: 2019-10-25;created;by [email protected]
netcdf_tracking_ids: hdl:21.14100/672d35cd-e662-4807-8dee-7d7d5e1d4d1c
version_id: v20190308
intake_esm_varname: ['thetao']
intake_esm_dataset_key: CMIP.NCAR.CESM2.historical.Omon.gn
<xarray.Dataset>
Dimensions: (bnds: 2, lev: 45, member_id: 1, time: 1980, vertex: 4, x: 360, y: 291)
Coordinates:
* x (x) float64 0.5 1.5 2.5 3.5 4.5 ... 356.5 357.5 358.5 359.5
* y (y) float32 -78.39 -78.19 -77.98 -77.77 ... 88.96 89.37 89.74
lat (y, x) float32 dask.array<chunksize=(291, 360), meta=np.ndarray>
* lev (lev) float64 3.047 9.454 16.36 ... 5.375e+03 5.625e+03
lev_bounds (lev, bnds) float64 dask.array<chunksize=(45, 2), meta=np.ndarray>
lon (y, x) float32 dask.array<chunksize=(291, 360), meta=np.ndarray>
* time (time) int64 0 708 1416 2148 ... 1443192 1443924 1444656
time_bounds (time, bnds) float64 dask.array<chunksize=(1980, 2), meta=np.ndarray>
lat_verticies (y, x, vertex) float32 dask.array<chunksize=(291, 360, 4), meta=np.ndarray>
lon_verticies (y, x, vertex) float32 dask.array<chunksize=(291, 360, 4), meta=np.ndarray>
* bnds (bnds) int64 0 1
* vertex (vertex) int64 0 1 2 3
lon_bounds (bnds, y, x) float32 dask.array<chunksize=(1, 291, 360), meta=np.ndarray>
lat_bounds (bnds, y, x) float32 dask.array<chunksize=(1, 291, 360), meta=np.ndarray>
* member_id (member_id) <U8 'r1i1p1f1'
Data variables:
thetao (member_id, time, lev, y, x) float32 dask.array<chunksize=(1, 6, 45, 291, 360), meta=np.ndarray>
Attributes:
CCCma_model_hash: 3dedf95315d603326fde4f5340dc0519d80d10c0
CCCma_parent_runid: rc3-pictrl
CCCma_pycmor_hash: 094aa868e787693cfe55b2f1665f6a6b0880b03a
CCCma_runid: rc3.1-his01
Conventions: CF-1.7 CMIP-6.2
YMDH_branch_time_in_child: 1850:01:01:00
YMDH_branch_time_in_parent: 5201:01:01:00
activity_id: CMIP
branch_method: Spin-up documentation
branch_time_in_child: 0.0
branch_time_in_parent: 1223115.0
cmor_version: 3.4.0
contact: [email protected]
creation_date: 2019-03-14T05:00:40Z
data_specs_version: 01.00.29
experiment: all-forcing simulation of the recent past
experiment_id: historical
external_variables: areacello volcello
forcing_index: 1
frequency: mon
further_info_url: https://furtherinfo.es-doc.org/CMIP6.CCCma.C...
grid: ORCA1 tripolar grid, 1 deg with refinement t...
grid_label: gn
history: 2019-03-14T05:00:40Z ;rewrote data to be con...
initialization_index: 1
institution: Canadian Centre for Climate Modelling and An...
institution_id: CCCma
license: CMIP6 model data produced by The Government ...
mip_era: CMIP6
nominal_resolution: 100 km
parent_activity_id: CMIP
parent_experiment_id: piControl
parent_mip_era: CMIP6
parent_source_id: CanESM5
parent_time_units: days since 1850-01-01 0:0:0.0
parent_variant_label: r1i1p1f1
physics_index: 1
product: model-output
realization_index: 1
realm: ocean
references: Geophysical Model Development Special issue ...
source: CanESM5 (2017): \naerosol: interactive\natmo...
source_id: CanESM5
source_type: AOGCM
sub_experiment: none
sub_experiment_id: none
table_id: Omon
table_info: Creation Date:(13 December 2018) MD5:e84cb97...
title: CanESM5 output prepared for CMIP6
tracking_id: hdl:21.14100/5446945e-4a3c-43a1-babd-af08607...
variable_id: thetao
variant_label: r1i1p1f1
version: v20190306
status: 2019-11-06;created;by [email protected]
netcdf_tracking_ids: hdl:21.14100/5446945e-4a3c-43a1-babd-af08607...
version_id: v20190306
intake_esm_varname: ['thetao']
intake_esm_dataset_key: CMIP.CCCma.CanESM5.historical.Omon.gn
###Markdown
 The vertex convention is consistent across models. The points are sorted from lower-left, upper-left, upper-right to lower-right. TL;DR How to put it all togetherTo combine all these (or just some you like), you can create a wrapper function as above, or you can use the provided `combined_preprocessing`, which does all the above.> Due to concerns regarding the [dask performance of large datasets](https://github.com/jbusecke/cmip6_preprocessing/issues/94), the latest version of `combined_preprocessing` does not apply `replace_x_y_nominal_lat_lon` anymore. You can still apply this at a later point, preferably at the end of a processing step, to enable rough selection of regions (see example below).
###Code
from cmip6_preprocessing.preprocessing import combined_preprocessing
# lets load a bunch more models this time
query = dict(experiment_id=['piControl', 'historical'],
table_id='Oyr',
source_id=[
'GFDL-ESM4',
'IPSL-CM6A-LR',
'CanESM5',
'CanESM5-CanOE',
'MPI-ESM-1-2-HAM',
'MPI-ESM1-2-HR',
'MPI-ESM1-2-LR',
'ACCESS-ESM1-5',
'MRI-ESM2-0',
'IPSL-CM5A2-INCA',
'EC-Earth3-CC'
],
variable_id='o2',
grid_label=['gn', 'gr'])
cat = col.search(**query)
print(cat.df['source_id'].unique())
with dask.config.set(**{'array.slicing.split_large_chunks': True}):
dset_dict = cat.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=combined_preprocessing)
fig, axarr = plt.subplots(nrows=4, ncols=3, figsize=[25,15])
for ax,(k, ds) in zip(axarr.flat,dset_dict.items()):
if 'member_id' in ds.dims:
ds = ds.isel(member_id=0)
da = ds.o2.isel(time=0).interp(lev=2500)
# this step is necessary to order the longitudes properly for simple plotting. Alternatively you could use a proper map projection
# with e.g. cartopy and would not need this step
da = replace_x_y_nominal_lat_lon(da)
da = da.sel(x=slice(100, 200), y=slice(-20,20))
try:
da.plot(ax=ax, vmax=0.25, vmin=0.05)
except:
print(k)
pass
ax.set_title(k)
###Output
_____no_output_____
###Markdown
Rechunker TutorialThis tutorial notebook explains how to use rechunker with real datasets. We will also use xarray to make some things easier and prettier, but we note that xarray is not a dependency for rechunker. Toy Example Create Example DataHere we load one of xarray's tutorial datasets and write it to Zarr. This is not actually a big dataset, so rechunker is not really needed here. But it's a convenient example.
###Code
import xarray as xr
xr.set_options(display_style='text')
import zarr
import dask.array as dsa
ds = xr.tutorial.open_dataset("air_temperature")
# create initial chunk structure
ds = ds.chunk({'time': 100})
ds.air.encoding = {} # helps when writing to zarr
ds
###Output
_____no_output_____
###Markdown
We can examine the chunk structure of the data variable using Dask's pretty Array repr.
###Code
ds.air.data
! rm -rf *.zarr # clean up any existing temporary data
ds.to_zarr('air_temperature.zarr')
###Output
_____no_output_____
###Markdown
Now we open up a Zarr Group and Array that we will use as inputs to rechunker.
###Code
source_group = zarr.open('air_temperature.zarr')
print(source_group.tree())
source_array = source_group['air']
source_array.info
###Output
_____no_output_____
###Markdown
Rechunk a single ArrayThe original array has chunks of (100, 25, 53). Let's rechunk it to be contiguous in time, but chunked in space.We specify a small value of `max_mem` in order to force rechunker to create an intermediate dataset. We also have to specify a place to store the final and intermediate data.We use the [rechunk](api.rstrechunker.rechunk) function, which returns a [Rechunked](api.rstrechunker.Rechunked) object.
###Code
from rechunker import rechunk
target_chunks = (2920, 25, 1)
max_mem = '1MB'
target_store = 'air_rechunked.zarr'
temp_store = 'air_rechunked-tmp.zarr'
array_plan = rechunk(source_array, target_chunks, max_mem, target_store, temp_store=temp_store)
array_plan
###Output
_____no_output_____
###Markdown
 Since this array has named dimensions, we can also specify the chunks using a dictionary syntax.
###Code
target_chunks_dict = {'time': 2920, 'lat': 25, 'lon': 1}
# need to remove the existing stores or it won't work
!rm -rf air_rechunked.zarr air_rechunked-tmp.zarr
array_plan = rechunk(source_array, target_chunks_dict, max_mem, target_store, temp_store=temp_store)
array_plan
###Output
_____no_output_____
###Markdown
The `array_plan` is a `Rechunked` object.It has not actually performed the rechunking yet.To do this, we need to call the `execute` method.This will use Dask to perform the rechunking.
###Code
result = array_plan.execute()
result.chunks
###Output
_____no_output_____
###Markdown
By default, Dask will use the multi-threaded scheduler.Since rechunking can take a long time, we might want to use a progress bar.
###Code
from dask.diagnostics import ProgressBar
with ProgressBar():
array_plan.execute()
###Output
[########################################] | 100% Completed | 6.2s
###Markdown
If we create a distributed cluster, then rechunker will use that when it executes.
###Code
from dask.distributed import Client, LocalCluster, progress
cluster = LocalCluster()
client = Client(cluster)
future = array_plan.persist()
progress(future)
###Output
_____no_output_____
###Markdown
Now that it is written to disk, we can open the rechunked array however we please. Using Zarr...
###Code
target_array = zarr.open('air_rechunked.zarr')
target_array
###Output
_____no_output_____
###Markdown
...or Dask
###Code
target_array_dask = dsa.from_zarr('air_rechunked.zarr')
target_array_dask
###Output
_____no_output_____
###Markdown
Rechunk a GroupIn the example above, we only rechunked a single array.We can open it with Dask, but not Xarray, because it doesn't contain any coordinates or metadata.Rechunker also supports rechunking entire groups.In this case, `target_chunks` must be a dictionary.
###Code
target_chunks = {
'air': {'time': 2920, 'lat': 25, 'lon': 1},
'time': None, # don't rechunk this array
'lon': None,
'lat': None,
}
max_mem = '1MB'
target_store = 'group_rechunked.zarr'
temp_store = 'group_rechunked-tmp.zarr'
array_plan = rechunk(source_group, target_chunks, max_mem, target_store, temp_store=temp_store)
array_plan
array_plan.execute()
###Output
_____no_output_____
###Markdown
Now that we have written a group, we can open it back up with Xarray.
###Code
xr.open_zarr('group_rechunked.zarr')
###Output
_____no_output_____
###Markdown
Cloud ExampleIn this example we use real data from Pangeo's [Cloud Data Catalog](http://catalog.pangeo.io/).This dataset is stored in Google Cloud Storage.We also use a [Dask Gateway](https://gateway.dask.org/) distributed cluster to scale up our processing.This part of the tutorial won't work for you unless you are in a [Pangeo Cloud](http://pangeo.io/cloud.html) environment or binder.
###Code
from dask_gateway import GatewayCluster
cluster = GatewayCluster()
cluster.scale(20)
cluster
from dask.distributed import Client
client = Client(cluster)
client
import gcsfs
# a zarr group lives here
url = 'gs://pangeo-cmems-duacs'
gcs = gcsfs.GCSFileSystem(requester_pays=True)
source_store = gcs.get_mapper(url)
###Output
_____no_output_____
###Markdown
Open Zarr Array
###Code
group = zarr.open_consolidated(source_store, mode='r')
source_array = group['sla']
source_array
source_array.chunks
###Output
_____no_output_____
###Markdown
Make a Rechunking Plan
###Code
max_mem = '1GB'
target_chunks = (8901, 72, 72)
# you must have write access to this location
store_tmp = gcs.get_mapper('pangeo-scratch/rabernat/rechunker_demo/temp.zarr')
store_target = gcs.get_mapper('pangeo-scratch/rabernat/rechunker_demo/target.zarr')
r = rechunk(source_array, target_chunks, max_mem,
store_target, temp_store=store_tmp)
r
###Output
_____no_output_____
###Markdown
Execute the Plan
###Code
result = r.execute()
result
dsa.from_zarr(result)
###Output
_____no_output_____
###Markdown
stacklogNested benchmarking and timing code.Stacklog uses a `Logger` object to capture timing output, so you can use multiple logging objects at once.The simplest interface is at the module level, and uses the `'default'` `Logger`.
###Code
import stacklog
import time
def comp(t=.1):
time.sleep(t)
###Output
_____no_output_____
###Markdown
The `Logger` accumulates input, but we can make sure we have an empty log with `reset`.
###Code
stacklog.reset()
###Output
_____no_output_____
###Markdown
- `tic()` and `toc()` can be used to time a section of code- `tic()` needs a key to identify the result in a dictionary- multiple timings with the same key get appended to a list- `log()` is an interface to the dictionary of all the `Logger` objects- without arguments `log()` returns the default `Logger` (same as `log('default')`)
###Code
from stacklog import tic, toc, log, reset
reset()
# normal tic and toc for timing
tic('test1')
comp()
toc()
# with the same key, times get appended to a list
tic('multi')
comp()
toc()
tic('multi')
comp()
toc()
tic('multi')
comp()
toc()
# pretty print the result
log()
###Output
_____no_output_____
###Markdown
- the `Logger` contains a dictionary of all recorded timings- `peek` exposes that dictionary without resetting it- `pull` exposes the dictionary and resets the `Logger`- `pretty_dict` will attempt to pretty print the dictionary- `pretty_dict`'s `clean` argument defaults to `True`, which will remove empty data structures and replace single-element lists with their value
###Code
from stacklog import peek, pull, pretty_dict
# grab the dictionary of the log
result = peek()
# print the full data structure
pretty_dict(result, clean=False)
print "#"*10
# pretty print a cleaned up version (removing empty containers and extra parens)
pretty_dict(result)
###Output
{'multi': [(0.10065293312072754, {}),
(0.10074996948242188, {}),
(0.10254788398742676, {})],
'test1': [(0.10051989555358887, {})]}
##########
{'multi': [0.10065293312072754, 0.10074996948242188, 0.10254788398742676],
'test1': 0.10051989555358887}
###Markdown
- we can also use the `timer` context manager and the `with` statement instead of `tic` and `toc`
###Code
from stacklog import timer
# can also use a context manager to time instead of tic and toc
with timer('something new'):
comp()
log()
###Output
_____no_output_____
###Markdown
- `pull` returns the `Logger`'s dictionary and resets the log
###Code
result1 = pull()
pretty_dict(result1)
print "#"*10
with timer('all alone'):
comp()
peek()
###Output
{'multi': [0.10065293312072754, 0.10074996948242188, 0.10254788398742676],
'something new': 0.10142993927001953,
'test1': 0.10051989555358887}
##########
###Markdown
nested timing- nesting `tic/toc` and `timer` creates more complicated, hierarchical timing structures
###Code
reset()
with timer('outer'):
tic('inner')
comp()
toc()
with timer('another inner'):
comp()
comp()
comp()
for i in range(4):
with timer('way in there'):
comp()
pretty_dict(peek())
###Output
{'outer': (0.8253848552703857,
{'another inner': (0.7232990264892578,
{'way in there': [0.1048269271850586,
0.1003260612487793,
0.10056900978088379,
0.10022306442260742]}),
'inner': 0.10188698768615723})}
###Markdown
- we can use `lost_time` to see the difference between a parent timer and the sum of its children, i.e. how much time is unaccounted for
###Code
from stacklog import lost_time
lost_time(peek())
###Output
_____no_output_____
###Markdown
- we can also use `pretty_dict` with the output of `lost_time`
###Code
pretty_dict(lost_time(peek()))
###Output
{'outer': (0.00019884109497070312, {'another inner': 0.3173539638519287})}
###Markdown
- notice that outer has very little unaccounted time- this is because the 'outer' timer is about equal to the sum of its children 'another inner' and 'inner'- however, the lost time for 'another inner' is about .3 seconds, which is exactly how long the untimed `comp()` functions should take to execute- if we account for them, we shouldn't have such a large loss
###Code
reset()
with timer('outer'):
tic('inner')
comp()
toc()
with timer('another inner'):
with timer('previously unaccounted for'):
comp()
comp()
comp()
for i in range(4):
with timer('way in there'):
comp()
pretty_dict(peek())
###Output
{'outer': (0.8192839622497559,
{'another inner': (0.7142300605773926,
{'previously unaccounted for': 0.3051471710205078,
'way in there': [0.10043597221374512,
0.10362792015075684,
0.10380291938781738,
0.10069108009338379]}),
'inner': 0.10493993759155273})}
###Markdown
- we see that all the times add up as expected
###Code
pretty_dict(lost_time(peek()))
###Output
{'outer': (0.00011396408081054688, {'another inner': 0.0005249977111816406})}
###Markdown
`Logger` interfaceYou can use multiple `Logger`s at once, by calling the member methods of the `Logger` objects.
###Code
from stacklog import Logger
a = Logger()
b = Logger()
a.tic('outer')
comp()
b.tic('something else')
comp()
comp()
comp()
a.toc()
comp()
comp()
b.toc()
print(a)
print(b)
###Output
Stack item 0:
{'outer': 0.41005778312683105}
Stack item 0:
{'something else': 0.5142879486083984}
###Markdown
GCM Filters Tutorial Synthetic DataIn this example, we are going to work with "synthetic data"; data we made up for the sake of keeping the example simple and self-contained. Create Input DataGcm-filters uses Xarray DataArrays for its inputs and outputs. So we will first import xarray (and numpy).
###Code
import gcm_filters
import numpy as np
import xarray as xr
###Output
_____no_output_____
###Markdown
Now we will create a random 3D cube of data.
###Code
nt, ny, nx = (10, 128, 256)
data = np.random.rand(nt, ny, nx)
da = xr.DataArray(data, dims=['time', 'y', 'x'])
da
###Output
_____no_output_____
###Markdown
 To make things a bit more interesting, we will create a "land mask"; a binary array representing topography in our made-up ocean.The convention here is that the array is 1 in the ocean ("wet points") and 0 on land ("dry points").
###Code
mask_data = np.ones((ny, nx))
mask_data[(ny // 4):(3 * ny // 4), (nx // 4):(3 * nx // 4)] = 0
wet_mask = xr.DataArray(mask_data, dims=['y', 'x'])
wet_mask.plot()
###Output
_____no_output_____
###Markdown
We have made a big island.We now use this to mask our data.
###Code
da_masked = da.where(wet_mask)
da_masked[0].plot()
###Output
_____no_output_____
###Markdown
Create a FilterThe main class we use from gcm-filters is the {class}`gcm_filters.Filter` object.When we create a filter, we specify how we want to smooth the data, including the filter shape and all the relevant parameters.To define a filter, we need to pick a few options from the predefined lists of filter shapes and grid types.The possible filter shapes are enumerated as follows:
###Code
list(gcm_filters.FilterShape)
###Output
_____no_output_____
###Markdown
The possible grid types are:
###Code
list(gcm_filters.GridType)
###Output
_____no_output_____
###Markdown
(This list will grow as we implement more Laplacians).For now, we will choose `CARTESIAN_WITH_LAND`, which matches our synthetic data.Each grid type has different "grid variables" that must be provided.To find out what these are, we can use this utility function.
###Code
gcm_filters.required_grid_vars(gcm_filters.GridType.CARTESIAN_WITH_LAND)
###Output
_____no_output_____
###Markdown
So if we use this grid type, we have to include a `wet_mask` grid variable.We are now ready to create our filter object.
###Code
filter = gcm_filters.Filter(
filter_scale=4,
dx_min=1,
filter_shape=gcm_filters.FilterShape.TAPER,
grid_type=gcm_filters.GridType.CARTESIAN_WITH_LAND,
grid_vars={'wet_mask': wet_mask}
)
filter
###Output
_____no_output_____
###Markdown
 The repr for the filter object includes some of the parameters it was initialized with, to help us keep track of what we are doing. Apply the FilterNow that we have our filter defined, we can use it on some data. We need to specify which dimension names to apply the filter over. In this case, it is y, x.
###Code
%time da_filtered = filter.apply(da_masked, dims=['y', 'x'])
da_filtered
###Output
CPU times: user 1.88 s, sys: 214 ms, total: 2.1 s
Wall time: 333 ms
###Markdown
Let's visualize what the filter did:
###Code
da_filtered[0].plot()
###Output
_____no_output_____
###Markdown
It can be useful to know where the land mask has influenced our results--for example, for assessing commutativity of the filter with differential operators.We can get at this by applying the filter to the land mask itself.We will create a new filter object that ignores the land.
###Code
filter_noland = gcm_filters.Filter(
filter_scale=4,
dx_min=1,
filter_shape=gcm_filters.FilterShape.TAPER,
grid_type=gcm_filters.GridType.CARTESIAN,
)
mask_filtered = filter_noland.apply(wet_mask, dims=['y', 'x'])
mask_filtered.plot()
###Output
_____no_output_____
###Markdown
 Use DaskUp to now, we have operated "eagerly"; when we called `.apply`, the results were computed immediately and stored in memory.Gcm-filters is also designed to work seamlessly with Dask array inputs, deferring its computation and possibly executing it in parallel.We can do this with our synthetic data by converting it to dask.
###Code
da_dask = da_masked.chunk({'time': 2})
da_dask
da_filtered_lazy = filter.apply(da_dask, dims=['y', 'x'])
da_filtered_lazy
###Output
_____no_output_____
###Markdown
Nothing has actually been computed yet.We can trigger computation as follows:
###Code
%time da_filtered_computed = da_filtered_lazy.compute()
###Output
CPU times: user 574 ms, sys: 91.4 ms, total: 665 ms
Wall time: 224 ms
###Markdown
 TutorialIn this tutorial, we will give a brief introduction to the quantization and pruning techniques upon which QSPARSE is built. Using our library, we guide you through building an image classification neural network whose weights and activations are both fully quantized and pruned to a given sparsity level.> If you are already familiar with quantization and pruning methods and want to learn the programming syntax, please fast forward to [Building Network with QSPARSE](building-network-with-qsparse). PreliminariesQuantization and pruning are core techniques used to reduce the inference costs of deep neural networks and have been studied extensively. Approaches to quantization are often divided into two categories: 1. Post-training quantization2. Quantization aware trainingThe former applies quantization after a network has been trained, and the latter quantizes the network during training, thereby reducing the quantization error throughout the training process, and usually yields superior performance. Pruning techniques are often divided into unstructured or structured approaches, which define if and how to impose a pre-defined topology, e.g. channel-wise pruning. Here, we focus on applying quantization and unstructured pruning during training. Conceptual diagram of the computational graph of a network whose weights and activations are quantized and pruned using QSPARSE.In QSPARSE, we implement quantization and pruning as independent operators, which can be applied to both weights and activations, as demonstrated in the figure above. Uniform QuantizationWe denote the uniform quantization operation as $Q_u(\mathbf{x}, d)$, where $\mathbf{x}$ denotes the input to the operator (i.e. weights or activations), $N$ denotes the total number of bits used to represent weights and activations, and $d$ denotes the number of bits used to represent the fractional part (i.e. the position of the decimal point counted from the right; we will refer to $d$ as the decimal bits).$$Q_u(\mathbf{x}, d) = \text{clip}(\lfloor\mathbf{x} \times 2^{d}\rfloor, -2^{N-1}, 2^{N-1}-1) / 2^d$$The straight-through estimator (STE) is applied to calculate gradients in the backward computation.$$\frac{\partial Loss}{\partial \mathbf{x}} = \text{clip}(\frac{\partial Loss}{\partial Q_u(\mathbf{x}, d)}, -2^{N-d-1}, 2^{N-d-1} - 2^{-d})$$However, STE is known to be sensitive to weight initialization; therefore, we design the quantization operator $\text{Quantize}$ as follows.
Starting with the original full-precision network, we delay the quantization of the network to later training stages, and calculate the optimal decimal bits $d^*$ by minimizing the quantization error after a given number of update steps $t_q$.$$\text{Quantize}(\mathbf{x}_t) = \begin{cases} \mathbf{x}_t & t < t_q \\ Q_u(\mathbf{x}_t, d^*) & t \ge t_q \\ \end{cases} $$$$d^* = \arg \min_{d} \Vert Q_u(\mathbf{x}_{t_q}, d) - \mathbf{x}_{t_q} \Vert^2$$ Magnitude-based Unstructured PruningWe denote the unstructured pruning operator $\textbf{Prune}(\mathbf{x}, s)$ as element-wise multiplication between $\mathbf{x}$ and $\mathbf{M}_{\mathbf{x},s}$, where $\mathbf{x}$ denotes the input to the operator (i.e., weights or activations), $s$ denotes the target sparsity as measured by the percentage of zero-valued elements, and $\mathbf{M}_{\mathbf{x},s}$ denotes a binary mask.$$P(\mathbf{x}, s) = \mathbf{x} \circ \mathbf{M}_{\mathbf{x},s}$$Given that $(i,j)$ are the row and column indices, respectively, the binary mask $\mathbf{M}_{\mathbf{x},s}$ is calculated as below, where $\text{quantile}(\mathbf{x}, a)$ is the $a$-th quantile of $\mathbf{x}$.$$\mathbf{M}_{\mathbf{x},s}^{(i,j)} = \begin{cases} 1 & |\mathbf{x}^{(i, j)}| \ge \text{quantile}(|\mathbf{x}|, s) \\ 0 & \text{otherwise} \end{cases}$$As proposed by [Zhu et al.](https://arxiv.org/pdf/1710.01878.pdf), the sparsity level $s$ is controlled and updated according to a sparsification schedule at time steps $t_p + i \Delta t_p$ such that $i \in \{1, 2, \dots, n\}$, where $t_p$, $\Delta t_p$, and $n$ are hyperparameters that represent the starting pruning step, the pruning frequency, and the total number of pruning iterations, respectively. Building Network with QSPARSEWith the above methods in mind, in the following we will use QSPARSE to build a quantized and sparse network upon the full-precision network below, borrowed from the official PyTorch [MNIST example](https://github.com/pytorch/examples/blob/master/mnist/main.py).
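Before building the network, here is a minimal, illustrative NumPy sketch of the two operators defined above (the uniform quantizer $Q_u$ with a searched decimal bit $d^*$, and the magnitude-based pruning mask). It is only a sketch of the formulas, not the QSPARSE implementation itself.
###Code
import numpy as np

def quantize_uniform(x, d, N=8):
    """Q_u(x, d): uniform quantization with N total bits and d decimal (fractional) bits."""
    scaled = np.floor(x * 2.0 ** d)
    clipped = np.clip(scaled, -2.0 ** (N - 1), 2.0 ** (N - 1) - 1)
    return clipped / 2.0 ** d

def best_decimal_bits(x, N=8):
    """Search d* that minimizes the quantization error ||Q_u(x, d) - x||^2."""
    errors = [np.sum((quantize_uniform(x, d, N) - x) ** 2) for d in range(N)]
    return int(np.argmin(errors))

def magnitude_mask(x, sparsity):
    """Binary mask M_{x,s}: keep entries whose magnitude is above the s-quantile of |x|."""
    threshold = np.quantile(np.abs(x), sparsity)
    return (np.abs(x) >= threshold).astype(x.dtype)

w = np.random.randn(32, 32)
d_star = best_decimal_bits(w)               # optimal decimal bits for these weights
w_quantized = quantize_uniform(w, d_star)   # quantized weights
w_pruned = w * magnitude_mask(w, 0.5)       # roughly 50% of entries set to zero
###Output
_____no_output_____
###Markdown
The full-precision baseline network used in the rest of this tutorial is defined below.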
###Code
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.bn1 = nn.BatchNorm2d(32)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.bn2 = nn.BatchNorm2d(64)
self.fc1 = nn.Linear(9216, 128)
self.bn3 = nn.BatchNorm1d(128)
self.fc2 = nn.Linear(128, 10)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = F.relu(self.bn3(self.fc1(x)))
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
Net()
###Output
_____no_output_____
###Markdown
 Weight Quantization and Pruning The part of the diagram in red corresponds to weight quantization and pruning.We can easily create a weight-quantized and pruned layer with QSPARSE. Take the convolution as an example:
###Code
from qsparse import prune, quantize, set_qsparse_options
set_qsparse_options(log_on_created=False)
conv = quantize(prune(nn.Conv2d(1, 32, 3),
sparsity=0.5, start=200,
interval=10, repetition=4),
bits=8, timeout=100)
conv
###Output
_____no_output_____
###Markdown
We can see that `prune` and `quantize` layers are injected. The output layer will behave identically to `nn.Conv2d` except that `conv.weight` will return a quantized and pruned version of the vanilla weight. As for the hyperparameters, they map to QSPARSE arguments as in the table below.

| Param | QSPARSE Argument |
|--------------|------------------|
| $N$ | `bits` |
| $t_q$ | `timeout` |
| $s$ | `sparsity` |
| $t_p$ | `start` |
| $n$ | `repetition` |
| $\Delta t_p$ | `interval` |

Both the `prune` and `quantize` layers maintain an internal counter to record the number of training steps that have passed through them. The counter values can be accessed through the `_n_updates` attribute. Based on the arguments specified above, `conv.weight` will be quantized from step 100 and pruned to 50% sparsity from step 240 (i.e. `start + repetition * interval` = 200 + 4 × 10), which can be verified by:
###Code
data = torch.rand((1, 1, 32, 32))
for _ in range(241):
conv(data)
conv.quantize._n_updates
(conv.weight * (2**conv.quantize.decimal)
- (conv.weight * (2**conv.quantize.decimal)).int()).sum().item()
print(len(conv.prune.mask.nonzero()) / np.prod(conv.prune.mask.shape))
print(np.all((conv.weight.detach().numpy() == 0)
== (conv.prune.mask.detach().numpy() == 0)))
###Output
0.5034722222222222
True
###Markdown
The `mask` and `decimal` denote the binary mask for pruning and the number of fractional bits for quantization, which we will revisit in [Inspecting Parameters of a Pruned/Quantized Model](../advanced_usage/inspecting-parameters-of-a-prunedquantized-model). The `prune` and `quantize` functions are compatible with any PyTorch module as long as its parameters can be accessed from its `weight` attribute. Take another example of a fully-connected layer:
###Code
quantize(prune(nn.Linear(128, 10), 0.5), 8)
###Output
_____no_output_____
###Markdown
 Activation Quantization and Pruning The part of the diagram in red corresponds to activation quantization and pruning.To prune and quantize the output of a convolution, we can directly insert `quantize` and `prune` into the computation graph by:
###Code
nn.Sequential(
conv,
prune(sparsity=0.5, start=200, interval=10, repetition=4),
quantize(bits=8, timeout=100),
nn.ReLU()
)
###Output
_____no_output_____
###Markdown
Similarly, the output of `conv` will be quantized from step 100 and pruned with 50% sparsity from step 240. Building a Network with Both Weight and Activation Quantized and PrunedUsing the techniques introduced above, we can implement the `Net` so as to have joint quantization and pruning training capabilities with full transparency and minimal effort:
###Code
class NetPQ(nn.Module):
def __init__(self, epoch_size=100):
super(NetPQ, self).__init__()
# input quantization, quantize at epoch 10
self.qin = quantize(bits=8, timeout=epoch_size * 10)
# For the sake of simplicity, we ignore the `timeout,start,repetition,
# interval` parameters in the following.
self.conv1 = quantize(nn.Conv2d(1, 32, 3, 1), 8)
self.bn1 = nn.BatchNorm2d(32)
self.p1, self.q1 = prune(sparsity=0.5), quantize(bits=8)
self.conv2 = quantize(prune(nn.Conv2d(32, 64, 3, 1), 0.5), 8)
self.bn2 = nn.BatchNorm2d(64)
self.p2, self.q2 = prune(sparsity=0.5), quantize(bits=8)
self.fc1 = quantize(prune(nn.Linear(9216, 128), 0.5), 8)
self.bn3 = nn.BatchNorm1d(128)
self.p3, self.q3 = prune(sparsity=0.5), quantize(bits=8)
self.fc2 = quantize(nn.Linear(128, 10), 8)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
def forward(self, x):
x = self.qin(x)
x = F.relu(self.q1(self.p1(self.bn1(self.conv1(x)))))
x = F.relu(self.q2(self.p2(self.bn2(self.conv2(x)))))
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = F.relu(self.q3(self.p3(self.bn3(self.fc1(x)))))
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
NetPQ()
###Output
_____no_output_____
###Markdown
 EduNLPEduNLP aims at providing easy-to-use interfaces for processing the text components in exercises and papers, including:* Syntax* Parsing* Segmentation* Embedding* AnalysisBefore we introduce the functionalities mentioned above, we will first define the data format we use for hybrid exercise content. Data FormatAs we know, exercises often contain hybrid content rather than simple text.As shown in Figure 1, a math exercise may contain an equation and an image, while a chemistry exercise may contain .Therefore, we propose a data format for hybrid exercises, named Unified Exercise Representation (UER).In UER, equations are represented in $$ format. For instance, the math exercise in Figure 1 is represented as.In summary, the syntax of UER is: Quick StartIn this part, we will introduce the following interfaces:* Syntax* Parsing* Segmentation* Embedding* Analysis SyntaxHaving presented UER, the first thing we want to do is to check whether an exercise is xxx.In addition, we will try to convert xxx to UER automatically when possible. Syntax Check
###Code
en_ex1 = ""
en_ex2 = ""
ch_ex1 = ""
ch_ex2 = ""
###Output
_____no_output_____
###Markdown
 Syntax CorrectionWe focus on handling these situations:* \$\text{}\$* f(x) + 1 = yThe following errors will not be handled:* `x^2+1=y` as `x 2 + 1 = y` ParsingAs we discussed, there are xxx such as equations and xxx. In order to xxx, we provide xxx to automatically xxx.Similarly, we xxx, named Unified Structure Representation (USR). Syntax of USR Example
###Code
eq1 = "y=x^2 + 1"
# $ will be automatically ignored
eq2 = "$y = x^2 + 1$"
###Output
_____no_output_____
###Markdown
Tutorial
###Code
# NBVAL_IGNORE_OUTPUT
%load_ext watermark
import pypgk_rtd01
%watermark -v --iversions
###Output
pypgk_rtd01 0.1.0-dev
CPython 3.6.7
IPython 7.1.1
###Markdown
$\newcommand{Re}[0]{\operatorname{Re}}\newcommand{Im}[0]{\operatorname{Im}}\newcommand{dd}[0]{\,\text{d}}\newcommand{abs}[0]{\operatorname{abs}}$ This notebook illustrates the basic use of the `pypgk_rtd01` package.
###Code
pypgk_rtd01.__version__
pypgk_rtd01.__doc__
###Output
_____no_output_____
###Markdown
xDSL tutorial Imports and setup
###Code
from xdsl import *
from xdsl.ir import *
from xdsl.irdl import *
from xdsl.dialects.func import *
from xdsl.dialects.arith import *
from xdsl.dialects.builtin import *
from xdsl.parser import *
from xdsl.printer import *
from xdsl.util import *
# MLContext, containing information about the registered dialects
context = MLContext()
# Some useful dialects
arith = Arith(context)
func = Func(context)
builtin = Builtin(context)
# Printer used to pretty-print MLIR data structures
printer = Printer()
###Output
_____no_output_____
###Markdown
 High-level presentation (TODO)Base ideas of what xDSL is. Example of a small program, and SSA. Base IR features Dialects Dialects are namespaces that contain a collection of attributes and operations. For instance, the Builtin dialect contains (but not exclusively) the attribute `!i32` and the operation `builtin.func`.A dialect is usually a single level of abstraction in the IR, and multiple dialects can be used together in the same MLIR program.Dialects are currently Python classes registering operations and attributes, and providing simple accessors to their attributes and operations.This will however change in the near future to provide a better interface to dialects. Attributes Attributes represent compile-time information.In particular, each SSA-value is associated with an attribute, representing its type.Each attribute type has a name and belongs to a dialect. The textual representation of attributes is prefixed with `!` and the dialect name.For instance, the `vector` attribute has the format `!builtin.vector<T>`, where `T` is the expected parameter of the attribute.In Python, attributes are always expected to be immutable objects inheriting from either `Data` or `ParametrizedAttribute`. Data attributes `Data` attributes are used to wrap python data structures. For instance, the `IntAttr` is an attribute containing an `int`, and the `StringAttr` is an attribute containing a `str`.`Data` attributes are parsed and printed with the format `!dialect_name.attr_name<custom_format>`, where `custom_format` is the format defined by the parser and printer of each `Data` attribute.Note that some attributes, such as `StringAttr`, are shortened by the printer, and do not require the full `!dialect_name.attr_name<custom_format>` form. For instance, `!builtin.str<"foo">` is shortened to `"foo"`. Here is an example of how to create and print an `IntAttr` attribute:
###Code
# Attribute definitions usually define a `get` method to create the attribute
my_int = IntAttr.from_int(42)
printer.print_attribute(my_int)
###Output
!int<42>
###Markdown
Note that here, the `IntAttr` does not print a dialect prefix. This will be fixed soon-ish.
###Code
# Access the data in the IntAttr:
print(my_int.data)
###Output
42
###Markdown
 Parametrized attributesParametrized attributes are attributes that optionally contain multiple attributes as parameters.For instance, the `integer` attribute from `builtin` is a parametrized attribute and expects two attributes as parameters.Parametrized attributes are printed with the format `!dialect.attr_name<attr_1, ..., attr_N>`, where the `attr_i` are the attribute parameters.Here is an example of how to create and inspect an `integer_type` attribute, which represents a machine integer type. It is parametrized by a single `IntAttr` parameter, representing the bitwidth.
###Code
# Get the int that will be passed as parameter to the integer_type
int_64 = IntAttr.from_int(64)
i64 = IntegerType([int_64])
printer.print_attribute(i64)
# Get back the parameters of IntegerType
printer.print_attribute(i64.parameters[0])
# Use a custom `get` method from IntegerType to construct it
assert IntegerType.from_width(64) == i64
###Output
_____no_output_____
###Markdown
Note that parametrized attributes may define invariants that need to be respected.For instance, constructing an `integer_type` with wrong parameters will trigger an error:
###Code
# Try to create an IntegerType with wrong parameters
try:
bad_attr = IntegerType([i64])
except Exception as err:
print(err)
###Output
IntegerType(name='integer_type', parameters=[IntAttr(name='int', data=64)]) should be of base attribute int
###Markdown
 Operations Operations represent the computation that a program can do. They span all abstraction levels, and can be domain-specific.For instance, `arith.addi` will add two integers, while `scf.if` represents an if/else structure.Operations are composed of:* A base operation type, which represents the semantics of the operation;* Operands, which are SSA-values previously defined;* Results, which are new SSA-values defined by the operation;* Attributes, which encode compile-time information about the operation;* Regions, which contain operations, and are used to represent more complex control-flow;* Successors, which are basic block names to which the operation can give control.The format of an operation is: `results = dialect_name.op_name(operands) (successors) [attributes] regions`Here is, for example, how to create a constant operation, representing a constant value:
###Code
const_op = Constant.create([], [i64], attributes={"value": IntegerAttr.from_int_and_width(62, 64)})
printer.print_op(const_op)
###Output
%0 : !i64 = arith.constant() ["value" = 62 : !i64]
###Markdown
Note that dialects usually define methods to ease the definition of such operations:
###Code
const_op2 = Constant.from_attr(IntegerAttr.from_int_and_width(62, 64), i64)
printer.print_op(const_op2)
###Output
%1 : !i64 = arith.constant() ["value" = 62 : !i64]
###Markdown
We can use the results from the operation to pass them as operands for a later operation. For instance, we will add the constant to itself using the `arith.addi` operation:
###Code
add_op = Addi.create([const_op.results[0], const_op.results[0]], [i64], {})
printer.print_op(const_op)
print()
printer.print_op(add_op)
###Output
%2 : !i64 = arith.constant() ["value" = 62 : !i64]
%3 : !i64 = arith.addi(%2 : !i64, %2 : !i64)
###Markdown
We can also put the operations in regions, which can be then used by other operations (such as func)
###Code
my_region = Region.from_operation_list([const_op, add_op])
printer._print_region(my_region)
###Output
{
%4 : !i64 = arith.constant() ["value" = 62 : !i64]
%5 : !i64 = arith.addi(%4 : !i64, %4 : !i64)
}
###Markdown
 Functions are created using the `builtin.func` op, which contains a single region:
###Code
my_func = FuncOp.from_region("my_function", [], [], my_region)
printer.print_op(my_func)
###Output
builtin.func() ["sym_name" = "my_function", "type" = !fun<[], []>, "sym_visibility" = "private"] {
%6 : !i64 = arith.constant() ["value" = 62 : !i64]
%7 : !i64 = arith.addi(%6 : !i64, %6 : !i64)
}
###Markdown
Tutorial for *diskmap* This tutorial showcases the functionalities of *diskmap*. We will use a polarized scattered light image of the LkCa 15 circumstellar disk. The data were obtained with [VLT/SPHERE](https://www.eso.org/sci/facilities/paranal/instruments/sphere.html) in the $J$ band and have been published by [Thalmann et al. (2016)](https://ui.adsabs.harvard.edu/abs/2016ApJ...828L..17T/abstract). Getting started We start by importing the Python modules that are required for this tutorial.
###Code
import diskmap
import glob
import matplotlib.pyplot as plt
import numpy as np
import urllib.request
from astropy.io import fits
###Output
_____no_output_____
###Markdown
 Next, we download the $J$ band image of the LkCa 15 circumstellar disk.
###Code
urllib.request.urlretrieve('https://home.strw.leidenuniv.nl/~stolker/diskmap/lkca15_irdis_qphi.fits',
'lkca15_irdis_qphi.fits')
###Output
_____no_output_____
###Markdown
We can read the FITS file with `astropy`.
###Code
image = fits.getdata('lkca15_irdis_qphi.fits')
###Output
_____no_output_____
###Markdown
Let's have a look at the image. The central masked region contains NaNs.
###Code
plt.imshow(image, origin='lower', vmin=np.nanmin(image), vmax=0.35*np.nanmax(image))
###Output
_____no_output_____
###Markdown
 Mapping of the disk surface We will now create an instance of `DiskMap` by providing the FITS filename, the pixel scale of the IRDIS detector (12.25 mas), the inclination (50 deg) and position angle (90 deg) of the disk, the distance (160 pc), and the image type (polarized flux).The inclination convention is such that the near side is located on the right side of the image when using an inclination between 0 and 90 deg and a position angle of 0 deg. Therefore, with a position angle of 90 deg, the near side will be in the upward direction of the image, as we will also see later in the `_radius.fits` file.
###Code
mapping = diskmap.DiskMap(fitsfile='lkca15_irdis_qphi.fits',
pixscale=0.01225,
inclination=50.,
pos_angle=90.,
distance=160.,
image_type='polarized')
###Output
_____no_output_____
###Markdown
 The scattering surface of the disk is mapped with the `map_disk` method. Here, we provide a powerlaw function as the (approximate) shape of the disk surface, for which we assume a constant opening angle: $h(r) = 0 + 0.05r^{1}$. The argument of `radius` specifies the sampling of the radii (100 points between 1 and 500 au).For running the deprojection later on, it is important that the outer radius of the `radius` parameter is larger than the field of view of the image. This may not be possible if the disk is strongly inclined and flaring. A mapping of the full field of view is not required for the $r^2$ scaling and phase function extraction. In that case, a smaller outer radius can be used, for example the actual outer radius of the disk. The radius and scattering angle output will contain NaNs beyond the outer radius.
###Code
mapping.map_disk(power_law=(0., 0.05, 1.),
radius=(1., 500., 100))
###Output
_____no_output_____
###Markdown
Radius and scattering angle The available output from the `DiskMap` methods are written by calling `write_output`. The argument of `filename` contains the prefix of the output files.
###Code
mapping.write_output(filename='lkca15')
###Output
_____no_output_____
###Markdown
Let's see which FITS files have been written.
###Code
glob.glob('*.fits')
###Output
_____no_output_____
###Markdown
For simplicity with the plots, we define half the field of view in arcseconds.
###Code
size = mapping.pixscale * image.shape[0]/2
###Output
_____no_output_____
###Markdown
The deprojected radius (in au) from the disk surface to the star is stored in the `_radius.fits` file. Let's plot the image from this FITS file.
###Code
radius = fits.getdata('lkca15_radius.fits')
plt.imshow(radius, origin='lower', extent=[size, -size, -size, size])
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Deprojected radius (au)', size=14)
###Output
_____no_output_____
###Markdown
Similarly, the scattering angles on the disk surface are stored in the `_scat_angle.fits` file. The scattering angle is defined as 180 degrees minus the angle between the direction from the disk surface to the star and the direction from the disk surface to the observer.
###Code
scat_angle = fits.getdata('lkca15_scat_angle.fits')
plt.imshow(scat_angle, origin='lower', extent=[size, -size, -size, size])
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Scattering angle (deg)', size=14)
###Output
_____no_output_____
###Markdown
Irradiation correction Now that we have the deprojected distance from each pixel to the star, we can compute the stellar irradiation corrected (i.e. $r^2$ scaled) image. We set a maximum radius of 100 au such that the flux at large separations, which only consists of noise, is not enhanced by the scaling.
###Code
mapping.r2_scaling(r_max=100.)
###Output
_____no_output_____
###Markdown
We run again the `write_output` method such that also the r$^2$ scaled image is stored as FITS file.
###Code
mapping.write_output(filename='lkca15')
###Output
_____no_output_____
###Markdown
 Let's have a look at the r$^2$ scaled image. The dynamic range is smaller compared to the regular image, which brings out the disk features more clearly.
###Code
r2_scaled = fits.getdata('lkca15_r2_scaled.fits')
plt.imshow(r2_scaled, origin='lower', extent=[size, -size, -size, size])
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Flux (r$^2$ ADU)', size=14)
###Output
_____no_output_____
###Markdown
 Disk deprojection Next, we will use the 3D mapping of the disk surface to deproject the image with the `deproject_disk` method. The deprojection therefore corrects both for the inclination (i.e. the disk midplane) and for the height of the disk surface (i.e. the powerlaw profile).
###Code
mapping.deproject_disk()
###Output
_____no_output_____
###Markdown
And we write again all available output files.
###Code
mapping.write_output(filename='lkca15')
###Output
_____no_output_____
###Markdown
The deprojected image is stored in the FITS file with the `_deprojected.fits` suffix. This image shows what the disk would look like at an inclination of 0 degrees. Let's have a look at the result.
###Code
deprojected = fits.getdata('lkca15_deprojected.fits')
plt.imshow(deprojected, origin='lower', extent=[size, -size, -size, size],
vmin=np.amin(deprojected), vmax=0.25*np.amax(deprojected))
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Flux (ADU)', size=14)
###Output
_____no_output_____
###Markdown
Estimated total intensity image By assuming a bell-shaped (i.e. Rayleigh-like curve with an adjustable peak value) degree of polarization and using the scattering angles from before, we use the `total_intensity` method to convert the r$^2$-scaled, polarized intensity image into an estimated total intensity image. This method should therefore only be used if the input image is a polarized light image (i.e. `image_type='polarized'`).In this example, we assume a maximum polarization of 100% at a scattering angle of 90 degrees, which is to be expected for aggregate-like dust grains with submicron-sized monomers.
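As an illustration of what such a bell-shaped curve looks like, the sketch below assumes a Rayleigh-like degree of polarization $p(\theta) = p_\mathrm{max}\,\sin^2\theta / (1 + \cos^2\theta)$, which peaks at a scattering angle of 90 degrees; the exact functional form used internally by `total_intensity` may differ. Dividing the polarized flux by such a curve (where it is well defined) yields an estimate of the total flux.
###Code
# illustrative bell-shaped degree-of-polarization curve (assumed form, peaking at 90 deg)
theta = np.linspace(0., 180., 181)
pol_max = 1.
deg_pol = pol_max * np.sin(np.radians(theta))**2 / (1. + np.cos(np.radians(theta))**2)

plt.plot(theta, deg_pol)
plt.xlabel('Scattering angle (deg)', fontsize=14)
plt.ylabel('Degree of polarization', fontsize=14)
###Output
_____no_output_____
###Markdown
The conversion itself is done with the `total_intensity` method, where we set `pol_max` to 1 (i.e. 100% polarization at a scattering angle of 90 degrees).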
###Code
mapping.total_intensity(pol_max=1.)
###Output
_____no_output_____
###Markdown
We write again all available output, which now also includes the r$^2$-scaled, total intensity image.
###Code
mapping.write_output(filename='lkca15')
###Output
_____no_output_____
###Markdown
Let's plot the total intensity image. The forward scattering by dust grains on the near/north side of the disk is visible in this image.
###Code
total_intensity = fits.getdata('lkca15_total_intensity.fits')
plt.imshow(total_intensity, origin='lower', extent=[size, -size, -size, size])
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Flux (r$^2$ ADU)', size=14)
###Output
_____no_output_____
###Markdown
 Scattering phase function As a last step, we extract the scattering phase function, that is, the normalized flux as a function of scattering angle. We use the `phase_function` method and select pixels between a deprojected distance of 80 and 100 au (i.e. along the bright ring in the r$^2$-scaled image) and calculate the average flux in 30 steps between 0 and 180 degrees.
###Code
mapping.phase_function(radius=(80., 100.), n_phase=30)
###Output
_____no_output_____
###Markdown
We run again the `write_output` method to store the phase function in a text file.
###Code
mapping.write_output(filename='lkca15')
###Output
_____no_output_____
###Markdown
 We can read the extracted phase function with the `loadtxt` function of `numpy`. The second and third columns of the data file contain the extracted phase function and its error, which in this case is the polarized phase function. The fourth and fifth columns contain an estimated total intensity phase function, which assumes that the degree of polarization is bell-shaped.In case the input image is a total intensity image (i.e. `image_type='total'`), the data file contains only the regular / total intensity phase function.
###Code
angle, pol_flux, pol_error, total_flux, total_error = np.loadtxt('lkca15_phase_function.dat', unpack=True)
###Output
_____no_output_____
###Markdown
Let's plot the polarized phase function that is extracted from the r$^2$-scaled image.
###Code
plt.errorbar(angle, pol_flux, yerr=pol_error)
plt.xlabel('Scattering angle (deg)', fontsize=14)
plt.ylabel('Normalized polarized flux', fontsize=14)
###Output
_____no_output_____
###Markdown
Finally, we plot the total intensity phase function, which shows the onset of a strong forward scattering peak and a more shallow backward scattering peak.
###Code
plt.errorbar(angle, total_flux, yerr=total_error)
plt.xlabel('Scattering angle (deg)', fontsize=14)
plt.ylabel('Normalized total flux', fontsize=14)
###Output
_____no_output_____
###Markdown
pyfssa Tutorial Preamble
###Code
from __future__ import division
# configure plotting
%config InlineBackend.rc = {'figure.dpi': 300, 'savefig.dpi': 300, \
'figure.figsize': (6, 6 / 1.6), 'font.size': 12, \
'figure.facecolor': (1, 1, 1, 0)}
%matplotlib inline
import itertools
from cycler import cycler
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import fssa
###Output
_____no_output_____
###Markdown
A mock scaling function In this tutorial, we will demonstrate the *pyfssa* routines with a mock scaling function\begin{equation}\tilde{f}(x) = e^{-(x+1)^2}\end{equation}
###Code
def mock_scaling_f(x):
"""Mock scaling function"""
return np.exp(-(x + 1.0)**2)
x = np.linspace(-4.0, 2.0, num=200)
fig, ax = plt.subplots()
ax.plot(x, mock_scaling_f(x), label=r'$\tilde{f}(x)$', rasterized=True)
ax.set_xbound(x.min(), x.max())
ax.set_ybound(0.0, 1.1)
ax.set_xlabel(r'$x$')
ax.legend()
plt.show()
###Output
_____no_output_____
###Markdown
**Figure**: Mock scaling function $\tilde{f}(x) = e^{-(x+1)^2}$ Precisely mocking scaled data We generate mock observations $a_{L,\varrho}$ according to the finite-size scaling\begin{equation}a_{L, \varrho} = L^{\zeta/\nu} \tilde{f}\left(L^{1/\nu} (\varrho -\varrho_c)\right)\end{equation}with mock exponents $\nu = \frac{5}{2}, \zeta=\frac{3}{2}$ and $\rho_c = \frac{1}{2}$.
###Code
def mock_scaled_data(l, rho, rho_c=0.5, nu=2.5, zeta=1.5):
"""Generate scaled data from mock scaling function"""
return np.transpose(
np.power(l, zeta / nu) *
mock_scaling_f(
np.outer(
(rho - rho_c), np.power(l, 1 / nu)
)
)
)
rhos = np.linspace(-0.5, 0.8, num=200)
ls = np.logspace(1, 3, num=5).astype(int)
# system sizes
ls
# Define colors
palette = sns.cubehelix_palette(
n_colors=ls.size, start=2.0, rot=0.35, gamma=1.0, hue=1.0, light=0.6, dark=0.2,
)
sns.palplot(palette)
# Generate precisely mocked scaled data
a = mock_scaled_data(ls, rhos)
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', palette))
for l_index, l in enumerate(ls):
ax.plot(
rhos, a[l_index, :],
'.',
label=r'${}$'.format(l),
rasterized=True,
)
ax.set_xbound(rhos.min(), rhos.max())
ax.set_xlabel(r'$\rho$')
ax.legend(title=r'$L$', loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
**Figure**: Mocked raw data, precisely sampled from the scaling function and de-scaled. Achieving data collapse with the mock data Our mock data we now want to scale with the **fssa.scaledata** routine.We compare the quality of the data collapse of several values for the critical exponents, numerically and graphically.
###Code
# Put some relative error bars on the precise data
da = a * 0.1
rho_c = np.tile(0.5, (3, 3))
nu = np.tile(2.5, (3, 3))
zeta = np.tile(1.5, (3, 3))
rho_c[0, :] = [0.25, 0.5, 0.75]
nu[1, :] = [2.0, 2.5, 3.0]
zeta[2, :] = [1.0, 1.5, 2.0]
# re-scale data (manually)
scaled_data = list()
quality = list()
for i in range(3):
my_scaled_data = list()
my_quality = list()
for j in range(3):
my_scaled_data.append(
fssa.scaledata(
ls, rhos, a, da,
rho_c[i, j], nu[i, j], zeta[i, j]
)
)
my_quality.append(fssa.quality(*my_scaled_data[-1]))
scaled_data.append(my_scaled_data)
quality.append(my_quality)
# plot manually re-scaled data
fig, axes = plt.subplots(
nrows=3, ncols=3, squeeze=True,
#figsize=(8, 7),
sharex=True, sharey=True,
)
for (i, j) in itertools.product(range(3), range(3)):
ax = axes[i, j]
ax.set_prop_cycle(cycler('color', palette))
my_scaled_data = scaled_data[i][j]
for l_index, l in enumerate(ls):
ax.plot(
my_scaled_data.x[l_index, :], my_scaled_data.y[l_index, :],
'.',
label=r'${}$'.format(l),
rasterized=True,
)
ax.set_xbound(-5, 2)
if i == 0:
ax.set_title(
r'$\rho_c = {}$'.format(rho_c[i, j]),
position=(0.25, 0.65),
)
elif i == 1:
ax.set_title(
r'$\nu = {}$'.format(nu[i, j]),
position=(0.25, 0.65),
)
elif i == 2:
ax.set_title(
r'$\zeta = {}$'.format(zeta[i, j]),
position=(0.25, 0.65),
)
if i == 2:
ax.set_xlabel(r'$x$')
ax.set_xticks([-4, -2, 0, ])
if j == 0:
ax.set_yticks([0, 1, 2, 3, 4, 5])
ax.text(
0.1, 0.5,
r'$S={:.1f}$'.format(quality[i][j]),
transform=ax.transAxes,
)
plt.show()
###Output
_____no_output_____
###Markdown
**Figure**: Scaling the mock data with varying exponents. The true exponents are in the middle column as the critical parameter $\rho_c = \frac{1}{2}$ and $\nu = \frac{5}{2}$, $\zeta = \frac{3}{2}$, as signified by the data collapse onto the single master curve and the quality-of-fit $S$ (smaller is better). Auto-scaling the mock data Now that we have an idea of the approximate range of the exponents, we employ the *fssa.autoscale* function to algorithmically determine accurate values and their errors.
###Code
ret = fssa.autoscale(ls, rhos, a, da, 0.4, 1.8, 2.2)
ret
auto_scaled_data = fssa.scaledata(ls, rhos, a, da, ret.rho, ret.nu, ret.zeta)
# critical exponents and errors, quality of data collapse
print(ret.rho, ret.drho)
print(ret.nu, ret.dnu)
print(ret.zeta, ret.dzeta)
print(ret.fun)
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', palette))
ax.plot(
auto_scaled_data.x.T, auto_scaled_data.y.T,
'.',
)
ax.set_xbound(-4, 2)
ax.set_xlabel(r'$x$')
plt.show()
###Output
_____no_output_____
###Markdown
**Figure**: Auto-scaling with *pyfssa* leads to data collapse of the mock data onto the original scaling function. Auto-scaling noisy mock data
###Code
noisy_a = a + a * 0.015 * np.random.standard_normal(a.shape)
noisy_ret = fssa.autoscale(ls, rhos, noisy_a, da, 0.4, 1.8, 2.2)
noisy_ret
noisy_auto_scaled_data = fssa.scaledata(
ls, rhos, noisy_a, da, noisy_ret.rho, noisy_ret.nu, noisy_ret.zeta
)
fig, ax = plt.subplots()
ax.set_prop_cycle(cycler('color', palette))
ax.plot(
noisy_auto_scaled_data.x.T, noisy_auto_scaled_data.y.T,
'.',
)
ax.set_xbound(-4, 2)
ax.set_xlabel(r'$x$')
plt.show()
###Output
_____no_output_____
###Markdown
Tutorial
###Code
# NBVAL_IGNORE_OUTPUT
%load_ext watermark
import pypkg_x8uqn
%watermark -v --iversions
###Output
pypkg_x8uqn 0.1.3+dev
CPython 3.7.3
IPython 7.10.2
###Markdown
$\newcommand{Re}[0]{\operatorname{Re}}\newcommand{Im}[0]{\operatorname{Im}}\newcommand{dd}[0]{\,\text{d}}\newcommand{abs}[0]{\operatorname{abs}}$ This notebook illustrates the basic use of the `pypkg_x8uqn` package.
###Code
pypkg_x8uqn.__version__
pypkg_x8uqn.__doc__
###Output
_____no_output_____
###Markdown
TutorialPIPS contains various tools for time-series analysis in astronomy, with a primary focus on detecting the period of variability. PIPS is object-oriented, so that the analysis can be performed in a straightforward way. In this introductory tutorial, you will learn the quickest methods to do the following operations:- Installing PIPS- Initializing photometric data object --- ```PIPS.photdata```- Generating periodogram --- ```photdata.periodogram()```: basic & advanced- Detecting main period --- ```photdata.get_period()```: basic & advanced- Quick visualization --- ```photdata.get_bestfit_curve()```: basic- Multi-period analysis --- ```photdata.amplitude_spectrum()```: basic Importing PIPSPIPS is currently distributed on PyPI and GitHub under the name of ```astroPIPS```. However, the package name itself is still ```PIPS```, and hence the import statements become as shown below:
###Code
import PIPS
from PIPS.resources.sample_RRL import samples
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings(action='ignore')
PIPS.about()
###Output
--------------------------
- Welcome to PIPS! -
--------------------------
Version: 0.3.0-beta.1
Authors: Y. Murakami, A. Savel, J. Sunseri, A. Hoffman, Ivan Altunin, Nachiket Girish
--------------------------
Download the latest version from: https://pypi.org/project/astroPIPS
Report issues to: https://github.com/SterlingYM/astroPIPS
Read the documentations at: https://PIPS.readthedocs.io
--------------------------
###Markdown
Before you start -- sneak peek at PIPS in 5 linesPhotometry data to phase-folded light curve -- This is what you can do with PIPS in 5 lines of code!
###Code
# data prep & initialization
data = samples[0]
star = PIPS.photdata(data)
# generate periodogram
star.periodogram(p_max=1.0).plot()
# automatic period detection
star.get_period(N0=20)
# plot phase-folded data, best-fit curve, and time of maxima
star.plot_lc(plot_bestfit=True,plot_epoch=True)
###Output
_____no_output_____
###Markdown
PIPS is designed so that it can be as simple as this for basic analyses, but at the same time PIPS provides a powerful platform for more high-level analysis for professional astronomers. In the tutorial below, we go over the basic steps to perform some of the most frequent operations and analyses. Data preparationPIPS takes in an array of 3xN data (samples are available on [github]('https://github.com/SterlingYM/astroPIPS/tree/master/sample_data')) -- time, magnitude (flux), and error on magnitude contained in a single python list or numpy array.For convenience, photometry data file from [LOSSPhotPipeline]('https://github.com/benstahl92/LOSSPhotPipeline') can be directly imported using a helper function ```data_readin_LPP```.
###Code
data = samples[0]
x,y,yerr = data
print('data shape:\t',np.array(data).shape)
print('x shape:\t',x.shape)
print('y shape:\t',y.shape)
print('y-error shape:\t',yerr.shape)
###Output
data shape: (3, 103)
x shape: (103,)
y shape: (103,)
y-error shape: (103,)
###Markdown
Create ```photdata``` objectMost of the functions in ```astroPIPS``` are implemented as methods of the ```photdata``` object. Once the ```photdata``` object is initialized, various operations, such as period detection and data manipulation, can be performed directly on the object.
###Code
star = PIPS.photdata(data)
###Output
_____no_output_____
###Markdown
This object initially contains the raw data, and as the user performs analyses using various functions, more information, such as cleaned data, period, or amplitude, will be stored. The list of variables in the object can be printed with the following code:
###Code
print('Initially defined variables: ')
# for att in dir(star): print('- ',att) )
[print('- '+att) for att in dir(star) if not callable(getattr(star, att)) and not att.startswith('__')];
print('\nAvailable functions: ')
[print('- '+att+'()') for att in dir(star) if callable(getattr(star, att)) and not att.startswith('__')];
###Output
Initially defined variables:
- amplitude
- amplitude_err
- band
- data
- epoch
- epoch_offset
- label
- meanmag
- multiprocessing
- period
- period_err
- shape
- x
- y
- yerr
Available functions:
- _get_period()
- _get_period_likelihood()
- amplitude_spectrum()
- check_model()
- classify()
- copy()
- cut()
- get_SDE()
- get_SR()
- get_bestfit_amplitude()
- get_bestfit_curve()
- get_chi2()
- get_epoch_offset()
- get_meanmag()
- get_period()
- get_period_multi()
- open_widget()
- periodogram()
- plot_lc()
- prepare_data()
- reset_cuts()
- summary()
###Markdown
It is always a good idea to keep track of the name and photometric band of the object. For instance, the name of the data file can be used as a label:
###Code
star.label = '000.dat'
star.band = 'V'
###Output
_____no_output_____
###Markdown
Generating periodogramThe ```periodogram()``` function provides the most basic yet most valuable information on variability analysis. This is an extended application of the Fourier transform in period space (1/frequency). This function requires the arguments ```p_min``` and ```p_max```, which limit the range of the periodogram (and hence the period search). The unit has to be the same (often _days_ in astronomy) as the x-axis in the input data. basic methodMost simply, users can call ```star.periodogram(p_min,p_max).plot()``` to generate a periodogram. A few things to note:- This function shouldn't take more than a few seconds to run. If no result is returned, it may be because your platform does not support the ```multiprocessing``` operation, in which case we recommend adding the argument ```multiprocessing=False``` to the ```periodogram()``` call.- If no ```p_min``` or ```p_max``` is given, the periodogram is generated between 0.1 and 4 (days if your data is in days).
###Code
# periodogram: searching the period between 0.1-day and 1-day
star.periodogram(p_min=0.1,p_max=1.0).plot(show_peak=True)
###Output
_____no_output_____
###Markdown
Zooming in to the peak and re-sampling can also be done in a single line:
###Code
star.periodogram(p_min=0.1,p_max=1.0).zoom().plot(show_peak=True)
star.periodogram(p_min=0.1,p_max=1.0).zoom().refine().plot(show_peak=True)
###Output
_____no_output_____
###Markdown
Obtain periodogram as arraysThe ```periodogram()``` function generates an iterable object. Users can easily obtain the values of periodograms and analyze or plot them manually.
###Code
periods, power = star.periodogram(p_min=0.1,p_max=1.0)
plt.plot(periods,power);
print(periods.shape,power.shape)
###Output
(6830,) (6830,)
###Markdown
More advanced methodBy default, the ```periodogram``` function uses a 5-term Fourier model, for which a faster, linear algebra-based method is available. For instance, the basic method shown above is equivalent to calling ```periodogram(p_min=0.1, p_max=1.0, method='fast', model='Fourier', Nterms=5)```. Users can change the template model based on the expected shape of the light curve. Another pre-implemented function is the Gaussian Mixture Model (GMM), which can be specified by changing the ```model``` argument: ```periodogram(p_min=0.1, p_max=1.0, method='custom', model='Gaussian', Nterms=5)```. Since the GMM is integrated with the Super-Gaussian function in PIPS, users can give another argument ```p```, which changes the power parameter of the Super-Gaussian. Note the change in the ```method``` argument as well: while we implemented Gaussian fitting in log-linear form (```method='fast'```), the resulting fit is often erroneous, and thus linear regression (```method='custom'```) is preferred for the Gaussian model. More discussion on this topic can be found in our paper. We internally use ```scipy.optimize.curve_fit()``` for the linear regression. Since this is significantly slower than the linear-algebra method, we recommend finding the optimal maximum number of iterations by changing the ```maxfev``` argument, as shown below.
###Code
# periodogram test w/ Gaussian
star.periodogram(p_min=0.1,p_max=1, ## period search range
method='custom',model='Gaussian', ## model selection
Nterms=1, p=1, ## arguments for the model
maxfev=100 ## max iteration in linear regression
).plot(show_peak=True)
###Output
_____no_output_____
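###Markdown
For reference, the basic call shown earlier is equivalent to spelling out the default arguments explicitly (as stated above):
###Code
# equivalent to star.periodogram(p_min=0.1, p_max=1.0): default 5-term Fourier model, fast method
star.periodogram(p_min=0.1, p_max=1.0,
                 method='fast', model='Fourier', Nterms=5
                 ).plot(show_peak=True)
###Output
_____no_output_____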
###Markdown
Using your own custom functionUsers can use any custom function as a model in the periodogram. The model must be accompanied by an initial-guess generator (```p0_func```), both of which need to follow a specific argument format. See the function docstrings below.
###Code
### define custom functions
from numba import njit
@njit
def polynomial(x,period,params,arg1,arg2=2):
'''
An example custom function (model).
Any custom function must take the arguments (x,period,params),
and users can add as many fixed (not fitted) arguments as needed.
In this function, users can define the exponent in the polynomial
by providing arg1 and arg2.
'''
mod = np.remainder(x,period)
return params[0] + params[1]*(mod-params[3])**arg1 + params[2]*(mod-params[4])**arg2
def poly_p0(x,y,yerr,period,**kwargs):
'''
An example of initial-guess generator (p0_func).
Any p0_func must take the arguments (x,y,yerr,period,**kwargs).
The output array or list must be in the same shape as "params" in the model function.
'''
return [np.mean(y),1,1,period/2,period/2]
### generate periodogram with the custom function
star.periodogram(
p_min=0.1, p_max=1, ## period search between 0.1 to 1 day
method ='custom', ## for any custom function this argument needs to be given
model = polynomial, ## now you can pass the function itself!
p0_func = poly_p0, ## initial-guess generator function must be given as well
arg1 = 1, ## users MUST specify arguments if no default is specified
arg2 = 4, ## since arg2=2 is specified by default, this is optional
maxfev = 100 ## start with small maxfev and increase later
).plot(show_peak=True)
###Output
_____no_output_____
###Markdown
Period detectionThe period detection function utilizes ```periodogram()``` and automatically detects the peak. The periodogram is then refined near the detected peak for accurate period detection. This is followed by linear regression to estimate the uncertainty of the detected period. A few things to note:- The ```photdata.get_period()``` function uses a 5-term Fourier model by default.- Users can simply run the function without any arguments to search for a period between 0.1 and 4.0 (days).- The detected period and period error are stored in ```photdata.period``` and ```photdata.period_err```.- This function also returns the period and period error. Basic method
###Code
star.get_period(); # no argument -> 5-term Fourier, searches period between 0.1-4 day
print(star.period, star.period_err)
period,period_err = star.get_period(p_min=0.1,p_max=1,debug=True) # debug option enables the progress printing
print(period,period_err)
###Output
0.000s --- starting the process...
0.000s --- preparing data...
0.000s --- getting a periodogram...
0.509s --- detecting top 5 peaks...
0.510s --- preparing for finer sampling near peaks...
0.511s --- performing finer sampling near peaks...
0.916s --- period candidate: 0.6968767193610299
0.930s --- detecting aliasing...
0.930s --- alias factor: 1
0.931s --- period candidate: 0.6968767193610299
0.932s --- estimating the uncertainty...
0.947s --- period candidate: 0.6968767193610299
0.947s --- period fitted*: 0.6968786839335414
0.947s --- period error: 2.2667570909410562e-05
0.947s --- refining samples...
0.948s --- refining search width = 6.588e-04
1.315s --- period candidate: 0.6968899220719549
1.316s --- period fitted*: 0.6968946264691298
1.316s --- period error: 2.285551532900411e-05
1.316s --- * validating period error...
1.316s --- * fitted period - peak period = 4.70e-06
1.316s --- * expected deviation size = 2.29e-05
1.316s --- * period error validated
1.316s --- period = 0.696890 +- 0.000023d
1.316s --- process completed.
0.6968899220719549 2.285551532900411e-05
###Markdown
Advanced methodSince ```get_period()``` internally calls the ```periodogram()``` function, any arguments that change the settings of ```periodogram()``` can be applied. For example, users can change the model:
###Code
star.get_period(p_min=0.1,p_max=1.0,method='custom',model='Gaussian')
###Output
_____no_output_____
###Markdown
Similarly, any custom model can be implemented:
###Code
star.get_period(p_min=0.1, p_max=1.0,
method='custom',
model=polynomial,
p0_func=poly_p0,
arg1 = 1,
arg2 = 4,
multiprocessing=False)
###Output
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
###Markdown
VisualizationPIPS provides a tool for easy plotting with ```plot_lc()```. This function automatically uses the most recently updated period value in ```photdata``` and returns the phase-folded data. There is also an easy way to overplot the best-fit model at that period using the ```get_bestfit_curve()``` function. Like many other functions in ```photdata```, users can specify the model and other parameters. In addition, ```get_epoch_offset()``` returns the time-of-maxima offset in the phase-folded data (in units of the original x-axis, not normalized to unitless phase) and enables easy offsetting / visualization of the epoch.
###Code
# detect period
star.get_period()
# phase-folded plot
star.plot_lc() # plots (x%period, y) scatter: normalized to phase
x_th,y_th = star.get_bestfit_curve()
epoch_offset = star.get_epoch_offset() # the epoch offset in the unit of [days] (not normalized to phase)
# plot
plt.plot(x_th/star.period,y_th,c='yellowgreen',lw=3,alpha=0.7)
plt.plot(x_th/star.period+1,y_th,c='yellowgreen',lw=3,alpha=0.7)
plt.axvline(epoch_offset/star.period,color='red')
plt.axvline(epoch_offset/star.period+1,color='red')
# get period with Gaussian model
p_Gaussian,p_err_Gaussian = star.get_period(p_min=0.1,p_max=1.0,method='custom',model='Gaussian')
# auto plot at specified period
star.plot_lc(period=p_Gaussian)
x_th,y_th = star.get_bestfit_curve(period=p_Gaussian,model='Gaussian',Nterms=1,p=1,maxfev=1000000)
# plot
plt.plot(x_th/star.period,y_th,c='yellowgreen',lw=3,alpha=0.7)
plt.plot(x_th/star.period+1,y_th,c='yellowgreen',lw=3,alpha=0.7)
###Output
_____no_output_____
###Markdown
Multi-period detectionWhen the object is expected to have more than one period (e.g., double-mode pulsators or variable binaries), the light curve can be a superposition of periodic variations at two or more periods. PIPS can automatically generate the amplitude spectrum of multi-periodic objects. When ```get_period_multi``` is called, it returns the detected periods and amplitudes of the top ```N``` periods (a sketch of calling it directly is given after the example below). ```amplitude_spectrum``` internally calls it and generates the amplitude spectrum. Note, however, that PIPS forces the detection, even if the signal is just one of the tallest spikes in the background noise and not a real period.
###Code
# multi-period detection
period,spectrum = star.amplitude_spectrum(p_min=0.1,p_max=0.9,N=10,multiprocessing=False)
plt.figure(figsize=(10,3))
plt.plot(period,spectrum)
plt.xlim(0.1,0.9)
plt.xlabel('period (d)')
plt.ylabel('amplitude (mag)')
plt.show()
###Output
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
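###Markdown
A hedged sketch of calling ```get_period_multi``` directly, as mentioned above; the keyword names and the return structure are assumptions, so check ```help(star.get_period_multi)``` for the actual signature:
###Code
# assumed interface: the usual period-search keywords plus the number of periods N
# assumed return: the top-N periods and their amplitudes (possibly with uncertainties)
multi_result = star.get_period_multi(N=3, p_min=0.1, p_max=0.9)
print(multi_result)
###Output
_____no_output_____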
###Markdown
`smlb` TutorialScientific Machine Learning Benchmark: A benchmark of regression models in chem- and materials informatics. 2019-2020, Citrine Informatics. Import `smlb` and a few standard libraries:
###Code
import warnings
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn as skl
import smlb
###Output
_____no_output_____
###Markdown
Introduction This tutorial showcases basic `smlb` functionality by benchmarking different machine learning algorithms on an experimental materials dataset. Further examples can be found in the `examples` directory. We highlight some aspects of `smlb`. See the [Overview](https://github.com/CitrineInformatics/smlb/blob/master/documentation/overview.md) for a description of scope. 1. *Interfaces*. `smlb` defines interfaces (classes) for machine-learning concepts, such as labeled datasets, algorithms for regression, sampling, evaluation metrics and others. This supports correct benchmarking and enables flexibility when combining specific instances (datasets, algorithms, ...) of these concepts from different sources. 2. *Instances*. `smlb` comes with several benchmarking datasets and wrappers for machine-learning algorithms included, for example from [scikit-learn](https://scikit-learn.org/) and [Chemistry Development Kit](https://cdk.github.io/). Adding new datasets and algorithms is easy. 3. *Functionality*. `smlb` provides tailored functionality for its purpose, for example, features for molecules and materials, evaluation metrics for predictive uncertainties, or learning curve plots. In the following, we will benchmark several random forest variants for predicting the band gap of a set of elemental and binary semiconductors and insulators from the literature. Dataset First, we load the dataset from the `dataset` directory where the pre-packaged datasets from `smlb` reside. Use tab completion for easy selection.
###Code
from datasets.experimental.band_gaps_sc73.band_gaps_sc73 import BandGapsStrehlowCook1973Dataset
###Output
_____no_output_____
###Markdown
The `BandGapsStrehlowCook1973Dataset` dataset is an instance of the `Data` interface. Specifically, it derives from `IndexedFiniteData` and `IndexedFiniteLabels`. For information on the dataset and how to use it, print the doc-strings of the class and the initializer:
###Code
print(BandGapsStrehlowCook1973Dataset.__doc__)
print(BandGapsStrehlowCook1973Dataset.__init__.__doc__)
###Output
Band gaps from Strehlow & Cook (1973) dataset.
Based on:
W. H. Strehlow, E. L. Cook: Compilation of Energy Band Gaps in Elemental and Binary
Compound Semiconductors and Insulators, Journal of Physical Chemistry Reference Data
2(1): 163-199, American Institute of Physics, 1973. DOI 10.1063/1.3253115
Curated version downloaded from the Citrination platform (https://citrination.com),
dataset identifier #1160, on 2019-07-17.
This dataset provides 1,459 compounds, 1,447 of them with measured band gap (eV).
Compound information includes chemical sum formula and, where specified, measurement
temperature (K), crystallinity of sample (amorphous, single, or poly-crystalline)
and other properties.
Loads dataset.
Parameters control preprocessing.
Parameters:
filter_: function that accepts a sample and returns whether to keep it (True)
or to exclude it (False).
Possible choices:
'all': all entries, including those without band gap, are retained
'bg': all entries with a measured band gap are retained
't300pm10': all entries with band gap measured at 300 +- 10 K are retained
't300pm10_mc': all mono-crystalline entries with band gap measured at
300 +- 10 K are retained
join: if True, entries with the same chemical sum formula are joined;
if a positive integer k, only entries with k or more band gap measurements are retained;
this changes band gap, temperature and crystallinity from single numbers to
varying-length sequences of numbers (distributions)
samplef: function accepting and returning a sample; applied to all samples as post-processing
labelf: function accepting and returning a label; applied to all labels as post-processing
Raises:
InvalidParameterError: on invalid parameter values
Examples:
sc = BandGapsStrehlowCook1973Dataset(filter_='t300pm10_mc', join=True, labelf=np.median)
Note that if joined, there is no need for groups anymore.
###Markdown
We must avoid the same material showing up multiple times in the dataset, and thus potentially appearing in both the training and validation sets, as this would cause arbitrarily over-optimistic performance estimates. Note that formulas like $\text{Mg}_3\text{As}_2$ and $\text{Mg}_{1.5}\text{As}_1$ describe the same compound; `smlb` takes this into account. Overlap between training and validation sets is a frequent mistake; `smlb` supports correctness by providing an option to `join` entries with the same sum formula upon loading the dataset. We use this and assign the median band gap as the label. Since we will use only the formula as input, we extract only those:
###Code
data = BandGapsStrehlowCook1973Dataset(filter_='bg', join=1, samplef=lambda e: e['formula'], labelf=np.median)
print(data.num_samples, 'entries in total')
print(data.samples()[-10]) # example entry
###Output
495 entries in total
Sb0.09Bi0.91
###Markdown
The entries of the dataset are sum formulas encoded as strings. `smlb` supports non-numeric inputs, such as strings or graphs. Of the 1,447 samples, 495 are unique (34%); on average, each compound appears 3 times in the dataset. However, the actual distribution is asymmetric: most compounds only have a single measurement:
###Code
t = BandGapsStrehlowCook1973Dataset(filter_='bg', join=False, samplef=lambda e: e['formula'])
_, counts = np.unique(t.samples(), return_counts=True)
counts = np.bincount(counts[::-1]) # [::-1] gives reverse view
plt.bar(np.arange(0, len(counts), 1), counts)
plt.xlabel("multiplicity"); plt.ylabel("occurences")
plt.show()
del t, _, counts
###Output
_____no_output_____
###Markdown
Features While `smlb` is happy to work on strings, the regression algorithms we will use are not. We therefore need to create numerical features from the sum formulas. For this, `smlb` provides `Features`, a `DataValuedTransformation`. We will use the "magpie" features of [matminer](https://hackingmaterials.lbl.gov/matminer/). Tab completion also works here.
###Code
from features.matminer_composition import MatminerCompositionFeatures
print(MatminerCompositionFeatures.__doc__)
###Output
Matminer composition-based materials features.
Based on the matminer package.
Reference:
Logan Ward, Alexander Dunn, Alireza Faghaninia, Nils E.R. Zimmermann, Saurabh Bajaj,
Qi Wang., Joseph Montoya, Jiming Chen, Kyle Bystrom, Maxwell Dylla, Kyle Chard,
Mark Asta, Kristin A. Persson, G. Jeffrey Snyder, Ian Foster, Anubhav Jain:
Matminer: An open source toolkit for materials data mining, Computational Materials
Science 152: 60--69, Elsevier, 2018. DOI 10.1016/j.commatsci.2018.05.018
Code and documentation:
https://hackingmaterials.lbl.gov/matminer/
https://github.com/hackingmaterials/matminer
Currently supports four types of features:
* Stoichiometric attributes describe the amount of each element present in
a compound using several L^p norms.
* Elemental property statistics, computed on 22 different elemental
properties, such as distribution of atomic radii.
* Ionic compound attributes, such as whether it is possible to form an
ionic compound from the elements, and the fractional "ionic character" of the compound.
* Electronic structure attributes, which are the average fraction of
electrons from the s, p, d and f valence shells.
###Markdown
In `smlb`, we will normally not explicitly compute the features ourselves. Instead, we just instantiate the featurizer object for later use in a `Workflow`.
###Code
with warnings.catch_warnings(): # prevent warning about deprecated NumPy feature
warnings.simplefilter("ignore", category=FutureWarning)
features = MatminerCompositionFeatures(ionic_fast=True)
###Output
_____no_output_____
###Markdown
`Features` are `DataValuedTransformations`, which means that they accept `Data` as input and produce other `Data` as output. Let us featurize the band gap dataset just to take a look at one of the resulting feature vectors:
###Code
t = features.fit(data).apply(data).samples()[-10] # 9% Sb 91% Bi example from above
with np.printoptions(precision=2, suppress=True):
print(t)
plt.plot(t)
plt.xlabel('feature index'); plt.ylabel('feature value')
plt.show()
del t
###Output
[ 2. 0.91 0.91 0.91 0.91 0.91 51. 83. 32. 80.12
5.24 83. 85. 86. 1. 85.91 0.16 86. 121.76 208.98
87.22 201.13 14.29 208.98 544.4 903.78 359.38 576.74 58.87 544.4
15. 15. 0. 15. 0. 15. 5. 6. 1. 5.91
0.16 6. 139. 148. 9. 147.19 1.47 148. 2.02 2.05
0.03 2.02 0. 2.02 2. 2. 0. 2. 0. 2.
3. 3. 0. 3. 0. 3. 10. 10. 0. 10.
0. 10. 0. 14. 14. 12.74 2.29 14. 15. 29.
14. 27.74 2.29 29. 0. 0. 0. 0. 0. 0.
3. 3. 0. 3. 0. 3. 0. 0. 0. 0.
0. 0. 0. 0. 0. 0. 0. 0. 3. 3.
0. 3. 0. 3. 31.56 32.95 1.39 32.82 0.23 32.95
0. 0. 0. 0. 0. 0. 0. 0. 0. 0.
0. 0. 12. 166. 154. 25.86 25.23 12. 0. 0.
0. 2. 3. 10. 12.74 0.07 0.11 0.36 0.46]
###Markdown
Machine-learning algorithms We compare four tree-ensemble regressors: three from the scientific community's `scikit-learn` project and one from the open-source `lolo` library. `smlb` wraps all of those already. Again, tab completion provides an overview of existing learners. Note that using the `lolo` learner requires the `lolo` dependency.
###Code
from learners.scikit_learn.random_forest_regression_sklearn import RandomForestRegressionSklearn
from learners.scikit_learn.extremely_randomized_trees_regression_sklearn import ExtremelyRandomizedTreesRegressionSklearn
from learners.scikit_learn.gradient_boosted_trees_regression_sklearn import GradientBoostedTreesRegressionSklearn
from learners.lolo.random_forest_regression_lolo import RandomForestRegressionLolo
###Output
_____no_output_____
###Markdown
Each of these learners derives from `Learner`, specifically `SupervisedLearner`. `Learner`s are special cases of `DataTransformation`s. They have a `fit` method for training the model and an `apply` method to predict new data. Predictions always take the form of `PredictiveDistribution`s. Implementations that return only point predictions yield a `DeltaPredictiveDistribution`. `Learner`s can be parametrized at runtime. Here, we use default values. Specifying seeds for pseudo-random number generation is *mandatory*. smlb and pseudo-random numbers "Random" numbers are generated deterministically using pseudo-random number generators (PRNG). smlb takes reproducibility seriously: Given identical software and hardware, results will be deterministic for a given seed, even if running asynchronously, in parallel, or in a distributed environment. This supports reproducibility; as a consequence, PRNG seeds must be specified. For this, smlb uses ideas from the Google JAX PRNG design.
###Code
prng = smlb.Random(rng=42) # master seed
rng_seeds = prng.random.split(4)
rf_skl = RandomForestRegressionSklearn(random_state=rng_seeds[0])
ert_skl = ExtremelyRandomizedTreesRegressionSklearn(random_state=rng_seeds[1])
gbt_skl = GradientBoostedTreesRegressionSklearn(random_state=rng_seeds[2])
rf_lolo = RandomForestRegressionLolo() # unfortunately, lolo does not support this yet; issue #206
###Output
_____no_output_____
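###Markdown
Before handing the learners to a `Workflow`, here is a minimal hand-run sketch of the `fit`/`apply` interface described above (the `Workflow` below automates these steps; using the full dataset for both steps here is for illustration only):
###Code
# featurize the labeled data and train one of the learners on it
train = features.fit(data).apply(data)
rf_skl.fit(train)
# apply() returns a PredictiveDistribution over the labels of the passed data
preds = rf_skl.apply(train)
print(type(preds))
###Output
_____no_output_____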
###Markdown
Sampling We split the dataset into a validation set and training sets of increasing size. By keeping the validation set fixed, we avoid additional randomness between training set sizes due to the choice of validation set. In `smlb`, we do not draw these sets ourselves. Instead, we define `Sampler`s and pass those to a `Workflow` (next section). We choose the training set sizes to be equi-distant in log-space. The validation set must be large enough for sufficient error statistics, and small enough to leave enough samples for training. With 495 samples, this dataset is in a border region where both cross-validation and hold-out sets are feasible.
###Code
nvalid = int(495*0.2) # 20% hold-out set
ntrain_min, ntrain_max, k = 10, 495-nvalid, 6 # k = number of training set sizes
ntrain = np.logspace(np.log10(ntrain_min), np.log10(ntrain_max), k, dtype=int)
print(ntrain)
smpl_seeds = prng.random.split(k+1)
smpl_valid = smlb.RandomSubsetSampler(size=nvalid, rng=smpl_seeds[0])
smpl_train = tuple(smlb.RandomSubsetSampler(size=ntrain[i], rng=smpl_seeds[i+1]) for i in range(k))
###Output
_____no_output_____
###Markdown
Workflow It's time to put everything together! `Workflow`s are pre-defined algorithms for benchmarking. We use a simple `Workflow` to compare different algorithms on a single dataset. Again, use tab completion to select from existing `Workflow`s:
###Code
from workflows.learning_curve_regression import LearningCurveRegression
print(LearningCurveRegression.__doc__)
print(LearningCurveRegression.__init__.__doc__)
###Output
Learning-curve for multiple regression learners on a single dataset.
Algorithm:
1) Validation data
Draw validation data from dataset
For finite datasets, remove validation data from dataset
2) Training sets
Draw training sets from remaining dataset
Validate that there is no overlap with the validation set
3) Featurization
Featurize validation and training sets
4) Training and prediction
Train each learner on each training set
For each trained learner, predict validation set
5) Evaluate results
Compute evaluation metric for each run
Render each evaluation
Current limitations:
* no hyperparameter optimization
* no repeated sampling
Initialize workflow.
Parameters:
data: labeled data
training: sequence of Samplers, one for each training set size
validation: Sampler for validation set
learners: sequence of supervised regression algorithms
features: any data-valued transformation
metric: evaluation metric to use; root mean squared error by default
evaluations: one or more evaluations; default are learning curve and table
###Markdown
Because we want to view the resulting plot directly in the notebook (as opposed to saving it to a file), we create a matplotlib figure and let the `LearningCurvePlot` render to it. The `Workflow` itself executes when we run it, and should take less than a minute to complete.
###Code
fig, ax = plt.subplots()
lcplot = smlb.LearningCurvePlot(target=ax, rectify=True)
wf = LearningCurveRegression(
data=data, training=smpl_train, validation=smpl_valid,
learners=[rf_skl, ert_skl, gbt_skl, rf_lolo],
features=features,
# default evaluation metric is smlb.RootMeanSquaredError
evaluations=[lcplot]
)
wf.run()
ax.set_ylabel('RMSE / eV')
plt.show()
###Output
_____no_output_____
###Markdown
`darkemu` module tutorial notebook
###Code
%load_ext autoreload
%autoreload 2
%pylab inline
import logging
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.size'] = 18
plt.rcParams['axes.linewidth'] = 1.5
plt.rcParams['xtick.major.size'] = 5
plt.rcParams['ytick.major.size'] = 5
plt.rcParams['xtick.minor.size'] = 3
plt.rcParams['ytick.minor.size'] = 3
plt.rcParams['xtick.top'] = True
plt.rcParams['ytick.right'] = True
plt.rcParams['xtick.minor.visible'] = True
plt.rcParams['ytick.minor.visible'] = True
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['figure.figsize'] = (10,6)
from dark_emulator import darkemu
emu = darkemu.base_class()
###Output
initialize cosmo_class
Initialize pklin emulator
initialize propagator emulator
Initialize sigma_d emulator
initialize cross-correlation emulator
initialize auto-correlation emulator
Initialize sigmaM emulator
initialize xinl emulator
###Markdown
how to set cosmology
###Code
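# cosmological parameters passed to set_cosmology below; the order is assumed to be
# (omega_b h^2, omega_c h^2, Omega_de, ln(10^10 A_s), n_s, w) -- check the Dark Emulator docs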
cparam = np.array([0.02225,0.1198,0.6844,3.094,0.9645,-1.])
emu.set_cosmology(cparam)
###Output
_____no_output_____
###Markdown
how to plot halo-mass cross correlation for mass threshold halo samples
###Code
rs = np.logspace(-2,2.5,200)
plt.figure(figsize=(10,6))
z = 0
for i, Mmin in enumerate(np.logspace(12,15,7)):
xihm = emu.get_xicross_massthreshold(rs,Mmin,z)
plt.loglog(rs,xihm,color="C{}".format(i),label='$M_\mathrm{th}=%0.2g$' %Mmin)
plt.loglog(rs,-xihm,':',color="C{}".format(i))
plt.legend(fontsize=12)
plt.ylim(0.00001,1000000)
plt.xlabel("$x\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$\\xi_\mathrm{hm}(x)$")
###Output
_____no_output_____
###Markdown
how to plot DeltaSigma(R) for mass threshold halo samples
###Code
rs = np.logspace(-1.5,2.5,100)
plt.figure(figsize=(10,6))
z = 0
for i, Mmin in enumerate(np.logspace(12,15,7)):
dsigma = emu.get_DeltaSigma_massthreshold(rs,Mmin,z)
plt.loglog(rs,dsigma,label='$M_\mathrm{th}=%0.2g$' %Mmin)
plt.legend(fontsize=12)
plt.ylim(0.002,1000)
plt.xlabel("$r_p\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$\Delta\Sigma(r_p)\,[h M_\odot \mathrm{pc}^{-2}]$")
###Output
_____no_output_____
###Markdown
how to plot halo-halo correlation for mass threshold halo samples
###Code
rs = np.logspace(-0.5,2.5,400)
plt.figure(figsize=(10,6))
z = 0
for i, Mmin in enumerate(np.logspace(12,14,5)):
xih = emu.get_xiauto_massthreshold(rs,Mmin,z)
plt.loglog(rs,xih,color="C{}".format(i),label='$M_\mathrm{th}=%0.2g$' %Mmin)
plt.loglog(rs,-xih,':',color="C{}".format(i))
plt.legend(fontsize=12)
plt.ylim(0.0001,20)
plt.xlabel("$x\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$\\xi_\mathrm{hh}(x)$")
###Output
_____no_output_____
###Markdown
how to plot halo-halo projected correlation function for mass threshold halo samples
###Code
rs = np.logspace(-0.5,2.5,400)
z = 0
plt.figure(figsize=(10,6))
for i, Mmin in enumerate(np.logspace(12,14,5)):
wh = emu.get_wauto_massthreshold(rs,Mmin,z)
plt.loglog(rs,wh,color="C{}".format(i),label='$M_\mathrm{th}=%0.2g$' %Mmin)
plt.loglog(rs,-wh,':',color="C{}".format(i))
plt.legend(fontsize=12)
plt.xlabel("$r_p\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$w_\mathrm{hh}(r_p)\,[h^{-1}\mathrm{Mpc}]$")
###Output
_____no_output_____
###Markdown
Same as before, but for halos with fixed masses instead of mass threshold samples.
###Code
rs = np.logspace(-2,2.5,200)
plt.figure(figsize=(10,6))
for i, M in enumerate(np.logspace(12,15,7)):
xihm = emu.get_xicross_mass(rs,M,z)
plt.loglog(rs,xihm,color="C{}".format(i),label='$M=%0.2g$' %M)
plt.loglog(rs,-xihm,':',color="C{}".format(i))
plt.legend(fontsize=12)
plt.ylim(0.00001,1000000)
plt.xlabel("$x\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$\\xi_\mathrm{hm}(x)$")
rs = np.logspace(-1.5,2.5,100)
plt.figure(figsize=(10,6))
for i, M in enumerate(np.logspace(12,15,7)):
dsigma = emu.get_DeltaSigma_mass(rs,M,z)
plt.loglog(rs,dsigma,label='$M=%0.2g$' %M)
plt.legend(fontsize=12)
plt.ylim(0.002,1000)
plt.xlabel("$r_p\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$\Delta\Sigma(r_p)\,[h M_\odot \mathrm{pc}^{-2}]$")
rs = np.logspace(-0.5,2.5,400)
plt.figure(figsize=(10,6))
for i, M in enumerate(np.logspace(12,14,5)):
xih = emu.get_xiauto_mass(rs,M,M,z)
plt.loglog(rs,xih,color="C{}".format(i),label='$M=%0.2g$' %M)
plt.loglog(rs,-xih,':',color="C{}".format(i))
plt.legend(fontsize=12)
plt.ylim(0.0001,40)
plt.xlabel("$x\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$\\xi_\mathrm{hh}(x)$")
rs = np.logspace(-0.5,2.5,400)
plt.figure(figsize=(10,6))
for i, M in enumerate(np.logspace(12,14,5)):
wh = emu.get_wauto_mass(rs,M,M,z)
plt.loglog(rs,wh,color="C{}".format(i),label='$M=%0.2g$' %M)
plt.loglog(rs,-wh,':',color="C{}".format(i))
plt.legend(fontsize=12)
plt.xlabel("$r_p\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$w_\mathrm{hh}(r_p)\,[h^{-1}\mathrm{Mpc}]$")
###Output
_____no_output_____
###Markdown
Halo-halo correlation function for halos with 2 different masses
###Code
rs = np.logspace(-0.5,2.5,400)
Ms = np.logspace(12,14,3)
plt.figure(figsize=(10,6))
ii = 0
for i in range(3):
for j in range(i,3):
xih = emu.get_xiauto_mass(rs,Ms[i],Ms[j],z)
plt.loglog(rs,xih,color="C{}".format(ii),label='$M_1=%0.2g,\,M_2=%0.2g$' %(Ms[i],Ms[j]))
plt.loglog(rs,-xih,':',color="C{}".format(ii))
ii+=1
plt.legend(fontsize=12)
plt.ylim(0.0001,40)
plt.xlabel("$x\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$\\xi_\mathrm{hh}(x)$")
rs = np.logspace(-0.5,2.5,400)
plt.figure(figsize=(10,6))
ii = 0
for i in range(3):
for j in range(i,3):
wh = emu.get_wauto_mass(rs,Ms[i],Ms[j],z)
plt.loglog(rs,wh,color="C{}".format(ii),label='$M_1=%0.2g,\,M_2=%0.2g$' %(Ms[i],Ms[j]))
plt.loglog(rs,-wh,':',color="C{}".format(ii))
ii+=1
plt.legend(fontsize=12)
plt.xlabel("$r_p\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$w_\mathrm{hh}(r_p)\,[h^{-1}\mathrm{Mpc}]$")
###Output
_____no_output_____
###Markdown
Projected Halo-halo correlation function with finite projection widths This takes more time because of an additional direct integration, which is bypassed by using pyfftlog in other routines.
###Code
plt.figure(figsize=(10,6))
ii = 0
M = 1e13
for i, pimax in enumerate(np.linspace(50,200,4)):
wh = emu.get_wauto_mass_cut(rs,M,M,z,pimax)
plt.loglog(rs,wh,color="C{}".format(i),label='$\Pi_\mathrm{max}=%.1f$' %(pimax))
plt.loglog(rs,-wh,':',color="C{}".format(i))
wh = emu.get_wauto_mass(rs,M,M,z)
plt.loglog(rs,wh,color="C{}".format(4),label='$\Pi_\mathrm{max}=\infty$')
plt.loglog(rs,-wh,':',color="C{}".format(4))
plt.legend(fontsize=12)
plt.xlabel("$r_p\,[h^{-1}\mathrm{Mpc}]$")
plt.ylabel("$w_\mathrm{hh}(r_p)\,[h^{-1}\mathrm{Mpc}]$")
plt.ylim(0.01,100)
###Output
_____no_output_____
###Markdown
Tutorial
###Code
# NBVAL_IGNORE_OUTPUT
%load_ext watermark
import pypkg_bintray_01
%watermark -v --iversions
###Output
pypkg_bintray_01 0.1.0
CPython 3.6.7
IPython 7.1.1
###Markdown
$\newcommand{Re}[0]{\operatorname{Re}}\newcommand{Im}[0]{\operatorname{Im}}\newcommand{dd}[0]{\,\text{d}}\newcommand{abs}[0]{\operatorname{abs}}$ This notebook illustrates the basic use of the `pypkg_bintray_01` package.
###Code
pypkg_bintray_01.__version__
pypkg_bintray_01.__doc__
###Output
_____no_output_____
###Markdown
Tutorial
###Code
# NBVAL_IGNORE_OUTPUT
%load_ext watermark
import numpy as np
import qutip
import matplotlib
import matplotlib.pylab as plt
import weylchamber
from weylchamber.visualize import WeylChamber
%watermark -v --iversions
###Output
numpy 1.15.4
qutip 4.3.1
matplotlib 3.0.2
matplotlib.pylab 1.15.4
weylchamber 0.1.0
CPython 3.6.7
IPython 7.1.1
###Markdown
$\newcommand{Re}[0]{\operatorname{Re}}\newcommand{Im}[0]{\operatorname{Im}}\newcommand{dd}[0]{\,\text{d}}\newcommand{abs}[0]{\operatorname{abs}}$ Every two-qubit gate is associated with a point in the "Weyl-chamber" that may be visualized in three dimensions as the following polyhedron:
###Code
WeylChamber().plot()
###Output
_____no_output_____
###Markdown
Note: if you run this interactively and switch to an interactive matplotlib backend, e.g. %matplotlib tk, you will be able to rotate the 3D plot to get a better intuition. Consider the following common two-qubit gates:
###Code
IDENTITY = qutip.gates.identity([2,2])
IDENTITY
CNOT = qutip.gates.cnot()
CNOT
CPHASE = qutip.gates.cphase(np.pi)
CPHASE
BGATE = qutip.gates.berkeley()
BGATE
iSWAP = qutip.gates.iswap()
iSWAP
sqrtISWAP = qutip.gates.sqrtiswap()
sqrtISWAP
sqrtSWAP = qutip.gates.sqrtswap()
sqrtSWAP
MGATE = weylchamber.canonical_gate(3/4, 1/4, 0)
MGATE
###Output
_____no_output_____
###Markdown
All of these gates are situated at special points in the Weyl chamber. We can print their Weyl chamber coordinates and add a point for each gate in the graphical representation:
###Code
w = WeylChamber();
list_of_gates = [
('Identity', IDENTITY),
('CNOT', CNOT), ('CPHASE', CPHASE), ('BGATE', BGATE),
('iSWAP', iSWAP), ('sqrtISWAP', sqrtISWAP),
('sqrtSWAP', sqrtSWAP), ('MGATE', MGATE)]
print("Weyl Chamber Coordinates")
print("----------------------------------")
for (name, gate) in list_of_gates:
c1, c2, c3 = weylchamber.c1c2c3(gate)
print("%10s: \t%.2fπ %.2fπ %.2fπ" % (name, c1, c2, c3))
w.add_point(c1, c2, c3)
w.plot()
###Output
Weyl Chamber Coordinates
----------------------------------
Identity: 0.00π 0.00π 0.00π
CNOT: 0.50π 0.00π 0.00π
CPHASE: 0.50π 0.00π 0.00π
BGATE: 0.50π 0.25π 0.00π
iSWAP: 0.50π 0.50π 0.00π
sqrtISWAP: 0.25π 0.25π 0.00π
sqrtSWAP: 0.75π 0.25π 0.25π
MGATE: 0.75π 0.25π 0.00π
###Markdown
The gates locally equivalent to the controlled-phase gates are on the axis 0 - A1 in the Weyl chamber:
###Code
w.scatter(*zip(*[
weylchamber.c1c2c3(qutip.gates.cphase(phase))
for phase in np.linspace(0, 2*np.pi, 20)]))
w.plot()
###Output
_____no_output_____
###Markdown
The Weyl chamber coordinates $(c_1, c_2, c_3)$ are closely associated with the *local invariants* $(g_1, g_2, g_3)$
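For reference, the standard relations from the two-qubit local-invariant literature (with the $c_i$ in units of $\pi$, as printed above) are $g_1 = \cos^2(\pi c_1)\cos^2(\pi c_2)\cos^2(\pi c_3) - \sin^2(\pi c_1)\sin^2(\pi c_2)\sin^2(\pi c_3)$, $g_2 = \tfrac{1}{4}\sin(2\pi c_1)\sin(2\pi c_2)\sin(2\pi c_3)$, and $g_3 = 4 g_1 - \cos(2\pi c_1)\cos(2\pi c_2)\cos(2\pi c_3)$; these reproduce the table below, e.g. $(1, 0, 3)$ for the identity and $(0, 0, 1)$ for CNOT.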
###Code
print("Local Invariants")
print("----------------------------------")
for (name, gate) in list_of_gates:
g1, g2, g3 = weylchamber.g1g2g3(gate)
print("%10s: \t%5.2f %5.2f %5.2f" % (name, g1, g2, g3))
###Output
Local Invariants
----------------------------------
Identity: 1.00 0.00 3.00
CNOT: 0.00 0.00 1.00
CPHASE: 0.00 0.00 1.00
BGATE: 0.00 -0.00 0.00
iSWAP: 0.00 0.00 -1.00
sqrtISWAP: 0.25 0.00 1.00
sqrtSWAP: 0.00 -0.25 0.00
MGATE: 0.25 0.00 1.00
###Markdown
The mwtab Tutorial==================The :mod:`mwtab` package provides classes and other facilities for downloading, parsing, accessing, and manipulating data stored in either the ``mwTab`` or ``JSON`` representation of ``mwTab`` files. Also, the :mod:`mwtab` package provides a simple command-line interface to convert between ``mwTab`` and ``JSON`` representations, download entries from Metabolomics Workbench, access the MW REST interface, validate the consistency of the ``mwTab`` files, or extract metadata and metabolites from these files. Brief mwTab Format Overview~~~~~~~~~~~~~~~~~~~~~~~~~~~.. note:: For the full official specification see the following link (``mwTab file specification``): http://www.metabolomicsworkbench.org/data/tutorials.php The ``mwTab`` formatted files consist of multiple blocks. Each new block starts with ``#``.* Some of the blocks contain only "key-value"-like pairs... code-block:: none METABOLOMICS WORKBENCH STUDY_ID:ST000001 ANALYSIS_ID:AN000001 VERSION 1 CREATED_ON 2016-09-17 PROJECT PR:PROJECT_TITLE FatB Gene Project PR:PROJECT_TYPE Genotype treatment PR:PROJECT_SUMMARY Experiment to test the consequence of a mutation at the FatB gene (At1g08510) PR:PROJECT_SUMMARY the wound-response of Arabidopsis.. note:: ``*_SUMMARY`` "key-value"-like pairs typically span multiple lines.* ``SUBJECT_SAMPLE_FACTORS`` block is specially formatted, i.e. it contains a header specification and tab-separated values... code-block:: none SUBJECT_SAMPLE_FACTORS: SUBJECT(optional)[tab]SAMPLE[tab]FACTORS(NAME:VALUE pairs separated by |)[tab]Additional sample data SUBJECT_SAMPLE_FACTORS - LabF_115873 Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded SUBJECT_SAMPLE_FACTORS - LabF_115878 Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded SUBJECT_SAMPLE_FACTORS - LabF_115883 Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded SUBJECT_SAMPLE_FACTORS - LabF_115888 Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded SUBJECT_SAMPLE_FACTORS - LabF_115893 Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded SUBJECT_SAMPLE_FACTORS - LabF_115898 Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded* ``MS_METABOLITE_DATA`` (results) block contains ``Samples`` identifiers and ``Factors`` identifiers as well as tab-separated data between ``*_START`` and ``*_END``... 
code-block:: none MS_METABOLITE_DATA MS_METABOLITE_DATA:UNITS Peak height MS_METABOLITE_DATA_START Samples LabF_115904 LabF_115909 LabF_115914 LabF_115919 LabF_115924 LabF_115929 LabF_115842 LabF_115847 LabF_115852 LabF_115857 LabF_115862 LabF_115867 LabF_115873 LabF_115878 LabF_115883 LabF_115888 LabF_115893 LabF_115898 LabF_115811 LabF_115816 LabF_115821 LabF_115826 LabF_115831 LabF_115836 Factors Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Wounded Arabidopsis Genotype:fatb-ko KD; At1g08510 | Plant Wounding Treatment:Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Control - Non-Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Wounded Arabidopsis Genotype:Wassilewskija (Ws) | Plant Wounding Treatment:Wounded 1_2_4-benzenetriol 1874.0000 3566.0000 1945.0000 1456.0000 2004.0000 1995.0000 4040.0000 2432.0000 2189.0000 1931.0000 1307.0000 2880.0000 2218.0000 1754.0000 1369.0000 1201.0000 3324.0000 1355.0000 2257.0000 1718.0000 1740.0000 3472.0000 2054.0000 1367.0000 1-monostearin 987.0000 450.0000 1910.0000 549.0000 1032.0000 902.0000 393.0000 705.0000 100.0000 481.0000 265.0000 120.0000 1185.0000 867.0000 676.0000 569.0000 579.0000 387.0000 1035.0000 789.0000 875.0000 224.0000 641.0000 693.0000 ... MS_METABOLITE_DATA_END* ``METABOLITES`` metadata block contains a header specifying fields and tab-separated data between ``*_START`` and ``*_END``... code-block:: none METABOLITES METABOLITES_START metabolite_name moverz_quant ri ri_type pubchem_id inchi_key kegg_id other_id other_id_type 1,2,4-benzenetriol 239 522741 Fiehn 10787 C02814 205673 BinBase 1-monostearin 399 959625 Fiehn 107036 D01947 202835 BinBase 2-hydroxyvaleric acid 131 310750 Fiehn 98009 218773 BinBase 3-phosphoglycerate 299 611619 Fiehn 724 C00597 217821 BinBase ... 
METABOLITES_END* ``NMR_BINNED_DATA`` metadata block contains a header specifying fields and tab-separated data between ``*_START`` and ``*_END``... code-block:: none NMR_BINNED_DATA NMR_BINNED_DATA_START Bin range(ppm) CDC029 CDC030 CDC032 CPL101 CPL102 CPL103 CPL201 CPL202 CPL203 CDS039 CDS052 CDS054 0.50...0.56 0.00058149 1.6592 0.039301 0 0 0 0.034018 0.0028746 0.0021478 0.013387 0 0 0.56...0.58 0 0.74267 0 0.007206 0 0 0 0 0 0 0 0.0069721 0.58...0.60 0.051165 0.8258 0.089149 0.060972 0.026307 0.045697 0.069541 0 0 0.14516 0.057489 0.042255 ... NMR_BINNED_DATA_END* Order of metadata and data blocks (MS).. code-block:: none METABOLOMICS WORKBENCH VERSION 1 CREATED_ON 2016-09-17 ... PROJECT ... STUDY ... SUBJECT ... SUBJECT_SAMPLE_FACTORS: SUBJECT(optional)[tab]SAMPLE[tab]FACTORS(NAME:VALUE pairs separated by |)[tab]Additional sample data ... COLLECTION ... TREATMENT ... SAMPLEPREP ... CHROMATOGRAPHY ... ANALYSIS ... MS ... MS_METABOLITE_DATA MS_METABOLITE_DATA:UNITS peak area MS_METABOLITE_DATA_START ... MS_METABOLITE_DATA_END METABOLITES METABOLITES_START ... METABOLITES_END END Using mwtab as a Library~~~~~~~~~~~~~~~~~~~~~~~~Importing mwtab Package-----------------------If the :mod:`mwtab` package is installed on the system, it can be imported:
###Code
import mwtab
###Output
_____no_output_____
###Markdown
Constructing MWTabFile Generator--------------------------------The :mod:`~mwtab.fileio` module provides the :func:`~mwtab.fileio.read_files` generator function that yields :class:`~mwtab.mwtab.MWTabFile` instances. Constructing a :class:`~mwtab.mwtab.MWTabFile` generator is easy - specify the path to a local ``mwTab`` file, a directory of files, or an archive of files (analysis IDs and REST-callable URLs also work, as shown below):
###Code
import mwtab
mwfile_gen = mwtab.read_files("ST000017_AN000035.txt") # single mwTab file
mwfiles_gen = mwtab.read_files("ST000017_AN000035.txt", "ST000040_AN000060.json") # several mwTab files
mwdir_gen = mwtab.read_files("mwfiles_dir_mwtab") # directory of mwTab files
mwzip_gen = mwtab.read_files("mwfiles_mwtab.zip") # archive of mwTab files
mwanalysis_gen = mwtab.read_files("35", "60") # ANALYSIS_ID of mwTab files
# REST callable url of mwTab file
mwurl_gen = mwtab.read_files("https://www.metabolomicsworkbench.org/rest/study/analysis_id/AN000035/mwtab/txt")
###Output
_____no_output_____
###Markdown
Processing MWTabFile Generator------------------------------The :class:`~mwtab.mwtab.MWTabFile` generator can be processed in several ways:* Feed it to a for-loop and process one file at a time:
###Code
for mwfile in mwtab.read_files("35", "60"):
print("STUDY_ID:", mwfile.study_id) # print STUDY_ID
print("ANALYSIS_ID", mwfile.analysis_id) # print ANALYSIS_ID
print("SOURCE", mwfile.source) # print source
for block_name in mwfile: # print names of blocks
print("\t", block_name)
###Output
STUDY_ID: ST000017
ANALYSIS_ID AN000035
SOURCE https://www.metabolomicsworkbench.org/rest/study/analysis_id/AN000035/mwtab/txt
METABOLOMICS WORKBENCH
PROJECT
STUDY
SUBJECT
SUBJECT_SAMPLE_FACTORS
COLLECTION
TREATMENT
SAMPLEPREP
CHROMATOGRAPHY
ANALYSIS
MS
MS_METABOLITE_DATA
STUDY_ID: ST000040
ANALYSIS_ID AN000060
SOURCE https://www.metabolomicsworkbench.org/rest/study/analysis_id/AN000060/mwtab/txt
METABOLOMICS WORKBENCH
PROJECT
STUDY
SUBJECT
SUBJECT_SAMPLE_FACTORS
COLLECTION
TREATMENT
SAMPLEPREP
CHROMATOGRAPHY
ANALYSIS
MS
MS_METABOLITE_DATA
###Markdown
.. note:: Once the generator is consumed, it becomes empty and needs to be created again. * Since the :class:`~mwtab.mwtab.MWTabFile` generator behaves like an iterator, we can call the :py:func:`next` built-in function:
###Code
mwfiles_generator = mwtab.read_files("35", "60")
mwfile1 = next(mwfiles_generator)
mwfile2 = next(mwfiles_generator)
###Output
_____no_output_____
###Markdown
.. note:: Once the generator is consumed, :py:class:`StopIteration` will be raised. * Convert the :class:`~mwtab.mwtab.MWTabFile` generator into a :py:class:`list` of :class:`~mwtab.mwtab.MWTabFile` objects:
###Code
mwfiles_generator = mwtab.read_files("35", "60")
mwfiles_list = list(mwfiles_generator)
###Output
_____no_output_____
###Markdown
.. warning:: Do not convert the :class:`~mwtab.mwtab.MWTabFile` generator into a :py:class:`list` if the generator can yield a large number of files, e.g. several thousand; otherwise it can consume all available memory. Accessing Data From a Single MWTabFile--------------------------------------Since a :class:`~mwtab.mwtab.MWTabFile` is a Python :py:class:`collections.OrderedDict`, data can be accessed and manipulated as with any regular Python :py:class:`dict` object using bracket accessors.* Accessing top-level "keys" in :class:`~mwtab.mwtab.MWTabFile`:
###Code
import os
os.chdir('_static/mwfiles')
mwfile = next(mwtab.read_files("ST000017_AN000035.txt"))
# list MWTabFile-level keys, i.e. saveframe names
list(mwfile.keys())
###Output
_____no_output_____
###Markdown
* Accessing individual blocks in :class:`~mwtab.mwtab.MWTabFile`:
###Code
# access "PROJECT" block
mwfile["PROJECT"]
###Output
_____no_output_____
###Markdown
* Accessing individual "key-value" pairs within blocks:
###Code
# access "INSTITUTE" field within "PROJECT" block
mwfile["PROJECT"]["INSTITUTE"]
###Output
_____no_output_____
###Markdown
* Accessing data in ``SUBJECT_SAMPLE_FACTORS`` block:
###Code
# access "SUBJECT_SAMPLE_FACTORS" block and print first three
mwfile["SUBJECT_SAMPLE_FACTORS"][:3]
# access individual factors (by index)
mwfile["SUBJECT_SAMPLE_FACTORS"][0]
# access individual fields within factors
mwfile["SUBJECT_SAMPLE_FACTORS"][0]["Sample ID"]
###Output
_____no_output_____
###Markdown
* Accessing data in ``MS_METABOLITE_DATA`` block:
###Code
# access data block keys
list(mwfile["MS_METABOLITE_DATA"].keys())
# access units field
mwfile["MS_METABOLITE_DATA"]["Units"]
# access samples field (by index)
mwfile["MS_METABOLITE_DATA"]["Data"][0].keys()
# access metabolite data and print first three
mwfile["MS_METABOLITE_DATA"]["Metabolites"][:3]
###Output
_____no_output_____
###Markdown
Manipulating Data From a Single MWTabFile-----------------------------------------In order to change values within :class:`~mwtab.mwtab.MWTabFile`, descend into the appropriate level using square bracket accessors and set a new value.* Change regular "key-value" pairs:
###Code
# access phone number information
mwfile["PROJECT"]["PHONE"]
# change phone number information
mwfile["PROJECT"]["PHONE"] = "1-530-754-8258"
# check that it has been modified
mwfile["PROJECT"]["PHONE"]
###Output
_____no_output_____
###Markdown
* Change ``SUBJECT_SAMPLE_FACTORS`` values:
###Code
# access the first subject sample factor by index
mwfile["SUBJECT_SAMPLE_FACTORS"][0]
# provide additional details to the first subject sample factor
mwfile["SUBJECT_SAMPLE_FACTORS"][0]["Additional sample data"] = {"Additional detail key": "Additional detail value"}
# check that it has been modified
mwfile["SUBJECT_SAMPLE_FACTORS"][0]
###Output
_____no_output_____
###Markdown
Printing a MWTabFile and its Components---------------------------------------``MWTabFile`` objects provide the ``print_file()`` method, which can be used to output the file in either `mwTab` or JSON format. The method takes a ``file_format`` keyword argument which specifies the output format to be displayed. The MWTabFile can be printed to output in `mwTab` format in its entirety using:* mwfile.print_file(file_format="mwtab")* Print the first 20 lines in ``mwTab`` format.
###Code
from io import StringIO
mwtab_file_str = StringIO()
mwfile.print_file(file_format="mwtab", f=mwtab_file_str)
# print out first 20 lines
print("\n".join(mwtab_file_str.getvalue().split("\n")[:20]))
###Output
#METABOLOMICS WORKBENCH STUDY_ID:ST000017 ANALYSIS_ID:AN000035 PROJECT_ID:PR000016
VERSION 1
CREATED_ON 2016-09-17
#PROJECT
PR:PROJECT_TITLE Rat Stamina Studies
PR:PROJECT_TYPE Feeding
PR:PROJECT_SUMMARY Stamina in rats
PR:INSTITUTE University of Michigan
PR:DEPARTMENT Internal Medicine
PR:LABORATORY Burant Lab
PR:LAST_NAME Beecher
PR:FIRST_NAME Chris
PR:ADDRESS -
PR:EMAIL [email protected]
PR:PHONE 1-530-754-8258
PR:FUNDING_SOURCE NIH: R01 DK077200
#STUDY
ST:STUDY_TITLE Rat HCR/LCR Stamina Study
ST:STUDY_TYPE LC-MS analysis
ST:STUDY_SUMMARY To determine the basis of running capacity and health differences in outbread
###Markdown
The MWTabFile can be printed to output in JSON format in its entirety using:* mwfile.print_file(file_format="json")* Print the first 20 lines in ``JSON`` format.
###Code
from io import StringIO
mwtab_file_str = StringIO()
mwfile.print_file(file_format="json", f=mwtab_file_str)
# print out first 20 lines
print("\n".join(mwtab_file_str.getvalue().split("\n")[:20]))
###Output
{
"METABOLOMICS WORKBENCH": {
"STUDY_ID": "ST000017",
"ANALYSIS_ID": "AN000035",
"PROJECT_ID": "PR000016",
"VERSION": "1",
"CREATED_ON": "2016-09-17"
},
"PROJECT": {
"PROJECT_TITLE": "Rat Stamina Studies",
"PROJECT_TYPE": "Feeding",
"PROJECT_SUMMARY": "Stamina in rats",
"INSTITUTE": "University of Michigan",
"DEPARTMENT": "Internal Medicine",
"LABORATORY": "Burant Lab",
"LAST_NAME": "Beecher",
"FIRST_NAME": "Chris",
"ADDRESS": "-",
"EMAIL": "[email protected]",
"PHONE": "1-530-754-8258",
###Markdown
* Print a single block in ``mwTab`` format.
###Code
mwfile.print_block("STUDY", file_format="mwtab")
###Output
ST:STUDY_TITLE Rat HCR/LCR Stamina Study
ST:STUDY_TYPE LC-MS analysis
ST:STUDY_SUMMARY To determine the basis of running capacity and health differences in outbread
ST:STUDY_SUMMARY N/NIH rats selected for high capacity (HCR) and low capacity (LCR) running (a for
ST:STUDY_SUMMARY VO2max) (see:Science. 2005 Jan 21;307(5708):418-20). Plasma collected at 12 of
ST:STUDY_SUMMARY age in generation 28 rats after ad lib feeding or 40% caloric restriction at week
ST:STUDY_SUMMARY 8 of age. All animals fasted 4 hours prior to collection between 5-8
ST:INSTITUTE University of Michigan
ST:DEPARTMENT Internal Medicine
ST:LABORATORY Burant Lab (MMOC)
ST:LAST_NAME Qi
ST:FIRST_NAME Nathan
ST:ADDRESS -
ST:EMAIL [email protected]
ST:PHONE 734-232-0815
ST:NUM_GROUPS 2
ST:TOTAL_SUBJECTS 42
###Markdown
* Print a single block in ``JSON`` format.
###Code
mwfile.print_block("STUDY", file_format="json")
###Output
{
"STUDY_TITLE": "Rat HCR/LCR Stamina Study",
"STUDY_TYPE": "LC-MS analysis",
"STUDY_SUMMARY": "To determine the basis of running capacity and health differences in outbread N/NIH rats selected for high capacity (HCR) and low capacity (LCR) running (a for VO2max) (see:Science. 2005 Jan 21;307(5708):418-20). Plasma collected at 12 of age in generation 28 rats after ad lib feeding or 40% caloric restriction at week 8 of age. All animals fasted 4 hours prior to collection between 5-8",
"INSTITUTE": "University of Michigan",
"DEPARTMENT": "Internal Medicine",
"LABORATORY": "Burant Lab (MMOC)",
"LAST_NAME": "Qi",
"FIRST_NAME": "Nathan",
"ADDRESS": "-",
"EMAIL": "[email protected]",
"PHONE": "734-232-0815",
"NUM_GROUPS": "2",
"TOTAL_SUBJECTS": "42"
}
###Markdown
Writing data from a MWTabFile object into a file------------------------------------------------Data from a :class:`~mwtab.mwtab.MWTabFile` can be written into a file in the original ``mwTab`` format or in the equivalent JSON format using :meth:`~mwtab.mwtab.MWTabFile.write()`:* Writing into a ``mwTab`` formatted file:
###Code
with open("out/ST000017_AN000035_modified.txt", "w") as outfile:
mwfile.write(outfile, file_format="mwtab")
###Output
_____no_output_____
###Markdown
* Writing into a ``JSON`` file:
###Code
with open("out/ST000017_AN000035_modified.json", "w") as outfile:
mwfile.write(outfile, file_format="json")
###Output
_____no_output_____
###Markdown
Extracting Metadata and Metabolites from mwTab Files----------------------------------------------------The :mod:`mwtab.mwextract` module can be used to extract metadata from ``mwTab`` files. The module contains two main methods: 1) :meth:`~mwtab.mwextract.extract_metadata()`, which can be used to parse metadata values from a ``mwTab`` file, and 2) :meth:`~mwtab.mwextract.extract_metabolites()`, which can be used to gather a list of metabolites and the samples containing them from multiple ``mwTab`` files that contain a given metadata key-value pair.Extracting Metadata Values*************************** Extracting metadata values from a given ``mwTab`` file:
###Code
from mwtab.mwextract import extract_metadata
extract_metadata(mwfile, ["STUDY_TYPE", "SUBJECT_TYPE"])
###Output
_____no_output_____
###Markdown
Extracting Metabolite Values****************************** Extracting metabolite information from multiple ``mwTab`` files and outputting the first three metabolites:
###Code
from mwtab.mwextract import extract_metabolites, generate_matchers
from mwtab import read_files
mwtab_gen = read_files(
"ST000017_AN000035.txt",
"ST000040_AN000060.txt"
)
matchers = generate_matchers([
("ST:STUDY_TYPE",
"LC-MS analysis")
])
list(extract_metabolites(mwtab_gen, matchers).keys())[:3]
###Output
_____no_output_____
###Markdown
* Extracting metabolite information from multiple ``mwTab`` files using regular expressions and outputting the first three metabolites:
###Code
from mwtab.mwextract import extract_metabolites, generate_matchers
from mwtab import read_files
from re import compile
mwtab_gen = read_files(
"ST000017_AN000035.txt",
"ST000040_AN000060.txt"
)
matchers = generate_matchers([
("ST:STUDY_TYPE",
compile("(LC-MS)"))
])
list(extract_metabolites(mwtab_gen, matchers).keys())[:3]
###Output
_____no_output_____
###Markdown
Converting mwTab Files----------------------``mwTab`` files can be converted between the ``mwTab`` file format and their ``JSON``representation using the :mod:`mwtab.converter` module.One-to-one file conversions**************************** Converting from the ``mwTab`` file format into its equivalent ``JSON`` file format:
###Code
from mwtab.converter import Converter
# Using a valid ANALYSIS_ID to access the file from URL: from_path="35"
converter = Converter(from_path="35", to_path="out/ST000017_AN000035.json",
from_format="mwtab", to_format="json")
converter.convert()
###Output
_____no_output_____
###Markdown
* Converting from JSON file format back to ``mwTab`` file format:
###Code
from mwtab.converter import Converter
converter = Converter(from_path="out/ST000017_AN000035.json", to_path="out/ST000017_AN000035.txt",
from_format="json", to_format="mwtab")
converter.convert()
###Output
_____no_output_____
###Markdown
Many-to-many file conversions******************************* Converting from a directory of ``mwTab`` formatted files into their equivalent ``JSON`` formatted files:
###Code
from mwtab.converter import Converter
converter = Converter(from_path="mwfiles_dir_mwtab",
to_path="out/mwfiles_dir_json",
from_format="mwtab",
to_format="json")
converter.convert()
###Output
_____no_output_____
###Markdown
* Converting from the directory of ``JSON`` formatted files into their equivalent ``mwTab`` formatted files:
###Code
from mwtab.converter import Converter
converter = Converter(from_path="out/mwfiles_dir_json",
to_path="out/mwfiles_dir_mwtab",
from_format="json",
to_format="mwtab")
converter.convert()
###Output
_____no_output_____
###Markdown
.. note:: Many-to-many and one-to-one file conversions are available. See :mod:`mwtab.converter` for a full list of available conversions. Command-Line Interface~~~~~~~~~~~~~~~~~~~~~~The mwtab Command-Line Interface provides the following functionality: * Convert from the ``mwTab`` file format into its equivalent ``JSON`` file format and vice versa. * Download files through Metabolomics Workbench's REST API. * Validate ``mwTab`` formatted files. * Extract metadata and metabolite information from downloaded files.
###Code
! mwtab --help
###Output
The mwtab command-line interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usage:
mwtab -h | --help
mwtab --version
mwtab convert (<from-path> <to-path>) [--from-format=<format>] [--to-format=<format>] [--validate] [--mw-rest=<url>] [--verbose]
mwtab validate <from-path> [--mw-rest=<url>] [--verbose]
mwtab download url <url> [--to-path=<path>] [--verbose]
mwtab download study all [--to-path=<path>] [--input-item=<item>] [--output-format=<format>] [--mw-rest=<url>] [--validate] [--verbose]
mwtab download study <input-value> [--to-path=<path>] [--input-item=<item>] [--output-item=<item>] [--output-format=<format>] [--mw-rest=<url>] [--validate] [--verbose]
mwtab download (study | compound | refmet | gene | protein) <input-item> <input-value> <output-item> [--output-format=<format>] [--to-path=<path>] [--mw-rest=<url>] [--verbose]
mwtab download moverz <input-item> <m/z-value> <ion-type-value> <m/z-tolerance-value> [--to-path=<path>] [--mw-rest=<url>] [--verbose]
mwtab download exactmass <LIPID-abbreviation> <ion-type-value> [--to-path=<path>] [--mw-rest=<url>] [--verbose]
mwtab extract metadata <from-path> <to-path> <key> ... [--to-format=<format>] [--no-header]
mwtab extract metabolites <from-path> <to-path> (<key> <value>) ... [--to-format=<format>] [--no-header]
Options:
-h, --help Show this screen.
--version Show version.
--verbose Print what files are processing.
--validate Validate the mwTab file.
--from-format=<format> Input file format, available formats: mwtab, json [default: mwtab].
--to-format=<format> Output file format [default: json].
Available formats for convert:
mwtab, json.
Available formats for extract:
json, csv.
--mw-rest=<url> URL to MW REST interface
[default: https://www.metabolomicsworkbench.org/rest/].
--context=<context> Type of resource to access from MW REST interface, available contexts: study,
compound, refmet, gene, protein, moverz, exactmass [default: study].
--input-item=<item> Item to search Metabolomics Workbench with.
--output-item=<item> Item to be retrieved from Metabolomics Workbench.
--output-format=<format> Format for item to be retrieved in, available formats: mwtab, json.
--no-header Include header at the top of csv formatted files.
For extraction <to-path> can take a "-" which will use stdout.
###Markdown
Converting ``mwTab`` files in bulk----------------------------------CLI one-to-one file conversions******************************** Convert from a local file in ``mwTab`` format to a local file in ``JSON`` format:
###Code
! mwtab convert ST000017_AN000035.txt out/ST000017_AN000035.json \
--from-format=mwtab --to-format=json
###Output
_____no_output_____
###Markdown
* Convert from a local file in ``JSON`` format to a local file in ``mwTab`` format:
###Code
! mwtab convert ST000017_AN000035.json out/ST000017_AN000035.txt \
--from-format=json --to-format=mwtab
###Output
_____no_output_____
###Markdown
* Convert from a compressed local file in ``mwTab`` format to a compressed local file in ``JSON`` format:
###Code
! mwtab convert ST000017_AN000035.txt.gz out/ST000017_AN000035.json.gz \
--from-format=mwtab --to-format=json
###Output
_____no_output_____
###Markdown
* Convert from a compressed local file in ``JSON`` format to a compressed local file in ``mwTab`` format:
###Code
! mwtab convert ST000017_AN000035.json.gz out/ST000017_AN000035.txt.gz \
--from-format=json --to-format=mwtab
###Output
_____no_output_____
###Markdown
* Convert from an uncompressed URL file in ``mwTab`` format to a compressed local file in ``JSON`` format:
###Code
! mwtab convert 35 out/ST000017_AN000035.json.bz2 \
--from-format=mwtab --to-format=json
###Output
_____no_output_____
###Markdown
.. note:: See :mod:`mwtab.converter` for a full list of available conversions. CLI many-to-many file conversions*********************************** Convert from a directory of files in ``mwTab`` format to a directory of files in ``JSON`` format:
###Code
! mwtab convert mwfiles_dir_mwtab out/mwfiles_dir_json \
--from-format=mwtab --to-format=json
###Output
_____no_output_____
###Markdown
* Convert from a directory of files in ``JSON`` format to a directory of files in ``mwTab`` format:
###Code
! mwtab convert mwfiles_dir_json out/mwfiles_dir_mwtab \
--from-format=json --to-format=mwtab
###Output
_____no_output_____
###Markdown
* Convert from a directory of files in ``mwTab`` format to a zip archive of files in ``JSON`` format:
###Code
! mwtab convert mwfiles_dir_mwtab out/mwfiles_json.zip \
--from-format=mwtab --to-format=json
###Output
_____no_output_____
###Markdown
* Convert from a compressed tar archive of files in ``JSON`` format to a directory of files in ``mwTab`` format:
###Code
! mwtab convert mwfiles_json.tar.gz out/mwfiles_dir_mwtab \
--from-format=json --to-format=mwtab
###Output
_____no_output_____
###Markdown
* Convert from a zip archive of files in ``mwTab`` format to a compressed tar archive of files in ``JSON`` format:
###Code
! mwtab convert mwfiles_mwtab.zip out/mwfiles_json.tar.bz2 \
--from-format=mwtab --to-format=json
###Output
_____no_output_____
###Markdown
.. note:: See :mod:`mwtab.converter` for a full list of available conversions. Download files through Metabolomics Workbench's REST API------------------------------------------------------The :mod:`mwtab` package provides the :mod:`mwtab.mwrest` module, which contains a number of functions and classes for working with Metabolomics Workbench's REST API... note:: For the full official REST API specification, see the following link (``MW REST API (v1.0, 5/7/2019)``): https://www.metabolomicsworkbench.org/tools/MWRestAPIv1.0.pdf Download by URL**************** To download a file based on a given URL, simply call the ``download url`` command with the desired URL and provide an output path:
###Code
! mwtab download url "https://www.metabolomicsworkbench.org/rest/study/analysis_id/AN000035/mwtab/txt" --to-path=out/ST000017_AN000035.txt
###Output
_____no_output_____
###Markdown
* To download single-analysis ``mwTab`` files, simply call ``download study`` and specify the analysis ID:
###Code
! mwtab download study AN000035 --to-path=out/ST000017_AN000035.txt
###Output
_____no_output_____
###Markdown
* To download an entire study ``mwTab`` file, simply call ``download study`` and specify the study ID:
###Code
! mwtab download study ST000017 --to-path=out/ST000017_AN000035.txt
###Output
_____no_output_____
###Markdown
.. note:: It is possible to validate downloaded files by adding the ``--validate`` option to the command line. Download study, compound, refmet, gene, and protein files********************************************************** To download study, compound, refmet, gene, and protein context files, call the ``download`` command and specify the context, input item, input value, and output item (optionally specify the output format).* Download a study:
###Code
! mwtab download study analysis_id AN000035 mwtab --output-format=txt --to-path=out/ST000017_AN000035.txt
###Output
_____no_output_____
###Markdown
* Download compound:
###Code
! mwtab download compound regno 11 name --to-path=out/tmp.txt
###Output
_____no_output_____
###Markdown
* Download refmet:
###Code
! mwtab download refmet name Cholesterol all --to-path=out/tmp.txt
###Output
_____no_output_____
###Markdown
* Download gene:
###Code
! mwtab download gene gene_symbol acaca all --to-path=out/tmp.txt
###Output
_____no_output_____
###Markdown
* Download protein:
###Code
! mwtab download protein uniprot_id Q13085 all --to-path=out/tmp.txt
###Output
_____no_output_____
###Markdown
Download all ``mwTab`` formatted files**********************************The :mod:`mwtab` package contains a number of command-line functions for downloading ``mwTab`` formatted files through the Metabolomics Workbench REST API.* To download all available analysis files, simply call the ``download study all`` command:! mwtab download study all .. note:: If an output directory is not specified, the command will download to the current working directory. It is recommended to either run the command in the desired output directory or specify an output directory with the ``--to-path`` argument.* It is also possible to download all study files by calling the ``download study all`` command and providing an input item and output path:! mwtab download study all --input-item=study_id Download moverz and exactmass****************************** To download moverz files, call the ``download moverz`` command and specify the input value (LIPIDS, MB, or REFMET), m/z value, ion type value, and m/z tolerance value.
###Code
! mwtab download moverz MB 635.52 M+H 0.5 --to-path=out/tmp.txt
###Output
_____no_output_____
###Markdown
* To download exactmass files, call the ``download exactmass`` command and specify the LIPID abbreviation and ion type value.
###Code
! mwtab download exactmass "PC(34:1)" M+H --to-path=out/tmp.txt
###Output
_____no_output_____
###Markdown
.. note:: It is not necessary to specify an output format for exactmass files.Extracting metabolite data and metadata from ``mwTab`` files------------------------------------------------------------The :mod:`mwtab` package provides the :func:`~mwtab.mwextract.extract_metabolites` and :func:`~mwtab.mwextract.extract_metadata` functions that can parse ``mwTab`` formatted files. The :func:`~mwtab.mwextract.extract_metabolites` function takes a source (a list of ``mwTab`` files) and a list of metadata key-value pairs that are used to search for ``mwTab`` files which contain the given metadata pairs. The :func:`~mwtab.mwextract.extract_metadata` function takes a source (a list of ``mwTab`` files) and a list of metadata keys which are used to search the ``mwTab`` files for possible values of the given keys.* To extract metabolites from ``mwTab`` files in a directory, call the ``extract metabolites`` command and provide a list of metadata key-value pairs along with an output path and output format:
###Code
! mwtab extract metabolites mwfiles_dir_mwtab out/output_file.csv SU:SUBJECT_TYPE Plant --to-format=csv
###Output
_____no_output_____
###Markdown
.. note:: It is possible to use regular expressions to match the metadata value (e.g. ... SU:SUBJECT_TYPE "r'(Plant)'").* To extract metadata from ``mwTab`` files in a directory, call the ``extract metadata`` command and provide a list of metadata keys along with an output path and output format:
###Code
! mwtab extract metadata mwfiles_dir_json out/output_file.json SUBJECT_TYPE --to-format=json
###Output
_____no_output_____
###Markdown
Validating ``mwTab`` files--------------------------The :mod:`mwtab` package provides the :func:`~mwtab.validator.validate_file` function that can validate files based on a ``JSON`` schema definition. The :mod:`mwtab.mwschema` module contains schema definitions for every block of a ``mwTab`` formatted file, i.e. it lists the types of the attributes (e.g. :py:class:`str`) as well as specifying which keys are optional and which are required.* To validate file(s), simply call the ``validate`` command and provide the path to the file(s):
###Code
! mwtab validate 35
###Output
_____no_output_____
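###Markdown
The same validation can also be run from Python with :func:`~mwtab.validator.validate_file`. The cell below is a minimal sketch, not part of the original tutorial: it assumes the function can be called with the schema mapping left at its default (the definitions in :mod:`mwtab.mwschema`), and the exact return value may differ between package versions.
###Code
# minimal sketch: validate a single file fetched by ANALYSIS_ID through the REST API
from mwtab.validator import validate_file
from mwtab import read_files
# "35" is a valid ANALYSIS_ID, as in the conversion example earlier in this tutorial
mwtabfile = next(read_files("35"))
# assumed call signature: schema mapping defaults to mwtab.mwschema definitions
validation_result = validate_file(mwtabfile)
###Output
_____no_output_____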
###Markdown
Using the mwtab Python Package to Find Analyses Involving a Specific Disease or Condition~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~The Metabolomics Workbench data repository stores mass spectrometry and nuclear magnetic resonance experimental data and metadata in ``mwTab`` formatted files. Metabolomics Workbench also provides a number of tools for searching or analyzing ``mwTab`` files. The mwtab Python package can be used to perform similar functions through both a programmatic API and a command-line interface, and it offers more search flexibility.In order to search the repository of ``mwTab`` files for analyses associated with a specific disease, Metabolomics Workbench provides a web-based interface: * https://www.metabolomicsworkbench.org/data/metsearch_MS_form2.php The mwtab Python package can be used in a number of ways to similar effect. The package provides the :meth:`~mwtab.mwextract.extract_metabolites()` method to extract and organize metabolites from multiple ``mwTab`` files through both Python scripts and a command-line interface. This method has more search flexibility, since it can take either a search string or a regular expression.Using the mwtab package API to extract study IDs, analysis IDs, and metabolites---------------------------------------------------------------------------The :meth:`~mwtab.mwextract.extract_metabolites()` method takes two parameters: 1) an iterable of :class:`~mwtab.mwtab.MWTabFile` instances and 2) an iterable of :class:`~mwtab.mwextract.ItemMatcher` or :class:`~mwtab.mwextract.ReGeXMatcher` instances. The iterable of :class:`~mwtab.mwtab.MWTabFile` instances can be created by passing ``mwTab`` file sources (filenames, analysis IDs, etc.) to the :meth:`~mwtab.fileio.read_files()` method. The iterable of matcher instances can be created using the :meth:`~mwtab.mwextract.generate_matchers()` method.* An example of using the mwtab package API to extract data from analyses associated with diabetes and output the first three metabolites:
###Code
from mwtab.mwextract import extract_metabolites, generate_matchers
from mwtab import read_files
import re
mwtab_gen = read_files("diabetes/")
matchers = generate_matchers([
("ST:STUDY_SUMMARY",
re.compile("(diabetes)"))
])
list(extract_metabolites(mwtab_gen, matchers).keys())[:3]
###Output
_____no_output_____
###Markdown
Using the mwtab CLI to extract study IDs, analysis IDs, and metabolites-------------------------------------------------------------------The mwtab command-line interface includes a ``mwtab extract metabolites`` command which takes a directory of ``mwTab`` files, an output path to save the extracted data in, and a series of ``mwTab`` section item keys and values to be matched (either string values or regular expressions). Additionally, an output format can be specified. mwtab extract metabolites <from-path> <to-path> (<key> <value>) ... [--to-format=<format>] [--no-header]* An example of using the mwtab CLI to extract data from analyses associated with diabetes:
###Code
! mwtab extract metabolites diabetes/ out/output_file.json ST:STUDY_SUMMARY "r'(?i)(diabetes)'" --to-format=json
###Output
_____no_output_____
###Markdown
Rechunker TutorialThis tutorial notebook explains how to use rechunker with real datasets. We will also use xarray to make some things easier and prettier, but we note that xarray is not a dependency for rechunker. Toy Example Create Example DataHere we load one of xarray's tutorial datasets and write it to Zarr. This is not actually a big dataset, so rechunker is not really needed here. But it's a convenient example.
###Code
import xarray as xr
xr.set_options(display_style='text')
import zarr
import dask.array as dsa
ds = xr.tutorial.open_dataset("air_temperature")
# create initial chunk structure
ds = ds.chunk({'time': 100})
ds.air.encoding = {} # helps when writing to zarr
ds
###Output
_____no_output_____
###Markdown
We can examine the chunk structure of the data variable using Dask's pretty Array repr.
###Code
ds.air.data
! rm -rf *.zarr # clean up any existing temporary data
ds.to_zarr('air_temperature.zarr')
###Output
_____no_output_____
###Markdown
Now we open up a Zarr Group and Array that we will use as inputs to rechunker.
###Code
source_group = zarr.open('air_temperature.zarr')
print(source_group.tree())
source_array = source_group['air']
source_array.info
###Output
_____no_output_____
###Markdown
Rechunk a single ArrayThe original array has chunks of (100, 25, 53). Let's rechunk it to be contiguous in time, but chunked in space. We specify a small value of `max_mem` in order to force rechunker to create an intermediate dataset. We also have to specify a place to store the final and intermediate data. We use the [rechunk](api.rst#rechunker.rechunk) function, which returns a [Rechunked](api.rst#rechunker.Rechunked) object.
###Code
from rechunker import rechunk
target_chunks = (2920, 25, 1)
max_mem = '1MB'
target_store = 'air_rechunked.zarr'
temp_store = 'air_rechunked-tmp.zarr'
array_plan = rechunk(source_array, target_chunks, max_mem, target_store, temp_store=temp_store)
array_plan
###Output
_____no_output_____
###Markdown
Since this array has named dimensions, we can also specify the chunks using a dictionary syntax.
###Code
target_chunks_dict = {'time': 2920, 'lat': 25, 'lon': 1}
# need to remove the existing stores or it won't work
!rm -rf air_rechunked.zarr air_rechunked-tmp.zarr
array_plan = rechunk(source_array, target_chunks_dict, max_mem, target_store, temp_store=temp_store)
array_plan
###Output
_____no_output_____
###Markdown
The `array_plan` is a `Rechunked` object. It has not actually performed the rechunking yet. To do this, we need to call the `execute` method. This will use Dask to perform the rechunking.
###Code
result = array_plan.execute()
result.chunks
###Output
_copy_chunk((slice(0, 100, None), slice(0, 25, None), slice(0, 53, None)))_copy_chunk((slice(100, 200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(200, 300, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(300, 400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(400, 500, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(500, 600, None), slice(0, 25, None), slice(0, 53, None)))_copy_chunk((slice(600, 700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(700, 800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(800, 900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(900, 1000, None), slice(0, 25, None), slice(0, 53, None)))_copy_chunk((slice(1000, 1100, None), slice(0, 25, None), slice(0, 53, None)))_copy_chunk((slice(1100, 1200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1200, 1300, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1300, 1400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1400, 1500, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1500, 1600, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1600, 1700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1700, 1800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1800, 1900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1900, 2000, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2000, 2100, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2100, 2200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2200, 2300, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2300, 2400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2400, 2500, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2500, 2600, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2600, 2700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2700, 2800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2800, 2900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2900, 2920, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(0, 3, None)))_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(3, 6, None)))_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(6, 9, None)))_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(9, 12, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(12, 15, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(15, 18, None)))_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(18, 21, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(21, 24, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(24, 27, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(27, 30, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(30, 33, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(33, 36, None)))_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(36, 39, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(39, 42, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(42, 45, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(45, 48, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(48, 51, None)))_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(51, 53, None)))
###Markdown
By default, Dask will use the multi-threaded scheduler. Since rechunking can take a long time, we might want to use a progress bar.
###Code
from dask.diagnostics import ProgressBar
with ProgressBar():
array_plan.execute()
###Output
[ ] | 0% Completed | 0.0s_copy_chunk((slice(0, 100, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(100, 200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(200, 300, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(300, 400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(400, 500, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(500, 600, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(600, 700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(700, 800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(800, 900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(900, 1000, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1000, 1100, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1100, 1200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1200, 1300, None), slice(0, 25, None), slice(0, 53, None)))
[######### ] | 23% Completed | 0.1s_copy_chunk((slice(1300, 1400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1400, 1500, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1500, 1600, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1600, 1700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1700, 1800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1800, 1900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1900, 2000, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2000, 2100, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2100, 2200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2200, 2300, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2300, 2400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2400, 2500, None), slice(0, 25, None), slice(0, 53, None)))
[################## ] | 47% Completed | 0.2s_copy_chunk((slice(2500, 2600, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2600, 2700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2700, 2800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2800, 2900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2900, 2920, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(0, 3, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(3, 6, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(6, 9, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(9, 12, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(12, 15, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(15, 18, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(18, 21, None)))
[############################# ] | 72% Completed | 0.3s_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(21, 24, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(24, 27, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(27, 30, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(30, 33, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(33, 36, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(36, 39, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(39, 42, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(42, 45, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(45, 48, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(48, 51, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(51, 53, None)))
[########################################] | 100% Completed | 0.4s
###Markdown
If we create a distributed cluster, then rechunker will use that when it executes.
###Code
from dask.distributed import Client, LocalCluster, progress
cluster = LocalCluster()
client = Client(cluster)
future = array_plan.persist()
progress(future)
###Output
_____no_output_____
###Markdown
Now that it is written to disk, we can open the rechunked array however we please. Using Zarr...
###Code
target_array = zarr.open('air_rechunked.zarr')
target_array
###Output
_____no_output_____
###Markdown
...or Dask
###Code
target_array_dask = dsa.from_zarr('air_rechunked.zarr')
target_array_dask
###Output
_____no_output_____
###Markdown
Rechunk a GroupIn the example above, we only rechunked a single array. We can open it with Dask, but not Xarray, because it doesn't contain any coordinates or metadata. Rechunker also supports rechunking entire groups. In this case, `target_chunks` must be a dictionary.
###Code
target_chunks = {
'air': {'time': 2920, 'lat': 25, 'lon': 1},
'time': None, # don't rechunk this array
'lon': None,
'lat': None,
}
max_mem = '1MB'
target_store = 'group_rechunked.zarr'
temp_store = 'group_rechunked-tmp.zarr'
# need to remove the existing stores or it won't work
!rm -rf group_rechunked.zarr group_rechunked-tmp.zarr
array_plan = rechunk(source_group, target_chunks, max_mem, target_store, temp_store=temp_store)
array_plan
array_plan.execute()
###Output
_copy_chunk((slice(1500, 1600, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1600, 1700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(0, 100, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2100, 2200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(100, 200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2200, 2300, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2800, 2900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2900, 2920, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1700, 1800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1000, 1100, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2300, 2400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2400, 2500, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1800, 1900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1100, 1200, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(300, 400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(400, 500, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1900, 2000, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(200, 300, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1200, 1300, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2500, 2600, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(1300, 1400, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2600, 2700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(500, 600, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(600, 700, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2000, 2100, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(0, 53, None),))
_copy_chunk((slice(1400, 1500, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(2700, 2800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(700, 800, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(800, 900, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(0, 25, None),))
_copy_chunk((slice(0, 2920, None),))
_copy_chunk((slice(900, 1000, None), slice(0, 25, None), slice(0, 53, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(0, 3, None)))_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(3, 6, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(42, 45, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(9, 12, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(36, 39, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(33, 36, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(45, 48, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(39, 42, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(30, 33, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(48, 51, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(6, 9, None)))_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(51, 53, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(12, 15, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(15, 18, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(18, 21, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(21, 24, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(24, 27, None)))
_copy_chunk((slice(0, 2920, None), slice(0, 25, None), slice(27, 30, None)))
###Markdown
Now that we have written a group, we can open it back up with Xarray.
###Code
xr.open_zarr('group_rechunked.zarr')
###Output
/var/folders/7c/cchjc_ys3z5_33vyp640xycm0000gn/T/ipykernel_23789/4235005900.py:1: RuntimeWarning: Failed to open Zarr store with consolidated metadata, falling back to try reading non-consolidated metadata. This is typically much slower for opening a dataset. To silence this warning, consider:
1. Consolidating metadata in this existing store with zarr.consolidate_metadata().
2. Explicitly setting consolidated=False, to avoid trying to read consolidate metadata, or
3. Explicitly setting consolidated=True, to raise an error in this case instead of falling back to try reading non-consolidated metadata.
xr.open_zarr('group_rechunked.zarr')
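###Markdown
The `RuntimeWarning` above only concerns how the store's metadata is read; as it suggests, it can be avoided either by passing `consolidated=False` explicitly or by consolidating the metadata once after writing. The cell below is a minimal sketch, not part of the original tutorial:
###Code
# open the store without looking for consolidated metadata (silences the warning)
ds_rechunked = xr.open_zarr('group_rechunked.zarr', consolidated=False)
# alternatively, consolidate the metadata once after writing:
# zarr.consolidate_metadata('group_rechunked.zarr')
ds_rechunked
###Output
_____no_output_____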
###Markdown
Often groups have many variables sharing all or a subset of dimensions. In the common case that a given dimension should have equivalent chunks in each variable that contains it, chunks can be provided as a simpler dictionary, mapping dimension names to chunksize.
###Code
# extend the dataset with some more variables
ds_complex = ds
ds_complex['air_slice'] = ds.air.isel(lat=10)
ds_complex['air_timeseries'] = ds.air.isel(lat=10, lon=10)
ds_complex
target_chunks = {'time': 2920, 'lat': 25, 'lon': 1}
max_mem = '1MB'
target_store = 'group_complex_rechunked.zarr'
temp_store = 'group_complex_rechunked-tmp.zarr'
# need to remove the existing stores or it won't work
!rm -rf group_complex_rechunked.zarr group_complex_rechunked-tmp.zarr
# rechunk directly from dataset this time
array_plan = rechunk(ds_complex, target_chunks, max_mem, target_store, temp_store=temp_store)
array_plan
array_plan.execute()
xr.open_zarr('group_complex_rechunked.zarr')
###Output
/var/folders/7c/cchjc_ys3z5_33vyp640xycm0000gn/T/ipykernel_23789/3867248564.py:1: RuntimeWarning: Failed to open Zarr store with consolidated metadata, falling back to try reading non-consolidated metadata. This is typically much slower for opening a dataset. To silence this warning, consider:
1. Consolidating metadata in this existing store with zarr.consolidate_metadata().
2. Explicitly setting consolidated=False, to avoid trying to read consolidate metadata, or
3. Explicitly setting consolidated=True, to raise an error in this case instead of falling back to try reading non-consolidated metadata.
xr.open_zarr('group_complex_rechunked.zarr')
###Markdown
Note that all the variables now have the same time chunks. Other dimensions (if they exist) also have consistent chunks. Cloud ExampleIn this example we use real data from Pangeo's [Cloud Data Catalog](http://catalog.pangeo.io/). This dataset is stored in Google Cloud Storage. We also use a [Dask Gateway](https://gateway.dask.org/) distributed cluster to scale up our processing. This part of the tutorial won't work for you unless you are in a [Pangeo Cloud](http://pangeo.io/cloud.html) environment or binder.
###Code
from dask_gateway import GatewayCluster
cluster = GatewayCluster()
cluster.scale(20)
cluster
from dask.distributed import Client
client = Client(cluster)
client
import gcsfs
# a zarr group lives here
url = 'gs://pangeo-cmems-duacs'
gcs = gcsfs.GCSFileSystem(requester_pays=True)
source_store = gcs.get_mapper(url)
###Output
_____no_output_____
###Markdown
Open Zarr Array
###Code
group = zarr.open_consolidated(source_store, mode='r')
source_array = group['sla']
source_array
source_array.chunks
###Output
_____no_output_____
###Markdown
Make a Rechunking Plan
###Code
max_mem = '1GB'
target_chunks = (8901, 72, 72)
# you must have write access to this location
store_tmp = gcs.get_mapper('pangeo-scratch/rabernat/rechunker_demo/temp.zarr')
store_target = gcs.get_mapper('pangeo-scratch/rabernat/rechunker_demo/target.zarr')
r = rechunk(source_array, target_chunks, max_mem,
store_target, temp_store=store_tmp)
r
###Output
_____no_output_____
###Markdown
Execute the Plan
###Code
result = r.execute()
result
dsa.from_zarr(result)
###Output
_____no_output_____
###Markdown
How to use `contaminante`This tutorial will show you how to use `contaminante`. If you'd like to try it yourself, you can use `contaminante` online, in the cloud, for free! Click [here](https://colab.research.google.com/github/christinahedges/contaminante/blob/master/tutorials/Colaboratory-Notebook.ipynb) to run `contaminante` online using Google's Colaboratory. Using `contaminante` on *Kepler* data To demonstrate `contaminante` we'll first use *Kepler* data. First we'll need a target to try `contaminante` on. I've chosen *KIC 6804648*. This target was observed during the prime *Kepler* mission, and was flagged as a planet candidate. In fact, the target has a contaminating eclipsing binary. This binary is only obvious in some of the *Kepler* quarters. Below we run the target through `contaminante`. Running this cell should take less than 5 minutes.
###Code
import contaminante
fig, result = contaminante.calculate_contamination(targetid='KIC {}'.format(6804648),
period=0.700606,
t0=131.59767,
duration=0.993/24,
mission='kepler')
###Output
Modeling TPFs: 100%|██████████| 18/18 [00:36<00:00, 2.03s/it]
###Markdown
Using `contaminante` we can see two pieces of evidence that this target is contaminated: 1. There is a significant offset between the center of the **target** (green cross) in the image and the **source of the transiting signal** (red cross). 2. There is a significant difference between the **target** phase curve (green phase curve) and the **source of the transiting signal** phase curve (red phase curve). The result dictionary contains the depth and positions of the target and the "contaminator", including errors. It also contains a flag for whether the target is "contaminated". The user is encouraged to 1) look at the phase curves and 2) look at the positions and transit depths before claiming that a target is contaminated.
###Code
result
###Output
_____no_output_____
###Markdown
To compare, we can look at a target that is a true, confirmed planet. Below I run the parameters for **Kepler-10** through `contaminante`.
###Code
fig, result = contaminante.calculate_contamination(targetid='KIC {}'.format(11904151),
period=0.837491,
t0=2454964.57513 - 2454833,
duration=1.8076/24,
mission='kepler')
###Output
Modeling TPFs: 100%|██████████| 15/15 [00:36<00:00, 2.42s/it]
###Markdown
Sometimes there will be no significant transiting source other than the target, and so there will be no red cross in the image and no red phase curve in the phase curve diagram. Sometimes there will be a weak detection that other pixels contain the transit, but there is frequently no significant shift if: 1. the two sources line up in the image, and 2. there is no significant difference between the target aperture and the source aperture. Cases such as this can suggest that the aperture you are using may not be optimal to recover all of the transiting signal.
###Code
result
###Output
_____no_output_____
###Markdown
Using `contaminante` on *TESS* Data`contaminante` works on TESS data too. The background scattered light is removed using principal component analysis. For targets that are available in the TESS pipeline TPF products, the TPFs will be used. If no TPF is available, the data will be cut out of the FFIs using the TESSCut API from MAST.
###Code
fig, result = contaminante.calculate_contamination(targetid="TIC 267263253",
period=4.12688,
t0=2458325.78297 - 2457000,
duration=0.3, mission='tess', bin_points=100)
result
###Output
_____no_output_____
###Markdown
Using `contaminante` on *K2* Data`contaminante` works on K2 data too. The motion noise is removed using the same Self Flat Fielding technique used in `lightkurve`. Because of the K2 motion, the results may be a little harder to interpret. For example, below there is a slight shift in the centroid, but the light curve from that source is not different from the main target. This is likely due to the pipeline apertures for K2 being slightly too small.
###Code
fig, result = contaminante.calculate_contamination(targetid="EPIC 211732801",
period=2.1316925,
t0=2308.407161,
duration=0.3, mission='K2', bin_points=5)
result
###Output
_____no_output_____
###Markdown
TutorialPIPS contains various tools for time-series analysis in astronomy, with a primary focus on detecting the period of variability. PIPS is written in an object-oriented style, so that the analysis can be performed in a straightforward way. In this introductory tutorial, you will learn the quickest methods to do the following operations:- Installing PIPS- Initializing the photometric data object --- ```PIPS.photdata```- Generating a periodogram --- ```photdata.periodogram()```: basic & advanced- Detecting the main period --- ```photdata.get_period()```: basic & advanced- Quick visualization --- ```photdata.get_bestfit_curve()```: basic- Multi-period analysis --- ```photdata.amplitude_spectrum()```: basic Importing PIPSPIPS is currently distributed on PyPI and GitHub under the name ```astroPIPS```. However, the package itself is still named ```PIPS```, and hence the import statement becomes as shown below:
###Code
import PIPS
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings(action='ignore')
PIPS.__version__
###Output
_____no_output_____
###Markdown
Before you start -- sneak peek at PIPS in 10 linesPhotometry data to phase-folded light curve -- This is what you can do with PIPS in 10 lines of code!
###Code
# preparation
data = PIPS.data_readin_LPP('../sample_data/005.dat',filter='V')
star = PIPS.photdata(data)
# automatic period detection
star.get_period()
# phase-folded data plot & best-fit curve generation & epoch detection
star.plot_lc()
x_th,y_th = star.get_bestfit_curve()
epoch_offset = star.get_epoch_offset()
# plot
plt.plot(x_th/star.period,y_th,c='yellowgreen',lw=3,alpha=0.7)
plt.plot(x_th/star.period+1,y_th,c='yellowgreen',lw=3,alpha=0.7)
plt.axvline(epoch_offset/star.period,color='red')
plt.axvline(epoch_offset/star.period+1,color='red');
#### if you are okay with exceeding 10 lines... uncomment below ####
# plt.xlabel('phase')
# plt.ylabel('mag')
# plt.show()
###Output
_____no_output_____
###Markdown
PIPS is designed so that it can be as simple as this for basic analyses, but at the same time PIPS provides a powerful platform for more high-level analysis for professional astronomers. In the tutorial below, we go over the basic steps to perform some of the most frequent operations and analyses. Data preparationPIPS takes in an array of 3xN data (samples are available on [github]('https://github.com/SterlingYM/astroPIPS/tree/master/sample_data')) -- time, magnitude (flux), and error on magnitude contained in a single python list or numpy array.For convenience, photometry data file from [LOSSPhotPipeline]('https://github.com/benstahl92/LOSSPhotPipeline') can be directly imported using a helper function ```data_readin_LPP```.
###Code
data = PIPS.data_readin_LPP('../sample_data/005.dat',filter='V')
x,y,yerr = data
print('data shape:\t',np.array(data).shape)
print('x shape:\t',x.shape)
print('y shape:\t',y.shape)
print('y-error shape:\t',yerr.shape)
###Output
data shape: (3, 103)
x shape: (103,)
y shape: (103,)
y-error shape: (103,)
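###Markdown
Since PIPS only expects this 3xN structure, the same input can also be assembled from plain numpy arrays and then passed to ```PIPS.photdata``` exactly as in the next step. The cell below is a minimal sketch with synthetic, made-up values, not part of the sample data:
###Code
# minimal sketch: build the 3xN input [time, magnitude, magnitude error] by hand
t_demo = np.linspace(0, 10, 100)                        # observation times (e.g. days)
mag_demo = 15.0 + 0.3*np.sin(2*np.pi*t_demo/0.7)        # synthetic magnitudes
err_demo = np.full_like(mag_demo, 0.01)                 # uniform magnitude errors
data_demo = [t_demo, mag_demo, err_demo]                # same 3xN structure PIPS expects
###Output
_____no_output_____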
###Markdown
Create ```photdata``` objectMost of the functions in ```astroPIPS``` are implemented as methods of the ```photdata``` object. Once the ```photdata``` object is initialized, various operations, such as period detection and data manipulation, can be performed directly on the object.
###Code
star = PIPS.photdata(data)
###Output
_____no_output_____
###Markdown
This object initially contains the raw data, and as the user performs analyses using various functions, more information, such as cleaned data, period, or amplitude, will be stored. The list of variables in the object can be printed with the following code:
###Code
print('Initially defined variables: ')
# for att in dir(star): print('- ',att) )
[print('- '+att) for att in dir(star) if not callable(getattr(star, att)) and not att.startswith('__')];
print('\nAvailable functions: ')
[print('- '+att+'()') for att in dir(star) if callable(getattr(star, att)) and not att.startswith('__')];
###Output
_____no_output_____
###Markdown
It is always a good idea to keep track of the name and photometric band of the object. For instance, the name of the data file can be used as a label:
###Code
star.label = '005.dat'
star.band = 'V'
###Output
_____no_output_____
###Markdown
Generating periodogramThe ```periodogram()``` function provides the most basic yet most valuable information for variability analysis. This is an extended application of the Fourier transform in period space (1/frequency). This function requires the arguments ```p_min``` and ```p_max```, which limit the range of the periodogram (and hence the period search). The unit has to be the same (often _days_ in astronomy) as the x-axis in the input data. basic methodMost simply, users can call ```periods,power = star.periodogram(p_min,p_max)``` and plot the results to generate a periodogram. A slightly fancier way to plot is shown below. A few things to note:- This function shouldn't take more than a few seconds to run. If no result is returned, it may be because your platform does not support the ```multiprocessing``` operation, in which case we recommend adding another argument, ```multiprocessing=False```, to the ```periodogram()``` call.- If no ```p_min``` or ```p_max``` is given, the periodogram is generated between 0.1 and 4 (days if your data is in days).
###Code
def plot_periodogram(periods,power):
'''plot helper'''
plt.figure(figsize=(15,3));
plt.plot(periods,power);
plt.fill_between(periods,0,power);
plt.xlabel('period');
plt.xlim(periods.min(),periods.max());
plt.ylim(0,1);
plt.axvline(periods[power==power.max()],c='orange',lw=5,zorder=0);
plt.show()
print('Peak signal in periodogram: ',periods[power==power.max()],' day')
# periodogram: searching the period between 0.1-day and 1-day
periods,power = star.periodogram(p_min=0.1,p_max=1.0)
# plot
plot_periodogram(periods,power)
###Output
_____no_output_____
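###Markdown
If the cell above hangs or returns nothing on a platform without ```multiprocessing``` support, the same periodogram can be generated in single-process mode, as mentioned above. A minimal sketch:
###Code
# fallback for platforms without multiprocessing support (slower, but the same result)
periods,power = star.periodogram(p_min=0.1,p_max=1.0,multiprocessing=False)
plot_periodogram(periods,power)
###Output
_____no_output_____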
###Markdown
More advanced methodBy default, the ```periodogram``` function uses a 5-term Fourier model, for which a faster, linear algebra-based method is available. For instance, the basic method shown above is equivalent to calling ```periodogram(p_min=0.1, p_max=1.0, method='fast', model='Fourier', Nterms=5)```. Users can change the template model based on the expected shape of the light curve. Another pre-implemented model is the Gaussian Mixture Model (GMM), which can be specified by changing the ```model``` argument: ```periodogram(p_min=0.1, p_max=1.0, method='custom', model='Gaussian', Nterms=5)```. Since the GMM is integrated with the Super-Gaussian function in PIPS, users can give another argument, ```p```, which changes the power parameter in the Super-Gaussian. Note the change in the ```method``` argument as well: while we implemented Gaussian fitting in log-linear form (```method='fast'```), the resulting fit is often erroneous, and thus linear regression (```method='custom'```) is preferred for the Gaussian model. More discussion on this topic can be found in our paper. We internally use ```scipy.optimize.curve_fit()``` for linear regression. Since this is significantly slower than the linear-algebra method, we recommend that users try to find the optimal maximum number of iterations by changing the ```maxfev``` argument as shown below.
###Code
# periodogram test w/ Gaussian
periods,power = star.periodogram(p_min=0.1,p_max=1, ## period search range
method='custom',model='Gaussian', ## model selection
Nterms=1, p=1, ## arguments for the model
maxfev=100 ## max iteration in linear regression
)
# plot
plot_periodogram(periods,power)
###Output
_____no_output_____
###Markdown
Using your own custom functionUsers can use any custom function as a model to be used in the periodogram. The model must be accompanied by an initial-guess generator (```p0_func```), and both need to follow a specific argument format. See the function docstrings below.
###Code
### define custom functions
from numba import njit
@njit
def polynomial(x,period,params,arg1,arg2=2):
'''
An example custom function (model).
Any custom function must take the arguments (x,period,params),
and users can add as many fixed (not fitted) arguments as needed.
In this function, users can define the exponent in the polynomial
by providing arg1 and arg2.
'''
mod = np.remainder(x,period)
return params[0] + params[1]*(mod-params[3])**arg1 + params[2]*(mod-params[4])**arg2
def poly_p0(x,y,yerr,period,**kwargs):
'''
An example of initial-guess generator (p0_func).
    Any p0_func must take the arguments (x,y,yerr,period,**kwargs).
    The output array or list must be in the same shape as "params" in the model function.
'''
return [np.mean(y),1,1,period/2,period/2]
### generate periodogram with the custom function
periods,power = star.periodogram(
p_min=0.1, p_max=1, ## period search between 0.1 to 1 day
method ='custom', ## for any custom function this argument needs to be given
model = polynomial, ## now you can pass the function itself!
p0_func = poly_p0, ## initial-guess generator function must be given as well
    arg1 = 1,    ## users MUST specify arguments if no default is specified
    arg2 = 4,    ## since arg2=2 is specified by default, this is optional
maxfev = 100 ## start with small maxfev and increase later
)
# plot
plot_periodogram(periods,power)
###Output
_____no_output_____
###Markdown
Period detectionThe period detection function utilizes ```periodogram()``` and automatically detects the peak. The periodogram is then refined near the detected peak for accurate period detection. This is followed by linear regression to estimate the uncertainty of the detected period. A few things to note:- The ```photdata.get_period()``` function uses a 5-term Fourier model by default.- Users can simply run the function without any arguments to search for a period between 0.1 and 4.0 (days).- The detected period and period error are stored in ```photdata.period``` and ```photdata.period_err```.- This function also returns the period and period error. Basic method
###Code
star.get_period(); # no argument -> 5-term Fourier, searches period between 0.1-4 day
print(star.period, star.period_err)
period,period_err = star.get_period(p_min=0.1,p_max=1,debug=True) # debug option enables the progress printing
print(period,period_err)
###Output
0.000s --- starting the process...
0.000s --- preparing data...
0.000s --- getting a periodogram...
0.509s --- detecting top 5 peaks...
0.510s --- preparing for finer sampling near peaks...
0.511s --- performing finer sampling near peaks...
0.916s --- period candidate: 0.6968767193610299
0.930s --- detecting aliasing...
0.930s --- alias factor: 1
0.931s --- period candidate: 0.6968767193610299
0.932s --- estimating the uncertainty...
0.947s --- period candidate: 0.6968767193610299
0.947s --- period fitted*: 0.6968786839335414
0.947s --- period error: 2.2667570909410562e-05
0.947s --- refining samples...
0.948s --- refining search width = 6.588e-04
1.315s --- period candidate: 0.6968899220719549
1.316s --- period fitted*: 0.6968946264691298
1.316s --- period error: 2.285551532900411e-05
1.316s --- * validating period error...
1.316s --- * fitted period - peak period = 4.70e-06
1.316s --- * expected deviation size = 2.29e-05
1.316s --- * period error validated
1.316s --- period = 0.696890 +- 0.000023d
1.316s --- process completed.
0.6968899220719549 2.285551532900411e-05
###Markdown
Advanced methodSince ```get_period()``` internally calls the ```periodogram()``` function, any arguments that change the settings of ```periodogram()``` can be applied. For example, users can change the model:
###Code
star.get_period(p_min=0.1,p_max=1.0,method='custom',model='Gaussian')
###Output
_____no_output_____
###Markdown
Similarly, any custom model can be implemented:
###Code
star.get_period(p_min=0.1, p_max=1.0,
method='custom',
model=polynomial,
p0_func=poly_p0,
arg1 = 1,
arg2 = 4,
multiprocessing=False)
###Output
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
###Markdown
VisualizationPIPS provides a tool for easy plotting with ```plot_lc()```. This function automatically uses the most recently updated period value in ```photdata``` and returns the phase-folded data. There is also an easy way to overplot the best-fit model at that period using the ```get_bestfit_curve()``` function. Like many other functions in ```photdata```, users can specify the model and other parameters. In addition, ```get_epoch_offset()``` returns the time-of-maxima offset in the phase-folded data (in units of the original x-axis, not normalized to unitless phase) and enables easy offsetting / visualization of the epoch.
###Code
# detect period
star.get_period()
# phase-folded plot
star.plot_lc() # plots (x%period, y) scatter: normalized to phase
x_th,y_th = star.get_bestfit_curve()
epoch_offset = star.get_epoch_offset() # the epoch offset in the unit of [days] (not normalized to phase)
# plot
plt.plot(x_th/star.period,y_th,c='yellowgreen',lw=3,alpha=0.7)
plt.plot(x_th/star.period+1,y_th,c='yellowgreen',lw=3,alpha=0.7)
plt.axvline(epoch_offset/star.period,color='red')
plt.axvline(epoch_offset/star.period+1,color='red')
# get period with Gaussian model
p_Gaussian,p_err_Gaussian = star.get_period(p_min=0.1,p_max=1.0,method='custom',model='Gaussian')
# auto plot at specified period
star.plot_lc(period=p_Gaussian)
x_th,y_th = star.get_bestfit_curve(period=p_Gaussian,model='Gaussian',Nterms=1,p=1,maxfev=1000000)
# plot
plt.plot(x_th/star.period,y_th,c='yellowgreen',lw=3,alpha=0.7)
plt.plot(x_th/star.period+1,y_th,c='yellowgreen',lw=3,alpha=0.7)
###Output
_____no_output_____
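###Markdown
As a side note, the phase-folding that ```plot_lc()``` performs is conceptually just a modular-arithmetic operation on the time axis. A minimal, self-contained numpy sketch using synthetic data (independent of PIPS, shown purely for illustration) looks like this:
###Code
import numpy as np
import matplotlib.pyplot as plt

# synthetic light curve: irregular sampling of a sinusoid at a known period
rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 30, 500))
period = 0.6969
y = np.sin(2 * np.pi * t / period) + 0.1 * rng.normal(size=t.size)

# phase folding: reduce each time modulo the period and normalize to [0, 1)
phase = (t % period) / period
plt.scatter(phase, y, s=4)
plt.xlabel('phase')
plt.ylabel('flux')
plt.show()
###Output
_____no_output_____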
###Markdown
Multi-period detectionWhen the object is expected to have more than one period (e.g., a double-mode pulsator or a variable binary), the light curve can be a superposition of periodic variations at two or more periods. PIPS can automatically generate the amplitude spectrum of multi-periodic objects.When ```get_period_multi``` is called, it returns the detected periods and amplitudes of the top ```N``` periods. ```amplitude_spectrum``` internally calls it and generates the amplitude spectrum. It should be noted, however, that PIPS forces the detection even if the signal is just one of the tallest spikes in the background noise and not a real period.
###Code
# multi-period detection
period,spectrum = star.amplitude_spectrum(p_min=0.1,p_max=0.9,N=10,multiprocessing=False)
plt.figure(figsize=(10,3))
plt.plot(period,spectrum)
plt.xlim(0.1,0.9)
plt.xlabel('period (d)')
plt.ylabel('amplitude (mag)')
plt.show()
###Output
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
warning: provided uncertainty may not be accurate. Try increasing sampling size (N_peak_test, default 500) and/or turn on the force_refine option.
warning: error size infinity: replacing with periodogram peak width
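###Markdown
To make the idea of a multi-periodic signal concrete, here is a small synthetic example (pure numpy, independent of PIPS) of a light curve built as a superposition of two periods:
###Code
import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 5, 2000)
# two periodic components with different (arbitrarily chosen) periods and amplitudes
flux = 0.3 * np.sin(2 * np.pi * t / 0.57) + 0.1 * np.sin(2 * np.pi * t / 0.41)
plt.figure(figsize=(10, 3))
plt.plot(t, flux)
plt.xlabel('time (d)')
plt.ylabel('flux')
plt.show()
###Output
_____no_output_____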
###Markdown
Tutorial IntroductionThis draft of the modelling framework has a code name: `cm4twc`, for Community Model for the Terrestrial Water Cycle.
###Code
import cm4twc
import tests
print(cm4twc.__version__)
###Output
0.0.1
###Markdown
Core concepts structuring the framework `TimeDomain` classThis class characterises the time dimension of a `Component`.
###Code
from datetime import datetime
timedomain = cm4twc.TimeDomain.from_datetime_sequence(
datetimes=(datetime(2019, 1, 1, 9, 0, 0),
datetime(2019, 1, 2, 9, 0, 0),
datetime(2019, 1, 3, 9, 0, 0))
)
print(timedomain)
###Output
TimeDomain(
time (3,): [2019-01-01 09:00:00, 2019-01-02 09:00:00, 2019-01-03 09:00:00] gregorian
bounds (3, 2): [[2019-01-01 09:00:00, ..., 2019-01-04 09:00:00]] gregorian
calendar: gregorian
units: seconds since 1970-01-01 00:00:00Z
timedelta: 1 day, 0:00:00
)
###Markdown
`SpaceDomain` classThis class characterises the space dimensions of a `Component`. It is intended as an umbrella class from which to subclass. A first subclass available is `Grid`, itself subclassed into `LatLonGrid` and `RotatedLatLonGrid`.
###Code
spacedomain = cm4twc.RotatedLatLonGrid(
grid_latitude=[2.2, 1.76, 1.32, 0.88, 0.44, 0., -0.44, -0.88, -1.32, -1.76],
grid_longitude=[-4.7, -4.26, -3.82, -3.38, -2.94, -2.5, -2.06, -1.62, -1.18],
grid_latitude_bounds=[[2.42, 1.98], [1.98, 1.54], [1.54, 1.1], [1.1, 0.66],
[0.66, 0.22], [0.22, -0.22], [-0.22, -0.66],
[-0.66, -1.1], [-1.1, -1.54], [-1.54, -1.98]],
grid_longitude_bounds=[[-4.92, -4.48], [-4.48, -4.04], [-4.04, -3.6],
[-3.6, -3.16], [-3.16, -2.72], [-2.72, -2.28],
[-2.28, -1.84], [-1.84, -1.4], [-1.4, -0.96]],
altitude=1.5, altitude_bounds=[1.0, 2.0],
earth_radius=6371007., grid_north_pole_latitude=38.0,
grid_north_pole_longitude=190.0
)
print(spacedomain)
spacedomain2 = cm4twc.LatLonGrid.from_extent_and_resolution(
latitude_extent=(30, 70),
latitude_resolution=5,
longitude_extent=(0, 90),
longitude_resolution=10
)
print(spacedomain2)
###Output
LatLonGrid(
shape {Y, X}: (8, 9)
Y, latitude (8,): [32.5, ..., 67.5] degrees_north
X, longitude (9,): [5.0, ..., 85.0] degrees_east
Y_bounds (8, 2): [[30.0, ..., 70.0]] degrees_north
X_bounds (9, 2): [[0.0, ..., 90.0]] degrees_east
)
###Markdown
`DataSet` classThis class exists to host all of the data required to run a `Component` of a `Model`. It is a dictionary-like object that stores references to `cf.Field` instances.
###Code
dataset = cm4twc.DataSet(
files=['in/dummy_driving_data_1day.nc', 'in/dummy_ancillary_data.nc'],
name_mapping={
'rainfall_flux': 'rainfall',
'snowfall_flux': 'snowfall',
'air_temperature': 'air_temperature',
'soil_temperature': 'soil_temperature'
}
)
print(dataset)
###Output
DataSet{
air_temperature: <CF Field: air_temperature(time(6), atmosphere_hybrid_height_coordinate(1), grid_latitude(10), grid_longitude(9)) K>
rainfall: <CF Field: rainfall_flux(time(6), atmosphere_hybrid_height_coordinate(1), grid_latitude(10), grid_longitude(9)) kg m-2 s-1>
snowfall: <CF Field: snowfall_flux(time(6), atmosphere_hybrid_height_coordinate(1), grid_latitude(10), grid_longitude(9)) kg m-2 s-1>
soil_temperature: <CF Field: soil_temperature(time(6), atmosphere_hybrid_height_coordinate(1), grid_latitude(10), grid_longitude(9)) K>
vegetation_fraction: <CF Field: vegetation_fraction(atmosphere_hybrid_height_coordinate(1), grid_latitude(10), grid_longitude(9)) 1>
}
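###Markdown
Since `DataSet` is dictionary-like, individual `cf.Field` objects can presumably be pulled out by their (possibly remapped) name; a minimal sketch, assuming the `dataset` object created above:
###Code
# access a single field by the name used in the DataSet
rainfall_field = dataset['rainfall']
print(rainfall_field)
###Output
_____no_output_____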
###Markdown
`Component` classThis class is an umbrella class which is subclassed into three distinct classes for surface, sub-surface, and open water parts of the water cycle: `SurfaceLayerComponent`, `SubSurfaceComponent`, and `OpenWaterComponent` respectively.
###Code
component = tests.dummy_components.surfacelayer.Dummy(
timedomain=timedomain,
spacedomain=spacedomain,
dataset=dataset,
parameters={}
)
print(component)
###Output
Dummy(
category: surfacelayer
inwards:
soil_water_stress [1]
outwards:
throughfall [kg m-2 s-1]
snowmelt [kg m-2 s-1]
transpiration [kg m-2 s-1]
evaporation_soil_surface [kg m-2 s-1]
evaporation_ponded_water [kg m-2 s-1]
evaporation_openwater [kg m-2 s-1]
driving data:
rainfall [kg m-2 s-1]
snowfall [kg m-2 s-1]
air_temperature [K]
ancillary data:
vegetation_fraction [1]
states:
canopy [kg m-2]
snowpack [kg m-2]
solver history: 1
)
###Markdown
`Model` classThis class constitutes the actual modelling framework, and it needs to be instantiated with three `Component` instances, one for each of the three parts of the water cycle.
###Code
model = cm4twc.Model(
surfacelayer=tests.dummy_components.surfacelayer.Dummy(
timedomain=timedomain,
spacedomain=spacedomain,
dataset=dataset,
parameters={}
),
subsurface=tests.dummy_components.subsurface.Dummy(
timedomain=timedomain,
spacedomain=spacedomain,
dataset=dataset,
parameters={'saturated_hydraulic_conductivity': 2}
),
openwater=tests.dummy_components.openwater.Dummy(
timedomain=timedomain,
spacedomain=spacedomain,
dataset=dataset,
parameters={'residence_time': 1}
)
)
print(model)
model2 = cm4twc.Model.from_yaml('test.yaml')
print(model2)
###Output
Model(
surfacelayer: Dummy
subsurface: Dummy
openwater: Dummy
)
###Markdown
Using the frameworkThis instance of `Model` can now be used to start a spin-up run and/or a simulation run.
###Code
model.spin_up(start=datetime(2019, 1, 1, 9, 0, 0),
end=datetime(2019, 1, 2, 9, 0, 0),
cycles=2)
outputs = model.simulate()
###Output
_____no_output_____
###Markdown
The sqlite-utils tutorial[sqlite-utils](https://sqlite-utils.datasette.io/en/stable/python-api.html) is a Python library (and [command-line tool](https://sqlite-utils.datasette.io/en/stable/cli.html)) for quickly creating and manipulating SQLite database files.This tutorial will show you how to use the Python library to manipulate data. InstallationTo install the library, run:    pip install sqlite-utilsYou can run this in a Jupyter notebook cell by executing:    %pip install sqlite-utils Or use `pip install -U sqlite-utils` to ensure you have upgraded to the most recent version.
###Code
%pip install -U sqlite_utils
import sqlite_utils
###Output
_____no_output_____
###Markdown
You can use the library with a database file on disk by running:    db = sqlite_utils.Database("path/to/my/database.db")In this tutorial we will use an in-memory database. This is a quick way to try out new things, though you should note that when you close the notebook the data stored in the in-memory database will be lost.
###Code
db = sqlite_utils.Database(memory=True)
db
###Output
_____no_output_____
###Markdown
Creating a tableWe are going to create a new table in our database called `creatures` by passing in a Python list of dictionaries.`db[name_of_table]` will access a database table object with that name.Inserting data into that table will create it if it does not already exist.
###Code
db["creatures"].insert_all([{
"name": "Cleo",
"species": "dog",
"age": 6
}, {
"name": "Lila",
"species": "chicken",
"age": 0.8,
}, {
"name": "Bants",
"species": "chicken",
"age": 0.8,
}])
###Output
_____no_output_____
###Markdown
Let's grab a `table` reference to the new creatures table:
###Code
table = db["creatures"]
###Output
_____no_output_____
###Markdown
`sqlite-utils` automatically creates a table schema that matches the keys and data types of the dictionaries that were passed to `.insert_all()`.We can see that schema using `table.schema`:
###Code
print(table.schema)
###Output
CREATE TABLE [creatures] (
[name] TEXT,
[species] TEXT,
[age] FLOAT
)
###Markdown
Accessing dataThe `table.rows` property lets us loop through the rows in the table, returning each one as a Python dictionary:
###Code
for row in table.rows:
print(row)
###Output
{'name': 'Cleo', 'species': 'dog', 'age': 6.0}
{'name': 'Lila', 'species': 'chicken', 'age': 0.8}
{'name': 'Bants', 'species': 'chicken', 'age': 0.8}
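###Markdown
As an aside, the table object can also filter rows directly: if I remember the API correctly, `table.rows_where()` accepts a SQL where-clause fragment plus parameters and yields the matching rows (treat the exact signature as an assumption and check the sqlite-utils docs):
###Code
# filter rows without writing a full SELECT statement
list(table.rows_where("species = ?", ["chicken"]))
###Output
_____no_output_____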
###Markdown
The `db.query(sql)` method can be used to execute SQL queries and return the results as dictionaries:
###Code
list(db.query("select * from creatures"))
###Output
_____no_output_____
###Markdown
Or in a loop:
###Code
for row in db.query("select name, species from creatures"):
print(f'{row["name"]} is a {row["species"]}')
###Output
Cleo is a dog
Lila is a chicken
Bants is a chicken
###Markdown
SQL parametersYou can run a parameterized query using `?` as placeholders and passing a list of variables. The variables you pass will be correctly quoted, protecting your code from SQL injection vulnerabilities.
###Code
list(db.query("select * from creatures where age > ?", [1.0]))
###Output
_____no_output_____
###Markdown
As an alternative to question marks we can use `:name` parameters and feed in the values using a dictionary:
###Code
list(db.query("select * from creatures where species = :species", {"species": "chicken"}))
###Output
_____no_output_____
###Markdown
Primary keysWhen we created this table we did not specify a primary key. SQLite automatically creates a primary key called `rowid` if no other primary key is defined.We can run `select rowid, * from creatures` to see this hidden primary key:
###Code
list(db.query("select rowid, * from creatures"))
###Output
_____no_output_____
###Markdown
We can also see that using `table.pks_and_rows_where()`:
###Code
for pk, row in table.pks_and_rows_where():
print(pk, row)
###Output
1 {'rowid': 1, 'name': 'Cleo', 'species': 'dog', 'age': 6.0}
2 {'rowid': 2, 'name': 'Lila', 'species': 'chicken', 'age': 0.8}
3 {'rowid': 3, 'name': 'Bants', 'species': 'chicken', 'age': 0.8}
###Markdown
Let's recreate the table with our own primary key, which we will call `id`.`table.drop()` drops the table:
###Code
table.drop()
table
###Output
_____no_output_____
###Markdown
We can see a list of tables in the database using `db.tables`:
###Code
db.tables
###Output
_____no_output_____
###Markdown
We'll create the table again, this time with an `id` column.We use `pk="id"` to specify that the `id` column should be treated as the primary key for the table:
###Code
db["creatures"].insert_all([{
"id": 1,
"name": "Cleo",
"species": "dog",
"age": 6
}, {
"id": 2,
"name": "Lila",
"species": "chicken",
"age": 0.8,
}, {
"id": 3,
"name": "Bants",
"species": "chicken",
"age": 0.8,
}], pk="id")
print(table.schema)
###Output
CREATE TABLE [creatures] (
[id] INTEGER PRIMARY KEY,
[name] TEXT,
[species] TEXT,
[age] FLOAT
)
###Markdown
Inserting more recordsWe can call `.insert_all()` again to insert more records. Let's add two more chickens.
###Code
table.insert_all([{
"id": 4,
"name": "Azi",
"species": "chicken",
"age": 0.8,
}, {
"id": 5,
"name": "Snowy",
"species": "chicken",
"age": 0.9,
}], pk="id")
list(table.rows)
###Output
_____no_output_____
###Markdown
Since the `id` column is an integer primary key, we can insert a record without specifying an ID and one will be automatically added.Since we are only adding one record we will use `.insert()` instead of `.insert_all()`.
###Code
table.insert({"name": "Blue", "species": "chicken", "age": 0.9})
###Output
_____no_output_____
###Markdown
We can use `table.last_pk` to see the ID of the record we just added.
###Code
table.last_pk
###Output
_____no_output_____
###Markdown
Here's the full list of rows again:
###Code
list(table.rows)
###Output
_____no_output_____
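###Markdown
As a quick sanity check, the table object also exposes a row count; to the best of my recollection this is the `count` property:
###Code
# number of rows currently in the creatures table
table.count
###Output
_____no_output_____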
###Markdown
If you try to add a new record with an existing ID, you will get an `IntegrityError`:
###Code
table.insert({"id": 6, "name": "Red", "species": "chicken", "age": 0.9})
###Output
_____no_output_____
###Markdown
You can use `replace=True` to replace the matching record with a new one:
###Code
table.insert({"id": 6, "name": "Red", "species": "chicken", "age": 0.9}, replace=True)
list(table.rows)
###Output
_____no_output_____
###Markdown
Updating a recordWe will rename that row back to `Blue`, this time using the `table.update(pk, updates)` method:
###Code
table.update(6, {"name": "Blue"})
list(db.query("select * from creatures where id = ?", [6]))
###Output
_____no_output_____
###Markdown
Extracting one of the columns into another tableOur current table has a `species` column with a string in it - let's pull that out into a separate table.We can do that using the [table.extract() method](https://sqlite-utils.datasette.io/en/stable/python-api.html#extracting-columns-into-a-separate-table).
###Code
table.extract("species")
###Output
_____no_output_____
###Markdown
We now have a new table called `species`, which we can see using the `db.tables` method:
###Code
db.tables
###Output
_____no_output_____
###Markdown
Our creatures table has been modified - instead of a `species` column it now has `species_id` which is a foreign key to the new table:
###Code
print(db["creatures"].schema)
print(list(db["creatures"].rows))
###Output
CREATE TABLE "creatures" (
[id] INTEGER PRIMARY KEY,
[name] TEXT,
[species_id] INTEGER,
[age] FLOAT,
FOREIGN KEY([species_id]) REFERENCES [species]([id])
)
[{'id': 1, 'name': 'Cleo', 'species_id': 1, 'age': 6.0}, {'id': 2, 'name': 'Lila', 'species_id': 2, 'age': 0.8}, {'id': 3, 'name': 'Bants', 'species_id': 2, 'age': 0.8}, {'id': 4, 'name': 'Azi', 'species_id': 2, 'age': 0.8}, {'id': 5, 'name': 'Snowy', 'species_id': 2, 'age': 0.9}, {'id': 6, 'name': 'Blue', 'species_id': 2, 'age': 0.9}]
###Markdown
The new `species` table has been created and populated too:
###Code
print(db["species"].schema)
print(list(db["species"].rows))
###Output
CREATE TABLE [species] (
[id] INTEGER PRIMARY KEY,
[species] TEXT
)
[{'id': 1, 'species': 'dog'}, {'id': 2, 'species': 'chicken'}]
###Markdown
We can use a join SQL query to combine data from these two tables:
###Code
list(db.query("""
select
creatures.id,
creatures.name,
creatures.age,
species.id as species_id,
species.species
from creatures
join species on creatures.species_id = species.id
"""))
###Output
_____no_output_____
###Markdown
Tutorial This tutorial shows the basic steps of using CyMorph to extract metrics from an image. First we need to load a FITS image.
###Code
# additional libraries for data loading and manipulation
import astropy.io.fits as fits
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [10., 8.]
# image is from SDSS DR7 (objid=587725472806600764)
fits_file = fits.open('data/image.fits')
# all data arrays should be np.float32
image = np.array(fits_file[0].data, np.float32)
# show the image
m, s = np.mean(image), np.std(image)
plt.imshow(image, interpolation='nearest', cmap='gray', vmin=m-s, vmax=m+s, origin='lower')
plt.colorbar();
###Output
_____no_output_____
###Markdown
It is essential to point out that this image should be preprocessed (as it contains secondary sources):1. Cleaned - remove secondary objects.2. Recentered - the barycenter of the target object should match the central pixel of the image.3. Segmented - a segmented mask should be applied to assign a value of 0 to all pixels that do not belong to the target object. None of these routines are included in the CyMorph code; they are the end user's responsibility. This is dictated by the fact that each survey is different in optics, resolution, and storage routines (sky subtraction, object identification, or even cleaning). Here we will present an example of basic methods to perform these steps. All steps heavily rely on the `sep` package and its methods. Object detection
###Code
import sep
# background estimation
bkg = sep.Background(image)
# background subtraction
image_sub = image - bkg
# source detection
detection_threshold = 1.5
objects, segmented_mask = sep.extract(image_sub, detection_threshold, err=bkg.globalrms, segmentation_map=True)
###Output
_____no_output_____
###Markdown
It is important to point out that we need to pass `segmentation_map=True` to receive a segmented map (mask) from `sep.extract`.
###Code
from matplotlib.patches import Ellipse
# plot background-subtracted image
fig, ax = plt.subplots(1, 2)
m, s = np.mean(image_sub), np.std(image_sub)
ax[0].set_title('Identified objects')
im = ax[0].imshow(image_sub, interpolation='nearest', cmap='gray',
vmin=m-s, vmax=m+s, origin='lower')
# plot an ellipse for each object
for i in range(len(objects)):
e = Ellipse(xy=(objects['x'][i], objects['y'][i]),
width=4*objects['a'][i],
height=4*objects['b'][i],
angle=objects['theta'][i] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax[0].add_artist(e)
# plot segmented mask for comparison
ax[1].set_title('Segmented mask')
ax[1].imshow(segmented_mask, origin='lower', cmap='gray')
###Output
_____no_output_____
###Markdown
Target object identification The input image should contain the target object in the center, so to identify it, we locate the object nearest to the central pixel of the image. This will yield only the target object from the array of all detected objects.
###Code
dx = (objects['x'] - (len(image_sub)/2))**2
dy = (objects['y'] - (len(image_sub)/2))**2
distance = np.sqrt(dx + dy)
# this will return only target object
main_object = objects[distance == min(distance)]
# we need to find index of the target object, to identify it on the segmented mask
# each object on the segmented mask is filled with values index+1
main_object_index, = np.where(objects['x'] == main_object['x'])
main_object_index = int(main_object_index) + 1
###Output
_____no_output_____
###Markdown
Cleaning the image Several metrics require a clean image to produce the result. Secondary sources will hinder the quality of the extracted morphometry.
###Code
# clean_mask is different from the segmented mask, in the sense that all pixels have values equal to 1
# and pixels belonging to secondary sources have values of 0
clean_mask = np.copy(segmented_mask)
# assign 0 to main object pixels
clean_mask[clean_mask==main_object_index] = 0
# everything that is not 0 (only pixels that belong to secondary objects) will receive 1
clean_mask[clean_mask!=0] = 1
# invert the matrix
clean_mask=1-clean_mask
# apply clean mask on image
clean_image = image * clean_mask
# plot background-subtracted image
fig, ax = plt.subplots(1, 3)
m, s = np.mean(image_sub), np.std(image_sub)
ax[0].set_title('Identified objects')
im = ax[0].imshow(image_sub, interpolation='nearest', cmap='gray',
vmin=m-s, vmax=m+s, origin='lower')
# plot an ellipse for each object
for i in range(len(objects)):
e = Ellipse(xy=(objects['x'][i], objects['y'][i]),
width=4*objects['a'][i],
height=4*objects['b'][i],
angle=objects['theta'][i] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax[0].add_artist(e)
ax[1].set_title('Clean mask')
ax[1].imshow(clean_mask, origin='lower', cmap='gray')
ax[2].set_title('Clean image')
m, s = np.mean(clean_image), np.std(clean_image)
ax[2].imshow(clean_image, origin='lower', cmap='gray',vmin=m-s, vmax=m+s)
###Output
_____no_output_____
###Markdown
To maintain coherence with the rest of the background, it is necessary to fill these patches with matching pixel values. `sep` provides `bkg.globalback` and `bkg.globalrms`, which are the "global" mean and noise of the image background, respectively.
###Code
# getting all the pixels belonging to secondary objects (they are all equal to 0)
x,y = np.where(clean_image==0)
# applying random values drawn from normal distribution
mu, sigma = bkg.globalback, bkg.globalrms
for key,value in enumerate(x):
clean_image[x[key],y[key]] = np.random.normal(mu, sigma, 1)
###Output
_____no_output_____
###Markdown
This will produce images with masked secondary objects and minimize the influence of the pixel variation for the metrics extraction.
###Code
# plot background-subtracted image
fig, ax = plt.subplots(1, 2)
m, s = np.mean(image_sub), np.std(image_sub)
ax[0].set_title('Input image with secondary objects')
im = ax[0].imshow(image_sub, interpolation='nearest', cmap='gray',
vmin=m-s, vmax=m+s, origin='lower')
# plot an ellipse for each object
for i in range(len(objects)):
e = Ellipse(xy=(objects['x'][i], objects['y'][i]),
width=4*objects['a'][i],
height=4*objects['b'][i],
angle=objects['theta'][i] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax[0].add_artist(e)
m, s = np.mean(clean_image), np.std(clean_image)
ax[1].set_title('Clean image')
im = ax[1].imshow(clean_image, interpolation='nearest', cmap='gray',
vmin=m-s, vmax=m+s, origin='lower')
###Output
_____no_output_____
###Markdown
Image recentering Multiple metrics rely on two characteristics of the image: - the image should have an odd shape (an odd number of pixels along each axis) - the central pixel of the image should match the central pixel of the target object
###Code
# getting image center
from math import floor
height, width = clean_image.shape
center_x, center_y = floor(height/2), floor(width/2)
print(f'Height = {height}, Width = {width}')
print(f'Center x = {center_x}, Center y = {center_y}')
# getting target object center
object_center_x, object_center_y = round(main_object['x'].item()), round(main_object['y'].item())
print(f'Object center x = {object_center_x}, Object center y = {object_center_y}')
# comparing image center and object center
center_x == object_center_x and center_y == object_center_y
###Output
_____no_output_____
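###Markdown
If the two centers had not coincided, a common remedy is to crop the image around the object so that it has an odd shape with the object at the central pixel. A minimal numpy sketch (assuming the object lies far enough from the image border; the half-width of 100 pixels is an arbitrary choice):
###Code
def crop_centered(img, cx, cy, half_size):
    # returns a (2*half_size+1) x (2*half_size+1) window centered on (cx, cy),
    # i.e. an odd-shaped image with the object at the central pixel
    return img[cy - half_size: cy + half_size + 1,
               cx - half_size: cx + half_size + 1]

# hypothetical usage:
# recentered = crop_centered(clean_image, object_center_x, object_center_y, 100)
###Output
_____no_output_____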
###Markdown
In this case, the image center matches the central pixel of the object, so no additional actions are necessary. Segmenting image (isolating target object) The difference between `clean_image` and `segmented_image` is that `clean_image` contains flux counts in all pixels around the target object, whereas in `segmented_image` all pixels that do not belong to the target object are set to 0.
###Code
# assign 0 to all pixels that do not belong to target object
segmented_mask[segmented_mask!=main_object_index] = 0
# applying mask on the input image
segmented_image = image * segmented_mask
fig, ax = plt.subplots(1, 3)
m, s = np.mean(image_sub), np.std(image_sub)
ax[0].set_title('Identified objects')
im = ax[0].imshow(image_sub, interpolation='nearest', cmap='gray',
vmin=m-s, vmax=m+s, origin='lower')
# plot an ellipse for each object
for i in range(len(objects)):
e = Ellipse(xy=(objects['x'][i], objects['y'][i]),
width=4*objects['a'][i],
height=4*objects['b'][i],
angle=objects['theta'][i] * 180. / np.pi)
e.set_facecolor('none')
e.set_edgecolor('red')
ax[0].add_artist(e)
ax[1].set_title('Segmented mask')
ax[1].imshow(segmented_mask, origin='lower',cmap='gray')
ax[2].set_title('Segmented image')
m, s = np.mean(image_sub), np.std(image_sub)
ax[2].imshow(segmented_image, interpolation='nearest', cmap='gray',
origin='lower')
# converting all the necessary images to np.float32
clean_image = np.array(clean_image, dtype=np.float32)
segmented_image = np.array(segmented_image, dtype=np.float32)
segmented_mask = np.array(segmented_mask, dtype=np.float32)
###Output
_____no_output_____
###Markdown
Extracting metrics Basic showcase of extracting metrics. For additional details see the corresponding sections. Concentration (C)
###Code
# importing class
from cymorph.concentration import Concentration
# values for two radii of the flux concentration
radius1 = 0.8
radius2 = 0.2
# create concentration object
c = Concentration(clean_image, radius1, radius2)
# retrieve the concentration value
print(f'Concentration metric = {c.get_concentration()}')
###Output
Concentration metric = 0.6697492907260892
###Markdown
Asymmetry (A)
###Code
# importing class
from cymorph.asymmetry import Asymmetry
# creating object
a = Asymmetry(segmented_image)
# asymmetry contains two coefficients of correlation
print(f'Asymmetry (pearson) metric = {a.get_pearsonr()}')
print(f'Asymmetry (spearman) metric = {a.get_spearmanr()}')
###Output
Asymmetry (pearson) metric = 0.011054729880220271
Asymmetry (spearman) metric = 0.1103719041925253
###Markdown
Smoothness (S)
###Code
# importing the class
from cymorph.smoothness import Smoothness
# parameters of image smoothing
butterworth_order = 2
smoothing_degradation = 0.2
# creating the object
s = Smoothness(clean_image, segmented_mask, smoothing_degradation, butterworth_order)
# smoothness contains two coefficients of correlation
print(f'Smoothness (pearson) metric = {s.get_pearsonr()}')
print(f'Smoothness (spearman) metric = {s.get_spearmanr()}')
###Output
Smoothness (pearson) metric = 0.009534401996613262
Smoothness (spearman) metric = 0.062444547493745284
###Markdown
Entropy (H)
###Code
# importing the class
from cymorph.entropy import Entropy
# entropy bins parameters
bins = 130
# creating the object
e = Entropy(segmented_image, bins)
# retrieving the metric
print(f'Entropy metric = {e.get_entropy()}')
###Output
Entropy metric = 0.49624255299568176
###Markdown
Gradient Pattern Analysis (G2)
###Code
# importing the class
from cymorph.g2 import G2
# setting the tolerances
g2_modular_tolerance = 0.03
g2_phase_tolerance = 0.02
# creating g2 object
g2 = G2(segmented_image, g2_modular_tolerance, g2_phase_tolerance)
# retriving the metric
print(f'G2 metric = {g2.get_g2()}')
###Output
G2 metric = 0.5578498244285583
###Markdown
[](https://colab.research.google.com/github/emorynlp/elit/blob/dev/docs/tutorial.ipynb)ELIT can be installed using pip, though it's not officially on PyPI yet.
###Code
!pip install -U git+https://github.com/emorynlp/elit.git@dev
###Output
_____no_output_____
###Markdown
The common workflow for ELIT is to load a model and then call it as a function. Models in ELIT are represented as string-typed identifiers which are grouped by tasks. For example, let's list all the models in ELIT.
###Code
import elit
elit.pretrained.ALL
###Output
_____no_output_____
###Markdown
List all the MultiTaskLearning models:
###Code
elit.pretrained.mtl.ALL
###Output
_____no_output_____
###Markdown
ELIT offers several models for the same task with different settings. For example, the `LEM_POS_NER_DEP_SDP_CON_AMR_ROBERTA_BASE_EN` model is finetuned with RoBERTa-base. Let's load it and see what it can do.
###Code
mtl = elit.load(elit.pretrained.mtl.LEM_POS_NER_DEP_SDP_CON_AMR_ROBERTA_BASE_EN)
###Output
_____no_output_____
###Markdown
Once you call `load` on a model, ELIT will download it and load it into main memory, or onto the GPU if you have one. The loaded model behaves just like a function: you can pass in a list of tokenized sentences and get the annotations back as the returned value.
###Code
doc = mtl([
["Emory", "NLP", "is", "a", "research", "lab", "in", "Atlanta", "."],
["It", "is", "founded", "by", "Jinho", "D.", "Choi", "in", "2014", ".", "Dr.", "Choi", "is", "a", "professor", "at", "Emory", "University", "."]
])
doc
###Output
_____no_output_____
###Markdown
As you can see, the returned `doc` is a Python dict storing outputs from different models. Refer to our GitHub docs for its format and guidelines.
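###Markdown
Since `doc` is dict-like, a quick way to see which tasks produced annotations is to iterate over it; a minimal sketch (just an inspection loop, not part of the official ELIT examples):
###Code
# each key should correspond to a task, each value to that task's annotations
for task_name, annotations in doc.items():
    print(task_name, type(annotations))
###Output
_____no_output_____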
###Markdown
.. image:: ../artwork/logo.png :width: 300px :align: right.. _tutorial:Tutorial========Imagine that you have a workflow made up of three tasks "A", "B", and "C", and the tasks must always be performed in the right order, because task "C" depends on the output of task "A", and also depends on the output of task "B". Further, imagine that the individual tasks are time-consuming, so that you don't want to execute a task unless it's really necessary: if something has changed that only affects task "C", and tasks "A" and "B" have already been completed, then you should only need to redo task "C". Over time, keeping track of which tasks need to be executed can become extremely complex as your workflow grows, branches, and merges.Graphcat is a tool that allows you to explicitly capture a workflow in a *computational graph*, managing the details of executing each task in the proper order and at the proper time, no matter the state of the tasks or the complexity of the workflow. Graphcat doesn't care what kind of data your graph manages, doesn't dictate how you name the entities in the graph, provides advanced functionality like expression-based tasks, and is easy to learn.Intrigued? Let's look at some code!
###Markdown
The Basics
###Markdown
First, we import :mod:`graphcat`, which includes all of the functionality for managing computational graphs. If you're using Graphcat in your scripts, this will likely be all you need. For this tutorial we also import :mod:`graphcat.notebook`, so we can see the state of our graphs as we work.
###Code
import graphcat
import graphcat.notebook
###Output
_____no_output_____
###Markdown
Next, let's reproduce the example workflow from above, starting with an (initially empty) computational graph:
###Code
graph = graphcat.StaticGraph()
###Output
_____no_output_____
###Markdown
Next, we will add tasks to the graph, identified using unique string names:
###Code
graph.add_task("A")
graph.add_task("B")
graph.add_task("C")
###Output
_____no_output_____
###Markdown
Note that a task name can be any hashable object, not just a string - we used strings in this case because they map well to our particular problem.Now, we can define the links that determine which tasks depend on previous tasks:
###Code
graph.add_links(source="A", targets="C")
graph.add_links(source="B", targets="C")
###Output
_____no_output_____
###Markdown
There are two ways to think about links. One way is to picture data "flowing" through the links from the source tasks to the target tasks, which is why we sometimes call the sources "upstream" and the targets "downstream". Alternatively, you can say that the target of a link "depends on" the source - anytime the source changes, the target needs to change, along with all of *its* targets, and-so-on. Both viewpoints are completely valid, and you will find that both are useful, depending on the context.Finally, because a picture is worth $1\times10^3$ words, let's see what the graph looks like so far:
###Code
graphcat.notebook.display(graph)
###Output
_____no_output_____
###Markdown
Notice that each task is drawn as a box, labelled with the task name, and the links are drawn as arrows that point from sources to targets, i.e. the arrows point in the direction of data flow. Of course, all we've done so far is define how our tasks relate to one another - we haven't actually executed any of them. Before we do so, let's introduce some logging so we can see what Graphcat is doing under the hood. We'll import the standard Python :mod:`logging` module and configure it to log informational messages. Then, we create a special :class:`graphcat.Logger` object that will watch the computational graph and log events as they happen:
###Code
import logging
logging.basicConfig(level=logging.INFO)
logger = graphcat.Logger(graph)
###Output
_____no_output_____
###Markdown
By default, newly-created tasks are considered *unfinished*, because they haven't been executed yet. Let's finish task "A" by *updating* it:
###Code
graph.update("A")
graphcat.notebook.display(graph)
###Output
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task A executing. Inputs: {}
INFO:graphcat.common:Task A finished. Output: None
###Markdown
The call to :meth:`update` *executes* the unfinished task, which we see in the second line of logging; once the task has been executed, the third line in the log shows that its state is now *finished* (ignore the "Inputs: ..." and "Output: ..." text in the log, we will explain their meaning shortly). Note that in our visualization task "A" is now rendered with a black background to show that the task is finished.Continuing on, let's update task "C" and see what happens:
###Code
graph.update("C")
graphcat.notebook.display(graph)
###Output
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task B executing. Inputs: {}
INFO:graphcat.common:Task B finished. Output: None
INFO:graphcat.common:Task C updating.
INFO:graphcat.common:Task C executing. Inputs: {None: None, None: None}
INFO:graphcat.common:Task C finished. Output: None
###Markdown
Looking closely at the log, we see that Task "C" is executed, but only *after* Task "B". Task "A" isn't executed, because it was already finished before :meth:`update` was called. Note that this conforms to our original goals for our workflow: tasks "A" and "B" must be completed before task "C", and we never re-execute tasks that are already finished.To reinforce this point, let's look at what happens if a task becomes *unfinished* again. Imagine that some outside change has made the results of task "A" obsolete. We can notify Graphcat that this has happened using :meth:`mark_unfinished`:
###Code
graph.mark_unfinished("A")
graphcat.notebook.display(graph)
###Output
_____no_output_____
###Markdown
Notice that both "A" and "C" have become unfinished: because "A" is unfinished and "C" depends on "A", "C" becomes unfinished too. "B" is unaffected because it doesn't depend on "A". Let's update "C" again:
###Code
graph.update("C")
graphcat.notebook.display(graph)
###Output
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task A executing. Inputs: {}
INFO:graphcat.common:Task A finished. Output: None
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task C updating.
INFO:graphcat.common:Task C executing. Inputs: {None: None, None: None}
INFO:graphcat.common:Task C finished. Output: None
###Markdown
This time "C" is executed, but only after "A". As expected, "B" isn't executed because it was already finished.Hopefully, we've convinced you that Graphcat always knows which tasks to execute, and in what order. This is true no matter how complex your computational graph becomes. In the next section, we will explore how to configure the graph to perform real work. Task FunctionsIn the previous section, we learned how to represent our workflow using tasks and links, but the tasks themselves didn't actually do anything when executed. To rectify this, we will assign *task functions* that define what a task does when executed. A task function is simply a Python function (technically: a Python *callable*) that is called when a task is executed, returning a value that is stored as the *output* for the task. When downstream tasks are executed, their task functions have access to the outputs from their upstream dependencies. Thus, upstream task function *outputs* become downstream task function *inputs*.Let's turn our current example into a simple calculator. Tasks "A" and "B" will have task functions that return numbers, and task "C" will return the sum of its inputs. First, we define the task functions for each task:
###Code
def task_a(graph, name, inputs):
return 2
def task_b(graph, name, inputs):
return 3
def add(graph, name, inputs):
return sum([value() for value in inputs.values()])
###Output
_____no_output_____
###Markdown
Note that every task function must accept three keyword arguments: `graph`, `name` and `inputs`. The `graph` argument is the graph that this task is a part of; `name` is the name of the task being executed, and is useful for logging or changing the function's behavior based on the task's identity; `inputs` is an object that behaves like a Python dict and contains the outputs from upstream tasks.Don't worry too much about how `add()` is implemented, we'll discuss that in detail in a bit. Let's assign our task functions to each task in the graph:
###Code
graph.set_task("A", task_a)
graph.set_task("B", task_b)
graph.set_task("C", add)
graphcat.notebook.display(graph)
###Output
_____no_output_____
###Markdown
Notice that changing the task functions with :meth:`set_task` also marks the tasks as unfinished. This is an example of how Graphcat always ensures that changes to the graph will propagate to its results. Let's update the graph and see what happens:
###Code
graph.update("C")
graphcat.notebook.display(graph)
###Output
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task A executing. Inputs: {}
INFO:graphcat.common:Task A finished. Output: 2
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task B executing. Inputs: {}
INFO:graphcat.common:Task B finished. Output: 3
INFO:graphcat.common:Task C updating.
INFO:graphcat.common:Task C executing. Inputs: {None: 2, None: 3}
INFO:graphcat.common:Task C finished. Output: 5
###Markdown
Now, the full meaning of the log messages should be clearer - tasks "A" and "B" have no inputs when they execute, returning the values `2` and `3` respectively as their outputs. Those outputs become the inputs to "C" when it executes, where they are summed, so that the output of "C" is `5`, as expected.Of course, you normally want to retrieve the outputs from your graph so you can do something with them. So far, all we've seen are log messages. To retrieve the most recent output for a task, use :meth:`output` instead of :meth:`update`:
###Code
print("Result:", graph.output("C"))
###Output
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task C updating.
###Markdown
Note that :meth:`output` implicitly calls :meth:`update` for you, so you can simply use it whenever you need to execute your graph and retrieve an output.Now that our graph is performing a real (albeit trivial) task, let's look at some ways to simplify setting it up:First, it is extremely common for a graph to have "parameter" tasks that simply return a value, as tasks "A" and "B" do in our example. Having to create a separate function for every parameter would be perverse. Fortunately, Graphcat provides a helper function, :func:`graphcat.constant`, that you can use instead:
###Code
graph.set_task("A", graphcat.constant(4))
graph.set_task("B", graphcat.constant(5))
print("Result:", graph.output("C"))
###Output
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task A executing. Inputs: {}
INFO:graphcat.common:Task A finished. Output: 4
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task B executing. Inputs: {}
INFO:graphcat.common:Task B finished. Output: 5
INFO:graphcat.common:Task C updating.
INFO:graphcat.common:Task C executing. Inputs: {None: 4, None: 5}
INFO:graphcat.common:Task C finished. Output: 9
###Markdown
:func:`graphcat.constant` is a factory for task functions that always return a value you provide, eliminating the need to create dedicated task functions of your own for parameters. Use :func:`graphcat.constant` with :meth:`set_task` any time you need to change the parameters in your workflow, whether due to user input, changes in the environment, network traffic, or any other externality that affects your workflow outputs.Next, you may wonder why we had to call both :meth:`add_task` and :meth:`set_task` just to create a working task. In fact, we didn't - either method can create a task and assign its function in a single step:
###Code
graph.set_task("D", graphcat.constant(6))
###Output
_____no_output_____
###Markdown
The difference between :meth:`add_task` and :meth:`set_task` is that the former will fail if a task with the given name already exists, while the latter will quietly overwrite it.Let's connect our newly created task "D" to "C", and see that it integrates nicely with the rest of the computation:
###Code
graph.set_links(source="D", targets="C")
print("Result:", graph.output("C"))
graphcat.notebook.display(graph)
###Output
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task D updating.
INFO:graphcat.common:Task D executing. Inputs: {}
INFO:graphcat.common:Task D finished. Output: 6
INFO:graphcat.common:Task C updating.
INFO:graphcat.common:Task C executing. Inputs: {None: 4, None: 5, None: 6}
INFO:graphcat.common:Task C finished. Output: 15
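###Markdown
Before moving on, note that the `name` argument passed to every task function makes it easy to share one function across several tasks and still tell them apart; a minimal sketch (not wired into the graph above, shown purely for illustration):
###Code
def debug_task(graph, name, inputs):
    # `name` identifies the task being executed - handy for logging or for
    # branching behavior per task
    print(f"executing task {name!r}")
    return name

# hypothetical usage: graph.set_task("E", debug_task)
###Output
_____no_output_____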
###Markdown
Named InputsBy now, you should have questions about the way inputs are passed to task functions. From the log message in the preceding example - `{None: 4, None: 5, None: 6}` - it's obvious that the results from "A", "B", and "D" are passed to "C" using something that looks like a dict, but what's with the key `None`, and why does it appear multiple times (something that can't happen with an actual dict)?
What's happening is that when you create a link between a source and a target, you also - implicitly or explicitly - specify a *named input* on the target. When the target task function is executed, the named inputs become the keys used to access the corresponding values. This makes it possible for task functions with multiple inputs to tell those inputs apart. If you don't specify a named input when you create a link, the name defaults to :any:`None`.
Let's look back at the implementation of the ``add()`` function::
    def add(graph, name, inputs):
        return sum([value() for value in inputs.values()])
Here, the function doesn't need to know the names of its inputs, since all it does is add them together. That is why it uses the ``values()`` method of the `inputs` object - like a normal Python dict, ``values()`` provides access to just the values, ignoring the input names. Note though, that *unlike* a Python dict, the objects returned by ``values()`` aren't the values themselves - they are callables that have to be executed to return the values - which is why the code is ``sum([value() ...`` instead of ``sum([value ...``.
Let's modify our current example to access inputs by name. Instead of adding values, we'll create a new task function that generates a familiar greeting:
###Code
def greeting(graph, name, inputs):
    return f"{inputs.getone('greeting')}, {inputs.getone('subject')}!"
###Output
_____no_output_____
###Markdown
Note that the `greeting()` task function uses two inputs named `"greeting"` and `"subject"`. Each call to ``inputs.getone(<name>)`` will return the value of the named input. If there isn't an input with the given name, or there's more than one, the call will fail.
Now we can setup the parameter and greeting task functions for our existing graph:
###Code
graph.set_task("A", graphcat.constant("Hello"))
graph.set_task("B", graphcat.constant("World"))
graph.set_task("C", greeting)
###Output
_____no_output_____
###Markdown
And we'll replace our existing links with links that connect to the named inputs required by the ``greeting()`` function (note that :meth:`set_links<graphcat.graph.Graph.set_links>` replaces all of the outgoing links for a given source, instead of :meth:`add_links<graphcat.graph.Graph.add_links>`, which adds new links):
###Code
graph.set_links(source="A", targets=("C", "greeting"))
graph.set_links(source="B", targets=("C", "subject"))
###Output
_____no_output_____
###Markdown
... instead of passing just a task name as the target for :meth:`set_links<graphcat.graph.Graph.set_links>`, we pass a ``(task name, input name)`` tuple instead. Like task names, input names don't have to be strings - they can be any hashable object. Let's see the result:
###Code
print("Result:", graph.output("C"))
graphcat.notebook.display(graph)
###Output
_____no_output_____
###Markdown
Note that the notebook diagram links are labelled when they're connected to inputs with names other than :any:`None`.
Now, the input dict for "C" printed to the log should make more sense - it contains all of the named inputs and corresponding upstream outputs for the task. Note that task "D" is still connected to input :any:`None`, but it's ignored by the ``greeting()`` implementation.
It should also be clear now why a name can appear more than once in a task's inputs: you can connect multiple tasks to a single input, one task to multiple inputs, or any combination of the two.
By examining the input object, a task function can implement any desired behavior, from very strict (failing unless the input contains a specific set of names, numbers, and types of values) to very permissive (adjusting functionality based on names, numbers, and types of values in the input dict), or anywhere in-between.
###Markdown
ErrorsWhat happens when things go wrong and your task function fails? Let's find out, using a special Graphcat helper function for generating task functions that throw exceptions:
###Code
graph.set_task("D", graphcat.raise_exception(RuntimeError("Whoops!")))
###Output
_____no_output_____
###Markdown
(In case you're wondering, we use this for testing and debugging)
###Code
try:
print("Result:", graph.output("C"))
except Exception as e:
print(f"Exception: {e!r}")
graphcat.notebook.display(graph)
###Output
INFO:graphcat.common:Task D updating.
INFO:graphcat.common:Task D executing. Inputs: {}
ERROR:graphcat.common:Task D failed. Exception: Whoops!
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task C updating.
###Markdown
As always, Graphcat ensures that task states remain consistent - when a task function fails ("D" in this case), execution stops, the task and its dependents are marked as being in the "error" state, and the :meth:`update` or :meth:`output` methods that initiated the update re-raise the exception. This will keep happening as long as the error condition persists:
###Code
try:
print("Result:", graph.output("C"))
except Exception as e:
print(f"Exception: {e!r}")
graphcat.notebook.display(graph)
###Output
INFO:graphcat.common:Task D updating.
INFO:graphcat.common:Task D executing. Inputs: {}
ERROR:graphcat.common:Task D failed. Exception: Whoops!
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task C updating.
###Markdown
Once the error is cleared up, things will return to normal:
###Code
graph.set_task("D", graphcat.constant(42))
print("Result:", graph.output("C"))
graphcat.notebook.display(graph)
###Output
INFO:graphcat.common:Task D updating.
INFO:graphcat.common:Task D executing. Inputs: {}
INFO:graphcat.common:Task D finished. Output: 42
INFO:graphcat.common:Task A updating.
INFO:graphcat.common:Task B updating.
INFO:graphcat.common:Task C updating.
INFO:graphcat.common:Task C executing. Inputs: {None: 42, greeting: Hello, subject: World}
INFO:graphcat.common:Task C finished. Output: Hello, World!
###Markdown
TutorialThis repository contains several utility functions that enable easier analysis across CMIP6 model data.It offers solutions to the following problems:1. [Inconsistent naming of dimensions and coordinates](rename)2. [Inconsistent values, shape and dataset location of coordinates](coords)3. [Inconsistent longitude conventions](lon)4. [Inconsistent units](units)5. [Inconsistent longitude/latitude bounds](bounds)6. [TL;DR How to put it all together](combo)
###Code
import matplotlib.pyplot as plt
import intake
%matplotlib inline
url = "https://raw.githubusercontent.com/NCAR/intake-esm-datastore/master/catalogs/pangeo-cmip6.json"
col = intake.open_esm_datastore(url)
###Output
_____no_output_____
###Markdown
Inconsistent naming of dimensions and coordinatesAll cmip6 models (except for the unstructured grid of the AWI model) have in principle the same data structure and **should** have consistent naming, such that the user can test an analysis on one model and then seamlessly apply it to another. In practice some models have alternate naming for e.g. the logical (x,y,z) dimensions. `cmip6_preprocessing.preprocessing.rename_cmip6` accesses an internal dictionary to rename all models consistently to the following scheme:- `x`, `y`, `lev` for the logical grid index in the x,y,z direction- `lon`, `lat` for geographical position coordinates- `bnds`, `vertex` for cell bounds or vertex indices- `time_bounds`, `lev_bounds`, `lon_bounds`, `lat_bounds` for cell bounding values
###Code
# load a few models to illustrate the problem
query = dict(experiment_id=['piControl'], table_id='Oyr',
variable_id='o2', grid_label=['gn', 'gr'],
source_id=['IPSL-CM6A-LR', 'CanESM5', 'GFDL-ESM4']
)
cat = col.search(**query)
cat.df['source_id'].unique()
z_kwargs = {'consolidated': True, 'decode_times':False}
dset_dict = cat.to_dataset_dict(zarr_kwargs=z_kwargs)#
# show coordinates
for k, ds in dset_dict.items():
print(k)
print(list(ds.dims))
###Output
CMIP.CCCma.CanESM5.piControl.Oyr.gn
['bnds', 'i', 'j', 'lev', 'member_id', 'time', 'vertices']
CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn
['axis_nbounds', 'member_id', 'nvertex', 'olevel', 'time', 'x', 'y']
CMIP.NOAA-GFDL.GFDL-ESM4.piControl.Oyr.gr
['bnds', 'lat', 'lev', 'lon', 'member_id', 'time']
###Markdown
You can see that the x dimension is not consistently labelled: in one model it is called `i`, in another `x`. We can fix this by passing `rename_cmip6` as the `preprocess` argument to `to_dataset_dict`:
###Code
from cmip6_preprocessing.preprocessing import rename_cmip6
# load a few models to illustrate the problem
cat = col.search(**query)
cat.df['source_id'].unique()
# pass the preprocessing directly
dset_dict_renamed = cat.to_dataset_dict(zarr_kwargs=z_kwargs, preprocess=rename_cmip6)
for k, ds in dset_dict_renamed.items():
print(k)
print(list(ds.dims))
###Output
--> The keys in the returned dictionary of datasets are constructed as follows:
'activity_id.institution_id.source_id.experiment_id.table_id.grid_label'
--> There is/are 3 group(s)
CMIP.CCCma.CanESM5.piControl.Oyr.gn
['bnds', 'lev', 'member_id', 'time', 'vertex', 'x', 'y']
CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn
['bnds', 'lev', 'member_id', 'time', 'vertex', 'x', 'y']
CMIP.NOAA-GFDL.GFDL-ESM4.piControl.Oyr.gr
['bnds', 'lev', 'member_id', 'time', 'x', 'y']
###Markdown
Beautiful! They have exactly the same dimensions! > You can also always apply the utility functions after loading the data, but be aware that some models even have inconsistent naming between timesteps and ensemble members. This can cause problems with the concatenation that `intake_esm` does. Passing the function will apply it before concatenation, which works nicely above. Here is an example of how it causes problems when applied afterwards:
###Code
# IPSL data is a bit of a mess
ds = dset_dict['CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn']
ds = rename_cmip6(ds)
ds
###Output
_____no_output_____
###Markdown
~See how the data variable `o2` has several depth variables `o2(member_id, time, olevel, y, x, lev)`~> This has recently been fixed in the pangeo google store, but it does still apply if you are e.g. working with a local copy of the CMIP6 netcdf files.**I strongly recommend applying all preprocessing using the `preprocess` keyword, and it is crucial to do so at least for the initial renaming step.** Inconsistent values, shape and dataset location of coordinatesThe naming of the dimensions/coordinates is only the beginning: some datasets use only index values for the x,y dimensions, while others use nominal longitudes and latitudes (useful for rough region selection), or the longitudes and latitudes are only 1d arrays (e.g. for regridded outputs). Our goal is to work with all datasets in the same way, and hence we convert all datasets into this form:- `x`, `y` are given as 1D 'nominal' longitudes and latitudes. This means that `x` is the nominal longitude (the longitude at the equator) and `y` is the nominal latitude (the zonal average latitude; this can become difficult near the Arctic, but is otherwise very useful).- `lon` and `lat` are 2-dimensional coordinate arrays with the 'true' position of grid cells (if the values were initially given as 1d arrays, they are broadcast appropriately).We achieve this by applying `promote_empty_dims` (give empty dimensions values), `broadcast_lonlat` (convert 1d lon and lat arrays to 2d arrays) and `replace_x_y_nominal_lat_lon` (calculate nominal lon and lat and replace `x` and `y` with them).
###Code
from cmip6_preprocessing.preprocessing import promote_empty_dims, broadcast_lonlat, replace_x_y_nominal_lat_lon
# check out the previous datasets
ds = dset_dict_renamed['CMIP.IPSL.IPSL-CM6A-LR.piControl.Oyr.gn']
ds
###Output
_____no_output_____
###Markdown
> Note how the dimensions x and y don't have values (i.e. they are not listed as coords)
###Code
ds = promote_empty_dims(ds)
ds
dset_dict_renamed.keys()
###Output
_____no_output_____
###Markdown
Nice. Now check out the `GFDL` model...
###Code
ds = dset_dict_renamed['CMIP.NOAA-GFDL.GFDL-ESM4.piControl.Oyr.gr']
ds
###Output
_____no_output_____
###Markdown
This dataset is from regridded output and thus has only 1D longitude and latitude values (which are called `x` and `y` due to the previous renaming step). `broadcast_lonlat` adds the `lon` and `lat` arrays back as 2d arrays.
###Code
ds = broadcast_lonlat(ds)
ds
###Output
_____no_output_____
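###Markdown
For intuition, going from 1D to 2D coordinates is just a broadcast; a plain numpy sketch of the same idea (not the library internals) is:
###Code
import numpy as np

# 1D nominal coordinates -> 2D position arrays (rows vary with y, columns with x)
lon2d, lat2d = np.meshgrid(ds['x'].values, ds['y'].values)
print(lon2d.shape, lat2d.shape)
###Output
_____no_output_____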
###Markdown
When you look back at models like `IPSL` or `CanESM5`, you notice that the `x` and `y` values are given just as indices, making rough selection of regions using xarray's `.sel` rather useless. To gain back this functionality, we replace `x` and `y` with nominal longitudes and latitudes using `replace_x_y_nominal_lat_lon`:
###Code
ds = dset_dict_renamed['CMIP.CCCma.CanESM5.piControl.Oyr.gn']
print(ds.y.data)
ds = replace_x_y_nominal_lat_lon(ds)
ds.y.data
###Output
[ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17
18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35
36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71
72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107
108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125
126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143
144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161
162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179
180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197
198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215
216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233
234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251
252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269
270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287
288 289 290]
###Markdown
We can put all of this together in a wrapper function and plot some data
###Code
def wrapper(ds):
ds = ds.copy()
ds = rename_cmip6(ds)
ds = promote_empty_dims(ds)
ds = broadcast_lonlat(ds)
ds = replace_x_y_nominal_lat_lon(ds)
return ds
# pass the preprocessing directly
dset_dict_processed1 = cat.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=wrapper)
fig, axarr = plt.subplots(nrows=3, figsize=[10,15])
for ax, (k, ds) in zip(axarr.flat, dset_dict_processed1.items()):
if 'member_id' in ds.dims:
ds = ds.isel(member_id=-1)
ds.o2.isel(time=0, lev=0).sel(y=slice(-15,15)).plot(ax=ax)
ax.set_title(k)
ax.set_aspect(2)
###Output
_____no_output_____
###Markdown
The naming and units are still inconsistent (not implemented yet) and the longitude conventions differ between models (we will deal with this below), but this is a big step forward. With the 'unprocessed' datasets this would have required a lot more logic in the plotting loop.

Inconsistent longitude conventions

We saw above that not all models use a '0-360' longitude convention. We can fix this very quickly using `correct_lon`:
###Code
from cmip6_preprocessing.preprocessing import correct_lon
# same as above
def wrapper(ds):
ds = ds.copy()
ds = rename_cmip6(ds)
ds = promote_empty_dims(ds)
ds = broadcast_lonlat(ds)
ds = replace_x_y_nominal_lat_lon(ds)
ds = correct_lon(ds)
return ds
# pass the preprocessing directly
dset_dict_processed2 = cat.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=wrapper)
fig, axarr = plt.subplots(nrows=3, figsize=[10,15])
for ax, (k, ds) in zip(axarr.flat, dset_dict_processed2.items()):
if 'member_id' in ds.dims:
ds = ds.isel(member_id=-1)
ds.o2.isel(time=0, lev=0).sel(y=slice(-15,15)).plot(ax=ax)
ax.set_title(k)
ax.set_aspect(2)
###Output
_____no_output_____
###Markdown
Inconsistent units

But of course this is not all. Some models give the depth in centimeters (so far I have only seen this in the NCAR models). We can fix this with `correct_units`:
###Code
from cmip6_preprocessing.preprocessing import correct_units
query = dict(experiment_id = ['historical'],variable_id='thetao', grid_label=['gn'],source_id=['CESM2', 'CanESM5']
)
cat = col.search(**query)
# raw data read in
dset_dict = cat.to_dataset_dict(zarr_kwargs=z_kwargs)
# fixed units
dset_dict_fixed_unit = cat.to_dataset_dict(zarr_kwargs=z_kwargs, preprocess=correct_units)
dset_dict['CMIP.NCAR.CESM2.historical.Omon.gn'].lev.plot()
plt.figure()
dset_dict_fixed_unit['CMIP.NCAR.CESM2.historical.Omon.gn'].lev.plot()
###Output
_____no_output_____
###Markdown
This helps tremendously when you are trying to select a common depth across a series of models:
###Code
fig, axarr = plt.subplots(nrows=2, figsize=[10,10])
for ax, (k, ds) in zip(axarr.flat, dset_dict_fixed_unit.items()):
if 'member_id' in ds.dims:
ds = ds.isel(member_id=0)
ds.thetao.isel(time=0).sel(lev=5000, method='nearest').plot(ax=ax, vmin=-1, vmax=5)
ax.set_title(k)
###Output
_____no_output_____
###Markdown
As a comparison, for the unprocessed data this would have picked the depth at 50m for the `CESM2` model instead of 5000m:
###Code
fig, axarr = plt.subplots(nrows=2, figsize=[10,10])
for ax, (k, ds) in zip(axarr.flat, dset_dict.items()):
if 'member_id' in ds.dims:
ds = ds.isel(member_id=0)
ds.thetao.isel(time=0).sel(lev=5000, method='nearest').plot(ax=ax, vmin=-1, vmax=5)
ax.set_title(k)
###Output
_____no_output_____
###Markdown
Consistent CF bounds

Many of the CMIP6 models come with 'bound' DataArrays that describe the extent of the finite grid cells. For the longitude and latitude there are two conventions: 2-element 'bounds' (describing the cell edges on either side of the cell center) and 4-element 'vertices' (describing the 4 corner coordinates of the cell). `cmip6_preprocessing` automatically renames these variables consistently and converts them so that every dataset has both conventions available.
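As a plain-Python illustration of the two conventions (not the library code): for a single cell, the 2-element bounds give the cell edges, and the 4 corner vertices are simply the combinations of those edges, ordered lower-left, upper-left, upper-right, lower-right (the convention noted below):

```python
# illustration only: relation between 2-element bounds and the 4 corner vertices of one cell
lon_b = (10.0, 11.0)   # western, eastern cell edge
lat_b = (-1.0, 0.0)    # southern, northern cell edge
vertices = [
    (lon_b[0], lat_b[0]),  # lower-left
    (lon_b[0], lat_b[1]),  # upper-left
    (lon_b[1], lat_b[1]),  # upper-right
    (lon_b[1], lat_b[0]),  # lower-right
]
```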
###Code
from cmip6_preprocessing.preprocessing import correct_coordinates,parse_lon_lat_bounds, maybe_convert_bounds_to_vertex, maybe_convert_vertex_to_bounds
# same as above
def wrapper(ds):
ds = ds.copy()
ds = rename_cmip6(ds)
ds = promote_empty_dims(ds)
ds = broadcast_lonlat(ds)
ds = replace_x_y_nominal_lat_lon(ds)
ds = correct_lon(ds)
ds = correct_coordinates(ds)
ds = parse_lon_lat_bounds(ds)
ds = maybe_convert_bounds_to_vertex(ds)
ds = maybe_convert_vertex_to_bounds(ds)
return ds
# pass the preprocessing directly
dset_dict_processed3 = cat.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=wrapper)
for k, ds in dset_dict_processed3.items():
print(ds)
###Output
<xarray.Dataset>
Dimensions: (bnds: 2, lev: 45, member_id: 35, time: 1980, vertex: 4, x: 360, y: 291)
Coordinates:
lev_bounds (time, lev, bnds) float64 dask.array<chunksize=(12, 45, 2), meta=np.ndarray>
time_bounds (time, bnds) float64 dask.array<chunksize=(1980, 2), meta=np.ndarray>
lat (y, x) float32 dask.array<chunksize=(291, 360), meta=np.ndarray>
lon_bounds (bnds, y, x) float32 dask.array<chunksize=(1, 291, 360), meta=np.ndarray>
lat_verticies (y, x, vertex) float32 dask.array<chunksize=(291, 360, 4), meta=np.ndarray>
lon_verticies (y, x, vertex) float32 dask.array<chunksize=(291, 360, 4), meta=np.ndarray>
lat_bounds (bnds, y, x) float32 dask.array<chunksize=(1, 291, 360), meta=np.ndarray>
lon (y, x) float32 dask.array<chunksize=(291, 360), meta=np.ndarray>
* vertex (vertex) int64 0 1 2 3
* time (time) int64 0 708 1416 2148 ... 1443192 1443924 1444656
* lev (lev) float64 3.047 9.454 16.36 ... 5.375e+03 5.625e+03
* x (x) float32 0.5 1.5 2.5 3.5 4.5 ... 356.5 357.5 358.5 359.5
* bnds (bnds) int64 0 1
* y (y) float32 -78.3935 -78.19058 ... 89.36653 89.74177
* member_id (member_id) <U9 'r10i1p1f1' 'r10i1p2f1' ... 'r9i1p2f1'
Data variables:
thetao (member_id, time, lev, y, x) float32 dask.array<chunksize=(1, 5, 45, 291, 360), meta=np.ndarray>
Attributes:
references: Geophysical Model Development Special issue ...
parent_time_units: days since 1850-01-01 0:0:0.0
experiment_id: historical
branch_method: Spin-up documentation
sub_experiment_id: none
tracking_id: hdl:21.14100/e1d38b3d-b9f2-4525-a84e-d7f3eca...
nominal_resolution: 100 km
parent_source_id: CanESM5
variant_label: r9i1p2f1
cmor_version: 3.4.0
mip_era: CMIP6
title: CanESM5 output prepared for CMIP6
parent_activity_id: CMIP
grid_label: gn
Conventions: CF-1.7 CMIP-6.2
forcing_index: 1
CCCma_runid: p2-his09
creation_date: 2019-05-30T09:47:43Z
history: 2019-03-14T09:34:44Z ;rewrote data to be con...
sub_experiment: none
realization_index: 9
grid: ORCA1 tripolar grid, 1 deg with refinement t...
contact: [email protected]
data_specs_version: 01.00.29
further_info_url: https://furtherinfo.es-doc.org/CMIP6.CCCma.C...
parent_mip_era: CMIP6
branch_time_in_child: 0.0
variable_id: thetao
branch_time_in_parent: 1496500.0
source_id: CanESM5
intake_esm_varname: thetao
parent_experiment_id: piControl
YMDH_branch_time_in_parent: 5950:01:01:00
realm: ocean
initialization_index: 1
CCCma_pycmor_hash: 7c87dd3a822d55650b3c17cd65db0ca251282530
YMDH_branch_time_in_child: 1850:01:01:00
CCCma_parent_runid: p2-pictrl
table_id: Omon
source_type: AOGCM
product: model-output
frequency: mon
license: CMIP6 model data produced by The Government ...
experiment: all-forcing simulation of the recent past
external_variables: areacello volcello
CCCma_model_hash: Unknown
activity_id: CMIP
institution_id: CCCma
<xarray.Dataset>
Dimensions: (bnds: 2, lev: 60, member_id: 11, time: 3960, vertex: 4, x: 320, y: 384)
Coordinates:
lev_bounds (lev, bnds) float32 dask.array<chunksize=(60, 2), meta=np.ndarray>
time_bounds (time, bnds) float64 dask.array<chunksize=(3960, 2), meta=np.ndarray>
lat (y, x) float64 dask.array<chunksize=(384, 320), meta=np.ndarray>
lon_bounds (bnds, y, x) float32 dask.array<chunksize=(1, 384, 320), meta=np.ndarray>
lat_verticies (y, x, vertex) float32 dask.array<chunksize=(384, 320, 4), meta=np.ndarray>
lon_verticies (y, x, vertex) float32 dask.array<chunksize=(384, 320, 4), meta=np.ndarray>
lat_bounds (bnds, y, x) float32 dask.array<chunksize=(1, 384, 320), meta=np.ndarray>
lon (y, x) float64 dask.array<chunksize=(384, 320), meta=np.ndarray>
* vertex (vertex) int64 0 1 2 3
* time (time) float64 0.0 707.0 1.415e+03 ... 1.444e+06 1.445e+06
* lev (lev) float64 500.0 1.5e+03 2.5e+03 ... 5.125e+05 5.375e+05
* x (x) float64 1.062 2.187 3.312 4.437 ... 357.7 358.8 359.9
* bnds (bnds) int64 0 1
* y (y) float64 -79.22 -78.69 -78.15 -77.62 ... 89.11 89.66 89.71
* member_id (member_id) <U9 'r10i1p1f1' 'r11i1p1f1' ... 'r9i1p1f1'
Data variables:
thetao (member_id, time, lev, y, x) float32 dask.array<chunksize=(1, 8, 60, 384, 320), meta=np.ndarray>
Attributes:
physics_index: 1
parent_time_units: days since 0001-01-01 00:00:00
experiment_id: historical
branch_method: standard
sub_experiment_id: none
tracking_id: hdl:21.14100/19f9ed4d-daf4-4a51-8563-fe32b9c2a0cd...
parent_variant_label: r1i1p1f1
nominal_resolution: 100 km
parent_source_id: CESM2
variant_label: r9i1p1f1
mip_era: CMIP6
model_doi_url: https://doi.org/10.5065/D67H1H0V
parent_activity_id: CMIP
grid_label: gn
Conventions: CF-1.7 CMIP-6.2
forcing_index: 1
variant_info: CMIP6 20th century experiments (1850-2014) with C...
case_id: 23
creation_date: 2019-01-27T09:14:57Z
sub_experiment: none
realization_index: 9
grid: native gx1v7 displaced pole grid (384x320 latxlon)
contact: [email protected]
data_specs_version: 01.00.29
further_info_url: https://furtherinfo.es-doc.org/CMIP6.NCAR.CESM2.h...
parent_mip_era: CMIP6
branch_time_in_child: 674885.0
variable_id: thetao
branch_time_in_parent: 295650.0
source_id: CESM2
intake_esm_varname: thetao
institution: National Center for Atmospheric Research, Climate...
parent_experiment_id: piControl
realm: ocean
initialization_index: 1
status: 2019-10-25;created;by [email protected]
source: CESM2 (2017): atmosphere: CAM6 (0.9x1.25 finite v...
table_id: Omon
cesm_casename: b.e21.BHIST.f09_g17.CMIP6-historical.009
source_type: AOGCM BGC
product: model-output
frequency: mon
license: CMIP6 model data produced by <The National Center...
experiment: all-forcing simulation of the recent past
external_variables: areacello volcello
activity_id: CMIP
institution_id: NCAR
###Markdown
The vertex convention is consistent across models: the points are ordered lower-left, upper-left, upper-right, lower-right.

TL;DR: How to put it all together

To combine all of these steps (or just the ones you like), you can create a wrapper function as above, or you can use the provided `combined_preprocessing`, which does all of the above:
###Code
from cmip6_preprocessing.preprocessing import combined_preprocessing
# lets load a bunch more models this time
# load a few models to illustrate the problem
query = dict(experiment_id=['piControl', 'historical'], table_id='Oyr',
variable_id='o2', grid_label=['gn', 'gr'])
cat = col.search(**query)
print(cat.df['source_id'].unique())
dset_dict = cat.to_dataset_dict(zarr_kwargs=z_kwargs,
preprocess=combined_preprocessing)
fig, axarr = plt.subplots(nrows=3, ncols=3, figsize=[25,15])
for ax,(k, ds) in zip(axarr.flat,dset_dict.items()):
if 'member_id' in ds.dims:
ds = ds.isel(member_id=0)
ds.o2.isel(time=0).sel(x=slice(100, 200), y=slice(-20,20)).interp(lev=2500).plot(ax=ax, vmax=0.25, vmin=0.05)
ax.set_title(k)
###Output
_____no_output_____
###Markdown
xDSL tutorial

Imports and setup
###Code
from xdsl import *
from xdsl.ir import *
from xdsl.irdl import *
from xdsl.dialects.std import *
from xdsl.dialects.arith import *
from xdsl.dialects.builtin import *
from xdsl.parser import *
from xdsl.printer import *
from xdsl.util import *
# MLContext, containing information about the registered dialects
context = MLContext()
# Some useful dialects
std = Std(context)
arith = Arith(context)
builtin = Builtin(context)
# Printer used to pretty-print MLIR data structures
printer = Printer()
###Output
_____no_output_____
###Markdown
High-level presentation (TODO)

Base ideas of what xDSL is. Example of a small program, and SSA.

Base IR features

Dialects

Dialects are namespaces that contain a collection of attributes and operations. For instance, the Builtin dialect contains (but not exclusively) the attribute `!i32` and the operation `builtin.func`. A dialect is usually a single level of abstraction in the IR, and multiple dialects can be used together in the same MLIR program. Dialects are currently Python classes registering operations and attributes, and providing simple accessors to their attributes and operations. This will however change in the near future to provide a better interface to dialects.

Attributes

Attributes represent compile-time information. In particular, each SSA value is associated with an attribute representing its type. Each attribute type has a name and belongs to a dialect. The textual representation of attributes is prefixed with `!` and the dialect name. For instance, the `vector` attribute has the format `!builtin.vector<T>`, where `T` is the expected parameter of the attribute. In Python, attributes are always expected to be immutable objects inheriting from either `Data` or `ParametrizedAttribute`.

Data attributes

`Data` attributes are used to wrap Python data structures. For instance, `IntAttr` is an attribute containing an `int`, and `StringAttr` is an attribute containing a `str`. `Data` attributes are parsed and printed with the format `dialect_name.attr_name<custom_format>`, where `custom_format` is the format defined by the parser and printer of each `Data` attribute. Note that some attributes, such as `StringAttr`, are shortened by the printer and do not require the use of `dialect_name.attr_name<...>`. For instance, `builtin.str<"foo">` is shortened to `"foo"`. Here is an example of how to create and print an `IntAttr` attribute:
###Code
# Attribute definitions usually define a `get` method to create the attribute
my_int = IntAttr.from_int(42)
printer.print_attribute(my_int)
###Output
!int<42>
###Markdown
Note that here, the `IntAttr` does not print a dialect prefix. This will be fixed soon-ish.
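The same pattern works for other `Data` attributes. As a sketch (assuming `StringAttr` provides a `from_str` builder analogous to `IntAttr.from_int`; the exact builder name may differ):

```python
# assumption: StringAttr exposes a `from_str` builder similar to IntAttr.from_int
my_str = StringAttr.from_str("foo")
printer.print_attribute(my_str)  # expected to print the shortened form "foo"
```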
###Code
# Access the data in the IntAttr:
print(my_int.data)
###Output
42
###Markdown
Parametrized attributes

Parametrized attributes are attributes that optionally contain multiple attributes as parameters. For instance, the `integer` attribute from `builtin` is a parametrized attribute and expects two attributes as parameters. Parametrized attributes are printed with the format `!dialect.attr_name<attr_1, ..., attr_N>`, where the `attr_i` are the attribute parameters. Here is an example of how to create and inspect an `integer_type` attribute, which represents a machine integer type. It is parametrized by a single `IntAttr` parameter, representing the bitwidth.
###Code
# Get the int that will be passed as parameter to the integer_type
int_64 = IntAttr.from_int(64)
i64 = IntegerType([int_64])
printer.print_attribute(i64)
# Get back the parameters of IntegerType
printer.print_attribute(i64.parameters[0])
# Use a custom `get` method from IntegerType to construct it
assert IntegerType.from_width(64) == i64
###Output
_____no_output_____
###Markdown
Note that parametrized attributes may define invariants that need to be respected. For instance, constructing an `integer_type` with the wrong parameters will trigger an error:
###Code
# Try to create an IntegerType with wrong parameters
try:
bad_attr = IntegerType([i64])
except Exception as err:
print(err)
###Output
IntegerType(name='integer_type', parameters=[IntAttr(name='int', data=64)]) should be of base attribute int
###Markdown
Operations

Operations represent the computation that a program can do. They span all abstraction levels and can be domain-specific. For instance, `arith.addi` will add two integers, while `scf.if` represents an if/else structure. Operations are composed of:

* A base operation type, which represents the semantics of the operation;
* Operands, which are SSA values previously defined;
* Results, which are new SSA values defined by the operation;
* Attributes, which encode compile-time information about the operation;
* Regions, which contain operations and are used to represent more complex control flow;
* Successors, which are basic block names to which the operation can pass control.

The format of an operation is: `results = dialect_name.op_name(operands) (successors) [attributes] regions`. Here is, for example, how to create a constant operation, representing a constant value:
###Code
const_op = Constant.create([], [i64], attributes={"value": IntegerAttr.from_int_and_width(62, 64)})
printer.print_op(const_op)
###Output
%0 : !i64 = arith.constant() ["value" = 62 : !i64]
###Markdown
Note that dialects usually define methods to ease the definition of such operations:
###Code
const_op2 = Constant.from_attr(IntegerAttr.from_int_and_width(62, 64), i64)
printer.print_op(const_op2)
###Output
%1 : !i64 = arith.constant() ["value" = 62 : !i64]
###Markdown
We can use the results from the operation to pass them as operands for a later operation. For instance, we will add the constant to itself using the `arith.addi` operation:
###Code
add_op = Addi.create([const_op.results[0], const_op.results[0]], [i64], {})
printer.print_op(const_op)
print()
printer.print_op(add_op)
###Output
%2 : !i64 = arith.constant() ["value" = 62 : !i64]
%3 : !i64 = arith.addi(%2 : !i64, %2 : !i64)
###Markdown
We can also put the operations in regions, which can then be used by other operations (such as `func`):
###Code
my_region = Region.from_operation_list([const_op, add_op])
printer._print_region(my_region)
###Output
{
%4 : !i64 = arith.constant() ["value" = 62 : !i64]
%5 : !i64 = arith.addi(%4 : !i64, %4 : !i64)
}
###Markdown
Functions are created using the `builtin.func` op, which contains a single region:
###Code
my_func = FuncOp.from_region("my_function", [], [], my_region)
printer.print_op(my_func)
###Output
builtin.func() ["sym_name" = "my_function", "type" = !fun<[], []>, "sym_visibility" = "private"] {
%6 : !i64 = arith.constant() ["value" = 62 : !i64]
%7 : !i64 = arith.addi(%6 : !i64, %6 : !i64)
}
###Markdown
Python tutorials

Installation

- [Anaconda](https://www.anaconda.com/products/individual): a comprehensive Python distribution with the `conda` package manager and an array of scientific Python packages

Writing and running

Plain Python files `*.py`

* [spyder](https://www.spyder-ide.org/) (included in the Anaconda dist.)
* [VS Code](https://code.visualstudio.com/)
* [Pycharm](https://www.jetbrains.com/pycharm/)

Jupyter notebooks `*.ipynb`

* jupyterlab / jupyter notebook (included in the Anaconda dist.)
* [VS Code](https://code.visualstudio.com/)
* [nteract](https://nteract.io)

Tutorial and reference

* [Automate the boring stuff with Python](https://automatetheboringstuff.com/2e/)
* [Think Python](http://greenteapress.com/thinkpython2/html/index.html)
* [Scipy Lecture Notes](https://scipy-lectures.org/)
* [Matplotlib tutorials](https://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/)
* [Scipy CookBook](https://scipy-cookbook.readthedocs.io/)

Variable and data types

* Assignment (`=`): binding a label to an object (data) so you can use it later.
* Data includes booleans (True/False), numbers (integers, floating-point numbers), text strings, and more.
###Code
x = 1
type(x)
# String, this is a comment and the Python intepreter will ignore it
greet= "Hello" # The same as greet= 'Hello'
type(greet)
# print function
print(greet, "world", x)
# Floats
y = 1.0
type(y)
# Boolean
type(True)
# Scientific notation
z = 1e-3
z
###Output
_____no_output_____
###Markdown
Python as a (fancy) calculator
###Code
# + - * / ** % //
# ----------------------------
x = 4
y = 3.0
x + y
x - y
x * y
x / y
x // y
x ** y
x % y
a = 10
a = a + 1
a
a = 10
a *= 2
a
# Multiple assignments
a, b = 11, 12
print(a, b)
# swap
b, a = a, b
print(a, b)
###Output
_____no_output_____
###Markdown
String operations
###Code
# Repeating string
repetition = 10
'abc' * repetition
# Concat strings
'a' + 'b'
# f-string to insert values to a string (Python 3.6+)
numofcats = 10
f"I have {numofcats} cat(s)."
x, y = "CAPITAL", 'smol'
x.lower(), y.upper()
'gmail.com'.find('.com')
string = 'qwqwqwqwqw'
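# slicing syntax: string[start:stop:step]; the next line picks the characters at indices 2 and 4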
string[2:5:2]
###Output
_____no_output_____
###Markdown
join()
###Code
toJoin = ['This', 'is', 'a', 'string']
# ' '.join(toJoin)
' + '.join(toJoin)
###Output
_____no_output_____
###Markdown
Containers

* List
* Tuple
* Dictionary
* (Set)

List
###Code
# lis = []
# lis = list((2, 3, 4))
# lis
lis = [0, 1, 2, 3]
# lis.append(4)
lis.extend(lis)
lis.insert(0, -1)
lis
lis.remove(1)
lis
lis.count(0)
lis
lis = [[1, 1, 1], [2, 2, 2]]
lis[0][0]
lis = [0, 1, 2, 3, 2, 3, 5]
# print(sorted(lis))
# print(lis)
# print(lis)
# lis.sort(reverse=True)
# lis.reverse()
# lis
# print(lis)
# a = reversed(lis)
for i in lis:
print(i)
###Output
_____no_output_____
###Markdown
Tuple
###Code
# tuple list str dict set...
# (a, b) = (2, 3)
# a
# a = (2, 3)
# a
# a[1]
# a = ('one', 'two')
# a
# a = (2, 3, 4, 5)
# a
# print(a, len(a))
# a = tuple([1, 1])
# a
# a[1] = 4
lis = [4, 2, 1, 7, 9, 10]
# print(sorted(lis).index(1))
# for i in range(len(lis)):
# print(i, lis[i])
# for i, v in enumerate(lis):
# print(i, v)
# 4 in lis
# for i in range(10):
# if (i in lis):
# print('Found', i)
# lis[2:]
# lis[:2]
# lis[1:4:2]
lis = []
for i in range(10):
lis.append(2 * i)
lis
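# list comprehension with a condition: keep only the even numbers from 1 to 9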
lis = [i for i in range(1, 10) if i % 2 == 0]
lis
# lis = [i ** 2 for i in lis]
# lis
lis1 = [2, 3, 4]
# sum(lis1)
min(lis1)
# max(lis1)
# shallow copy
lis1 = [2, 3, 4]
lis2 = lis1
lis1.append(5)
lis2
# lis1 == lis2
# lis1 is lis2
# deep copy
import copy
lis1 = [2, 3, 4]
lis2 = copy.deepcopy(lis1)
# lis1.append(5)
lis2
# lis1 == lis2
lis1 is lis2
###Output
_____no_output_____
###Markdown
Dictionary
###Code
d = dict()
d = {}
# d[1] = 'one'
# d
# d[2] = 'two'
# d
d = {'one': 1, 'two': 2}
d
d.update({'three': 3, 'two': 5})
d
# d['Three'] = 10
# d
# del d['three']
# d
# del d['Four']
# print(d.get('Four', 100000000000))
# d.get('Five', 'No such element')
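# setdefault adds the key with the given value only if the key is not already present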
d.setdefault('Five', 5)
d
# for i in d:
# print(i)
for k, v in d.items():
print(k, v)
# print(list(d.keys()))
# print(d.values())
# for i, j in zip([1, 2, 4], [2, 4, 6]):
# print(i, j)
for v, k in zip(d.values(), d.keys()):
print(v, k)
###Output
_____no_output_____
###Markdown
Set / Frozen Set
###Code
s = set()
# type(s)
s.add(1)
s.add(1)
# s
s.add(10)
s
s.remove(10)
s
# slis = [s, s]
# slis
# lisset = set([10, 9], 10)
# lisset
geneSet = set([1, 2, 3, 6, 10])
geneSet2 = set([2, 4, 6, 8, 10])
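# set operators: & intersection, | union, - difference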
geneSet & geneSet2
# geneSet | geneSet2
# geneSet - geneSet2
geneSet.intersection(geneSet2)
###Output
_____no_output_____
###Markdown
Logical operators and control flow

* Operators: `==`, `>=`, `and`, `is`, `not`, `or`
* Branching: `if` ... `elif` ... `else`
* Loops: `for`, `while`

See also: [Truth table](https://en.wikipedia.org/wiki/Truth_table)
###Code
2 <= 3
2 + 2 == 5
'Z' > 'B'
# Chaining is possible
1 < 2 < 3
4 < 2 < 5
(1 > 2) and (1 < 2)
not 1 > 2
1 > 2 or 1 < 2
a = 1
b = 1
a is b
a = 1
b = 1.0
a == b
a is b
###Output
_____no_output_____
###Markdown
if ... else
###Code
# if else elif. Note the colon.
if 3 > 5:
print('Yes! It is true!')
else:
print('No! It is not true')
print('Inside of conditional !!!!')
print('Out of conditional')
a = 2 ** 3
if a % 2 == 0:
print('Even')
else:
print('Odd')
score = 0
print("score =", score)
if score > 100 :
print('What?')
elif 100 >= score > 80:
print('Good')
elif 80 >= score > 60:
print('Okay')
else :
print('Oops')
###Output
_____no_output_____
###Markdown
For loops
###Code
for i in range(2, 10):
print(i, end = ' ')
for i in range(10):
if (i == 3):
break
print(i, end = ' ')
###Output
_____no_output_____
###Markdown
While loops
###Code
# while loop
x = 0
while x < 4:
print(x)
x += 1
if (x == 3):
break
###Output
_____no_output_____
###Markdown
Useful functions
###Code
# print / input / type conversion / random / math
# print(10000)
# print('Hello', 'world')
# print('Hello', 'world')
# print('Hello', 'world')
# print('Hello', 'world')
# print('Hello', 'world', sep = ' strange ')
# print('Hello', end = '\t')
print('world', end = '\t')
# print(end = '\n')
print('world')
type(a)
# type(10)
# int(a) + int (b)
# bool(0)
# bool(1)
# int(True)
int(False)
# float(10)
int(10.9)
import random
random.random()
random.randint(0, 100)
random.uniform(0, 100)
random.randrange(0, 101, 2)
random.choice(['one', 'two', 'three'])
# random.sample((0, 1, 2), 2)
import math
math.ceil(9.1)
math.floor(9.1)
10 ** 0.5
math.log(10)
math.log10(10)
math.exp(1)
math.pi
math.sin(math.pi / 2)
math.cos(math.pi / 2)
math.sqrt(10)
###Output
_____no_output_____
###Markdown
This tutorial provides code examples for static Manhattan and QQ plots.
###Code
# In order to view the figures in this notebook it is necessary to include the following line
%matplotlib inline
# import pandas and qmplot functions
import pandas as pd
import matplotlib.pyplot as plt
from qmplot import manhattanplot, qqplot
# import data
df = pd.read_table("../tests/data/gwas_plink_result.tsv", sep="\t")
df = df.dropna(how="any", axis=0)
# Show the dataframe
df
###Output
_____no_output_____
###Markdown
Plot a basic manhattan plot by using the PLINK2.x association output directly
###Code
# Plot a basic manhattan plot with horizontal xtick labels and the figure will display in screen.
ax = manhattanplot(data=df)
###Output
_____no_output_____
###Markdown
The ``figname`` parameter of ``manhattanplot()`` defines the name of the output figure file, and the format of the figure depends on the file suffix, which could be ".png", ".jpg", or ".pdf".

```python
ax = manhattanplot(data=df,
                   is_show=False,  # Set False: No need to display the plot in Desktop screen, default: True
                   figname="output_manhattan_plot.png")
```

Rotate the x-axis tick labels by setting ``xticklabel_kws``
###Code
ax = manhattanplot(data=df, xticklabel_kws={"rotation": "vertical"})
###Output
_____no_output_____
###Markdown
Or rotate the labels to a different angle:
###Code
ax = manhattanplot(data=df, xticklabel_kws={"rotation": 45})
###Output
_____no_output_____
###Markdown
Change horizontal line style by ``hline_kws``:
###Code
ax = manhattanplot(data=df,
hline_kws={"linestyle": "--", "lw": 1.3},
xlabel="Chromosome",
ylabel=r"$-log_{10}{(P)}$",
xticklabel_kws={"rotation": "vertical"})
###Output
_____no_output_____
###Markdown
When run with default parameters, the ``manhattanplot()`` function draws horizontal lines at $-log_{10}{(1e-5)}$ for "suggestive" associations and $-log_{10}{(5e-8)}$ for the "genome-wide significant" threshold. These can be moved to different locations or turned off completely with the arguments ``suggestiveline`` and ``genomewideline``, respectively.
###Code
ax = manhattanplot(data=df,
suggestiveline=None, # Turn off suggestiveline
genomewideline=None, # Turn off genomewideline
xticklabel_kws={"rotation": "vertical"})
###Output
_____no_output_____
###Markdown
Specify a chromosome

The behavior of the ``manhattanplot`` function changes slightly when results from only a single chromosome are used. Here, instead of plotting alternating colors and chromosome IDs on the x-axis, the SNP's position on the chromosome is plotted on the x-axis:
###Code
# plot only results on chromosome 8.
ax = manhattanplot(data=df,
CHR="chr8",
hline_kws={"linestyle": "--", "lw": 1.3},
xlabel="Chromosome 8")
###Output
_____no_output_____
###Markdown
Highlight SNPs with significant GWAS signal and annotate the top SNP. Note: highlighting SNPs of interest can be combined with limiting to a single chromosome to enable "zooming" into a particular region containing SNPs of interest.
###Code
ax = manhattanplot(data=df,
sign_marker_p=1e-6, # highline the significant SNP with ``sign_marker_color`` color.
is_annotate_topsnp=True, # annotate the top SNP
hline_kws={"linestyle": "--", "lw": 1.3},
xticklabel_kws={"rotation": "vertical"})
###Output
_____no_output_____
###Markdown
Plot other values in manhattan style

Specify any other data column instead of ``P-value`` and plot it in manhattan style with ``manhattanplot()``. Here's an example plotting the ``BETA`` value of a PLINK2 GWAS result:
###Code
ax = manhattanplot(data=df,
chrom="#CHROM", # column name of chromosomal name
pos="POS", # column name of chromosomal position
pv="BETA", # The column name of BETA value.
logp=False, # Turn off -log10 scale.
suggestiveline=None, # Turn off suggestiveline
genomewideline=None, # Turn off genomewideline
title="Plot beta value",
ylabel="BETA Value", # set a new y axis label
xlabel="Chromosome",
xticklabel_kws={"rotation": "vertical"})
###Output
_____no_output_____
###Markdown
Show a better manhattan plot

Further graphical parameters can be passed to the ``manhattanplot()`` function to control things like the plot title, point character, size, colors, etc. Here is an example:
###Code
# common parameters for plotting
plt_params = {
"font.sans-serif": "Arial",
"legend.fontsize": 14,
"axes.titlesize": 18,
"axes.labelsize": 16,
"xtick.labelsize": 14,
"ytick.labelsize": 14
}
plt.rcParams.update(plt_params)
# Create a manhattan plot
f, ax = plt.subplots(figsize=(12, 4), facecolor='w', edgecolor='k')
xtick = set(['chr' + i for i in list(map(str, range(1, 10))) + ['11', '13', '15', '18', '21', 'X']])
manhattanplot(data=df,
marker=".",
sign_marker_p=1e-6, # Genome wide significant p-value
sign_marker_color="r",
snp="ID", # The column name of annotation information for top SNPs.
title="Test",
xtick_label_set=xtick,
xlabel="Chromosome",
ylabel=r"$-log_{10}{(P)}$",
sign_line_cols=["#D62728", "#2CA02C"],
hline_kws={"linestyle": "--", "lw": 1.3},
is_annotate_topsnp=True,
ld_block_size=50000, # 50000 bp
text_kws={"fontsize": 12,
"arrowprops": dict(arrowstyle="-", color="k", alpha=0.6)},
dpi=300,
# figname="output_manhattan_plot.png",
ax=ax)
###Output
_____no_output_____
###Markdown
QQ plot with default parameters. The ``qqplot()`` function can be used to generate a Q-Q plot to visualize the distribution of association P-values. It takes a vector of P-values as its only required argument.
###Code
ax = qqplot(data=df["P"])
###Output
_____no_output_____
###Markdown
Show a better QQ plot

Further graphical parameters can be passed to ``qqplot()`` to control the plot title, axis labels, point characters, colors, point sizes, etc. Here is an example:
###Code
# Create a Q-Q plot
f, ax = plt.subplots(figsize=(6, 6), facecolor="w", edgecolor="k")
_ = qqplot(data=df["P"],
marker="o",
title="Test",
xlabel=r"Expected $-log_{10}{(P)}$",
ylabel=r"Observed $-log_{10}{(P)}$",
dpi=300,
# figname="output_QQ_plot.png",
ax=ax)
###Output
_____no_output_____
###Markdown
GCM Filters Tutorial: Synthetic Data

In this example, we are going to work with "synthetic data": data we made up for the sake of keeping the example simple and self-contained.

Create Input Data

gcm-filters uses xarray DataArrays for its inputs and outputs, so we will first import xarray (and numpy).
###Code
import gcm_filters
import numpy as np
import xarray as xr
###Output
_____no_output_____
###Markdown
Now we will create a random 3D cube of data.
###Code
nt, ny, nx = (10, 128, 256)
data = np.random.rand(nt, ny, nx)
da = xr.DataArray(data, dims=['time', 'y', 'x'])
da
###Output
_____no_output_____
###Markdown
To make things a bit more interesting, we will create a "land mask": a binary array representing topography in our made-up ocean. The convention here is that the array is 1 in the ocean ("wet points") and 0 on land ("dry points").
###Code
mask_data = np.ones((ny, nx))
mask_data[(ny // 4):(3 * ny // 4), (nx // 4):(3 * nx // 4)] = 0
wet_mask = xr.DataArray(mask_data, dims=['y', 'x'])
wet_mask.plot()
###Output
_____no_output_____
###Markdown
We have made a big island. We now use this to mask our data.
###Code
da_masked = da.where(wet_mask)
da_masked[0].plot()
###Output
_____no_output_____
###Markdown
Create a Filter

The main class we use from gcm-filters is the {class}`gcm_filters.Filter` object. When we create a filter, we specify how we want to smooth the data, including the filter shape and all the relevant parameters. To define a filter, we need to pick a few options from the predefined lists of filter shapes and grid types. The possible filter shapes are enumerated as follows:
###Code
list(gcm_filters.FilterShape)
###Output
_____no_output_____
###Markdown
The possible grid types are:
###Code
list(gcm_filters.GridType)
###Output
_____no_output_____
###Markdown
(This list will grow as we implement more Laplacians.) For now, we will choose `REGULAR_WITH_LAND`, which matches our synthetic data. Each grid type has different "grid variables" that must be provided. To find out what these are, we can use this utility function.
###Code
gcm_filters.required_grid_vars(gcm_filters.GridType.REGULAR_WITH_LAND)
###Output
_____no_output_____
###Markdown
So if we use this grid type, we have to include a `wet_mask` grid variable. We are now ready to create our filter object.
###Code
filter = gcm_filters.Filter(
filter_scale=4,
dx_min=1,
filter_shape=gcm_filters.FilterShape.TAPER,
grid_type=gcm_filters.GridType.REGULAR_WITH_LAND,
grid_vars={'wet_mask': wet_mask}
)
filter
###Output
_____no_output_____
###Markdown
The repr for the filter object includes some of the parameters it was initialized with, to help us keep track of what we are doing. Next we plot the shape of the target filter and the approximation. Note that this is not the shape of the filter *kernel*; it is the shape 'in Fourier space,' meaning that we're plotting how the filter attenuates different scales in the data. The filter is 1 at large scales (small wavenumbers $k$, at the left side of the plot) and 0 at small scales (large wavenumbers $k$, at the right side of the plot), meaning that large scales are left unchanged and small scales are damped to zero.
###Code
filter.plot_shape()
###Output
_____no_output_____
###Markdown
By not specifying `n_steps`, we allow the filter to automatically select a value that leads to a very good approximation of the target. In the above example using the Taper shape, the filter chooses 16 steps to filter by a factor of 4. The user might want to use a smaller number of steps to reduce the cost. The caveat is that the accuracy will be reduced, so the filter might not act as expected. To illustrate, we create a new filter with a smaller number of steps and plot the result. (Note that using a value of `n_steps` lower than the default will raise a warning.)
###Code
filter_8 = gcm_filters.Filter(
filter_scale=4,
dx_min=1,
filter_shape=gcm_filters.FilterShape.TAPER,
n_steps=8,
grid_type=gcm_filters.GridType.REGULAR_WITH_LAND,
grid_vars={'wet_mask': wet_mask}
)
filter_8.plot_shape()
###Output
/home/ian/Documents/Projects/gcm-filters/gcm_filters/filter.py:255: UserWarning: Warning: You have set n_steps below the default. Results might not be accurate.
warnings.warn(
###Markdown
The example above shows that using `n_steps=8` still yields a very accurate approximation of the target filter, at half the cost of the default. The main drawback in this example is that the filter slightly *amplifies* large scales, which also implies that it will not conserve variance. Below we show what happens with `n_steps=4`. For this example of a Taper filter with a filter factor of 4, `n_steps=4` is simply not enough to get a good approximation of the target filter. The `filter_4` object created here will still "work" but it will not behave as expected; specifically, it will smooth more than expected - it will act like a filter with a larger filter scale.
###Code
filter_4 = gcm_filters.Filter(
filter_scale=4,
dx_min=1,
filter_shape=gcm_filters.FilterShape.TAPER,
n_steps=4,
grid_type=gcm_filters.GridType.REGULAR_WITH_LAND,
grid_vars={'wet_mask': wet_mask}
)
filter_4.plot_shape()
del filter_8, filter_4
###Output
/home/ian/Documents/Projects/gcm-filters/gcm_filters/filter.py:255: UserWarning: Warning: You have set n_steps below the default. Results might not be accurate.
warnings.warn(
###Markdown
Apply the Filter

Now that we have our filter defined, we can use it on some data. We need to specify which dimension names to apply the filter over; in this case, they are `y` and `x`.
###Code
%time da_filtered = filter.apply(da_masked, dims=['y', 'x'])
da_filtered
###Output
CPU times: user 76.4 ms, sys: 6.78 ms, total: 83.1 ms
Wall time: 86.2 ms
###Markdown
Let's visualize what the filter did:
###Code
da_filtered[0].plot()
###Output
_____no_output_____
###Markdown
It can be useful to know where the land mask has influenced our results--for example, for assessing commutativity of the filter with differential operators. We can get at this by applying the filter to the land mask itself. We will create a new filter object that ignores the land.
###Code
filter_noland = gcm_filters.Filter(
filter_scale=4,
dx_min=1,
filter_shape=gcm_filters.FilterShape.TAPER,
grid_type=gcm_filters.GridType.REGULAR,
)
mask_filtered = filter_noland.apply(wet_mask, dims=['y', 'x'])
mask_filtered.plot()
###Output
_____no_output_____
###Markdown
Use Dask

Up to now, we have operated "eagerly": when we called `.apply`, the results were computed immediately and stored in memory. gcm-filters is also designed to work seamlessly with Dask array inputs, deferring its computation and possibly executing it in parallel. We can do this with our synthetic data by converting it to dask.
###Code
da_dask = da_masked.chunk({'time': 2})
da_dask
da_filtered_lazy = filter.apply(da_dask, dims=['y', 'x'])
da_filtered_lazy
###Output
_____no_output_____
###Markdown
Nothing has actually been computed yet. We can trigger computation as follows:
###Code
%time da_filtered_computed = da_filtered_lazy.compute()
###Output
CPU times: user 79.3 ms, sys: 3.04 ms, total: 82.4 ms
Wall time: 88.1 ms
###Markdown
Tutorial

This tutorial will guide you through the creation and analysis of a superconducting circuit with CircuitQ. It shows some of the core functionalities of CircuitQ and demonstrates how to use them. While the variety of possible input circuits is large, we will focus here on the Fluxonium as an example.

Import statements

First, import `circuitq` and `networkx`. The latter toolbox is required to construct the circuit graph.
###Code
import circuitq as cq
import networkx as nx
###Output
_____no_output_____
###Markdown
It is also convenient to import `numpy` for the use of numerical expressions and `pyplot` to visualize some results.
###Code
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Create circuit graph

A superconducting circuit can be mapped to a graph by representing each circuit element between two nodes as an edge of the graph. Possible elements are capacitances (`'C'`), linear inductances (`'L'`) and Josephson junctions (`'J'`). Since multiple edges of such a graph can be in parallel, we work with the `MultiGraph` class of `networkx`. Let's consider the Fluxonium as an example, which consists of a linear inductance and a Josephson junction in parallel, together with a shunt capacitance. To initialize the circuit, we execute the following:
###Code
graph = nx.MultiGraph()
graph.add_edge(0,1, element = 'C')
graph.add_edge(0,1, element = 'J')
graph.add_edge(0,1, element = 'L')
circuit = cq.CircuitQ(graph)
###Output
_____no_output_____
###Markdown
Here, we created a graph with three elements between node 0 and node 1. The `CircuitQ` class is the core of the toolbox: an instance of this class corresponds to a superconducting circuit. Ground nodes and charge offset nodes can be specified upon initialization. Please refer to the API reference for more details. CircuitQ's function `visualize_circuit_general()` can be used to visualize the graph.

Symbolic Hamiltonian

CircuitQ performs an automated quantization of the input circuit. It provides a symbolic `SymPy` expression for the corresponding circuit Hamiltonian.
###Code
circuit.h
###Output
_____no_output_____
###Markdown
Parameters

Besides the flux and charge variables $\phi$ and $q$, the Hamiltonian contains system parameters whose values can be changed to tune the circuit.
###Code
circuit.h_parameters
###Output
_____no_output_____
###Markdown
In this case it contains the capacitance $C_{01}$, the Josephson energy $E_{J010}$ and the inductance $L_{010}$. During quantization, CircuitQ automatically detects inductive loops and assigns loop fluxes to them, represented here by $\tilde{\Phi}_{010}$. If a charge offset had been assigned upon initialization, it would also appear as a parameter $qo$. Let's assign numerical values for the Josephson energy and the external flux:
###Code
EJ = circuit.c_v["E"]/3
phi_ext = np.pi*circuit.phi_0
###Output
_____no_output_____
###Markdown
`circuit.c_v` is a dictionary of characteristic parameter values. `circuit.phi_0` is the flux quantum, which we define as $\Phi_o = \frac{\hbar}{2e}$.

Numerical Hamiltonian

To analyze the circuit's quantum physical properties, a numerical representation of the Hamiltonian has to be generated.
###Code
h_num = circuit.get_numerical_hamiltonian(401,
parameter_values=[False, EJ, False, phi_ext ])
###Output
_____no_output_____
###Markdown
The method `get_numerical_hamiltonian()` takes the length of the matrices representing the flux and charge variables as its first input parameter (here `401`). CircuitQ automatically performs the numerical implementation of the node variables either in the charge basis, in the flux basis, or in a combination of both, depending on the periodicity of the potential along the individual flux variables. In the exemplary case of the Fluxonium, the flux basis will be used due to the harmonic contribution of the linear inductance. The length of the numerical flux coordinate grid can be set manually with the keyword `grid_length`; this may have a crucial impact on the numerical results. If it is not specified (like in the example above), it is set to the default value $\Phi_{max} = 4 \pi \Phi_o$. For the cases which are implemented in the charge basis, `CircuitQ`'s method `transform_charge_to_flux()` can be used to still express the eigenstates in the flux basis (see More Examples -> Flux Qubit). To assign specific parameter values, use an input list for `parameter_values`, in the same order as the `h_parameters` list. If a value is set to `False`, a characteristic default value will be assigned to it. The numerical Hamiltonian is represented as a sparse `csr_matrix` from the `SciPy` package.

Diagonalization

To get the (lowest) eigenvalues and eigenstates of the numerical Hamiltonian, use `get_eigensystem()`, which is a wrapper around `SciPy`'s `eigsh` function for sparse matrices.
###Code
eigv, eigs = circuit.get_eigensystem()
###Output
_____no_output_____
###Markdown
Finally, we can plot the lowest eigenstates of the circuit.
###Code
h = 6.62607015e-34
y_scaling = 1/(h *1e9)
plt.plot(circuit.flux_list, np.array(circuit.potential)*y_scaling, lw=1.5)
for n in range(5):
plt.plot(circuit.flux_list,
(eigv[n]+ abs(eigs[:,n])**2*2e-23)*y_scaling
,label="Eigenstate " +str(n))
plt.legend()
plt.xlabel(r"$\Phi_1 / \Phi_o$")
plt.ylabel(r"Energy in GHz$\cdot$h")
plt.xticks(np.linspace(-2*np.pi, 1*np.pi, 4)*circuit.phi_0 ,
[r'$-2\pi$', r'$-\pi$',r'$0$',r'$\pi$'])
plt.xlim(-2.5*np.pi*circuit.phi_0, 1.5*np.pi*circuit.phi_0)
plt.ylim(1,10)
plt.show()
###Output
_____no_output_____
###Markdown
To plot the potential, the `potential` attribute of a `CircuitQ` instance can be used if the problem is 1-dimensional and formulated in the flux basis.

Anharmonicity

To operate the circuit as a qubit, a certain degree of anharmonicity in the spectrum is desired. CircuitQ provides a measure for this purpose, given by `get_spectrum_anharmonicity()`. The method considers the transition that is closest to the qubit transition, calculates the quotient between these two transitions, and returns abs(1-quotient). A cutoff has been implemented such that the returned anharmonicity lies within $[0,1]$.
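As a rough sketch of this measure (simplified: only the three lowest levels are used here, and the cutoff is written out explicitly as an assumption):

```python
# simplified sketch of the anharmonicity measure described above; the library
# may compare further transitions, this only illustrates the quotient idea
E0, E1, E2 = eigv[:3]
qubit_transition = E1 - E0
neighbouring_transition = E2 - E1   # assumed to be the transition closest to the qubit transition
quotient = neighbouring_transition / qubit_transition
anharmonicity_sketch = min(1.0, abs(1 - quotient))
```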
###Code
circuit.get_spectrum_anharmonicity()
###Output
_____no_output_____
###Markdown
$T1$ time

The robustness against decoherence is a crucial property of a circuit. For this purpose, we developed three measures to evaluate the $T1$ time contribution due to quasiparticle tunneling,
###Code
T1_qp = circuit.get_T1_quasiparticles()
print("Quasiparticles noise contribution T1 = {:e} s".format(T1_qp))
###Output
Quasiparticles noise contribution T1 = 2.272398e-05 s
###Markdown
charge noise,
###Code
T1_c = circuit.get_T1_charge()
print("Charge noise contribution T1 = {:e} s".format(T1_c))
###Output
Charge noise contribution T1 = 1.891442e-04 s
###Markdown
and flux noise
###Code
T1_f = circuit.get_T1_flux()
print("Flux noise contribution T1 = {:e} s".format(T1_f))
###Output
Flux noise contribution T1 = 1.895929e-02 s
###Markdown
This leads to an overall $T1$ time measure of:
###Code
print("Total T1 = {:e} s".format( 1/( 1/T1_qp + 1/T1_c + 1/T1_f)))
###Output
Total T1 = 2.026503e-05 s
###Markdown
Please refer to our preprint for more details on how the noise contributions are calculated.

Parameter sweep

The dependence of circuit properties on the value of one or more parameters is a crucial aspect to consider when analysing superconducting circuits. The exemplary spectrum above depends on the value of the external flux $\tilde{\Phi}_{010}$. For the previous plot, we chose the external flux to be $\pi \cdot \Phi_o$. If we set this value to $0$, the spectrum has the following form:
###Code
phi_ext = 0
circuit.get_numerical_hamiltonian(401, parameter_values=[False, EJ, False, phi_ext ])
eigv, eigs = circuit.get_eigensystem()
plt.plot(circuit.flux_list, np.array(circuit.potential)*y_scaling, lw=1.5)
for n in range(5):
plt.plot(circuit.flux_list,
(eigv[n]+ abs(eigs[:,n])**2*2e-23)*y_scaling
,label="Eigenstate " +str(n))
plt.legend()
plt.xlabel(r"$\Phi_1 / \Phi_o$")
plt.ylabel(r"Energy in GHz$\cdot$h")
plt.xticks(np.linspace(-2*np.pi, 2*np.pi, 5)*circuit.phi_0 ,
[r'$-2\pi$', r'$-\pi$',r'$0$',r'$\pi$',r'$2\pi$'])
plt.xlim(-2.5*np.pi*circuit.phi_0, 2.5*np.pi*circuit.phi_0)
plt.ylim(-5,20)
plt.show()
###Output
_____no_output_____
###Markdown
The harmonicity of the spectrum increased due to the change of the external flux. This fact will also be reflected when calculating the anharmonicity.
###Code
circuit.get_spectrum_anharmonicity()
###Output
_____no_output_____
###Markdown
We can further study the dependence of the lowest eigenenergies on the external flux. Let's start by calculating the eigenvalues for different values of the external flux. Note that the order of the `parameter_values` list stays the same.
###Code
eigenvalues = []
phi_ext_list = np.linspace(-2*np.pi*circuit.phi_0,2*np.pi*circuit.phi_0,100)
for phi_ext in phi_ext_list:
circuit.get_numerical_hamiltonian(401, parameter_values=[False, EJ, False, phi_ext])
eigv, eigs = circuit.get_eigensystem(5)
eigenvalues.append(eigv)
###Output
_____no_output_____
###Markdown
Finally, we can plot the parameter sweep.
###Code
plt.plot(phi_ext_list, np.array(eigenvalues)*y_scaling)
plt.xlabel(r"External Flux $ \tilde{\Phi}_{010} / \Phi_o$")
plt.ylabel(r"Energy in GHz$\cdot$h")
plt.xticks(np.linspace(-2*np.pi, 2*np.pi, 5)*circuit.phi_0 ,
[r'$-2\pi$', r'$-\pi$',r'$0$',r'$\pi$',r'$2\pi$'])
plt.show()
###Output
_____no_output_____
###Markdown
We are not restricted in analyzing only the impact of the external flux, as this variation can be done for any parameter in `paramter_values`. Charge offsetThe offset of a node charge can be tuned by an external source. To work with charge offsets, the corresponding nodes have to be specified when initializing a `CircuitQ` instance. As a demonstration let's choose node 1 to have a charge offset.
###Code
circuit = cq.CircuitQ(graph, offset_nodes=[1])
circuit.h
###Output
_____no_output_____
###Markdown
The node charge $q_1$ is now shifted by an offset charge $\tilde{q}_1$. This new variable now also appears in in the parameter list:
###Code
circuit.h_parameters
###Output
_____no_output_____
###Markdown
Finally, we can also study the impact of the external charge on the spectrum of the exemplary circuit.
###Code
eigenvalues = []
phi_ext = np.pi*circuit.phi_0
q_off_list = np.linspace(-10*2*circuit.e,10*2*circuit.e,30)
for q_off in q_off_list:
circuit.get_numerical_hamiltonian(401,
parameter_values=[False, EJ, False, phi_ext, q_off])
eigv, eigs = circuit.get_eigensystem(5)
eigenvalues.append(eigv)
plt.plot(q_off_list/(2*circuit.e), np.array(eigenvalues)*y_scaling)
plt.xlabel(r"Charge offset $\tilde{q}_1 / (2e)$")
plt.ylabel(r"Energy in GHz$\cdot$h")
plt.show()
###Output
_____no_output_____
###Markdown
Tutorial

This tutorial describes how you can detect sequences in multi-neuronal spiking activity data.

Import modules
###Code
import numpy as np
import pandas as pd
from scipy import sparse
import matplotlib.pyplot as plt
from spykesim import editsim
from gendata import gendata
from scipy.ndimage.filters import gaussian_filter1d
from tqdm import tqdm
%matplotlib inline
import spykesim
df, binmat = gendata()
plt.plot(df.spiketime, df.neuronid, "b.")
plt.title("Sample data")
plt.xlabel("Time(s)")
plt.ylabel("Neuron#")
###Output
_____no_output_____
###Markdown
Edit similarity calculation

If you would like to calculate the edit similarity between two segments of neuronal activity data, you may use the function `_sim`:
###Code
a = 0.001
es = editsim.FromBinMat(alpha=a)
mat1 = binmat[:, 0:200].toarray()
mat2 = binmat[:, 200:400].toarray()
sim, _, _, _ = es._sim(mat1, mat2)
print(f"The edit similarity between mat1 and mat2 is {sim}")
###Output
The edit similarity between mat1 and mat2 is 19.64182094031858
###Markdown
Or, if you want to divide the original neuronal data into segments of equal length and calculate the similarity matrix, you can do it as follows:
###Code
window = 100 # ms
es.gensimmat(
binmat, window=window, slide=window, numband=4, bandwidth=5, minhash=False
)
plt.imshow(es.simmat)
###Output
_____no_output_____
###Markdown
Clustering

You can cluster the segments using the similarity matrix:
###Code
es.clustering(min_cluster_size=10)
es.cluster_labels
es.gen_profile()
###Output
100%|██████████| 59/59 [00:01<00:00, 44.68it/s]
100%|██████████| 59/59 [00:01<00:00, 43.46it/s]
100%|██████████| 59/59 [00:01<00:00, 43.24it/s]
100%|██████████| 59/59 [00:01<00:00, 43.52it/s]
100%|██████████| 17/17 [00:00<00:00, 101.91it/s]
100%|██████████| 17/17 [00:00<00:00, 99.20it/s]
100%|██████████| 59/59 [00:01<00:00, 48.48it/s]
100%|██████████| 59/59 [00:01<00:00, 49.12it/s]
###Markdown
The plots below show profiles that represent spatiotemporal structures which appear repeatedly in the data.
###Code
plt.imshow(es.profiles[0])
plt.colorbar()
plt.imshow(es.profiles[1])
plt.colorbar()
plt.imshow(es.profiles[3])
plt.colorbar()
###Output
_____no_output_____
###Markdown
Detect Sequences
###Code
sequence_dict = es.detect_sequences(cluster_id=0, th=5)
for i, (idx, mat) in enumerate(sequence_dict.items()):
plt.imshow(mat, interpolation="nearest")
plt.title(f"Detected time: {idx}ms")
plt.colorbar()
plt.show()
if i >= 5:
break
sequence_dict = es.detect_sequences(cluster_id=3, th=5)
for i, (idx, mat) in enumerate(sequence_dict.items()):
plt.imshow(mat, interpolation="nearest")
plt.title(f"Detected time: {idx}ms")
plt.colorbar()
plt.show()
if i >= 5:
break
###Output
_____no_output_____
###Markdown
.. image:: ../artwork/cicada.png
   :width: 200px
   :align: right

.. _tutorial:

Tutorial
========

The Millionaires' Dilemma
-------------------------

Imagine a pair of millionaires who want to know who has the largest fortune, yet neither wants to reveal the exact size of their fortune to the other (or to anyone else). These may seem like mutually-exclusive goals, yet it turns out that we can arrive at the correct answer without revealing either of the millionaires' secrets. Using secure multiparty computation (MPC), the millionaires can cooperatively compute which fortune is largest in such a way that both learn the result, yet neither learns the other's private information. Cicada provides a collection of components that can be combined in flexible ways to create MPC programs. This tutorial will introduce you to the basic building blocks of a Cicada program, and solve the millionaires' dilemma. Ready? Let's get started!

The Basics
----------

An important point to understand fully is that - just as the name says - secure multiparty computation involves multiple cooperating parties, which we will refer to as *players* throughout this documentation. In computer science terms, each player is a separate *process*, typically running on a separate *host*. In other words, you should think of an MPC program as a single set of instructions that run in parallel across multiple computers, communicating and coordinating as they execute. Writing programs this way can feel a little weird until you become accustomed to it. Fortunately, the high performance computing (HPC) community has been writing programs this way for decades, and Cicada brings that expertise to MPC. If you're familiar with writing software using popular HPC tools like `MPI `_, you'll be right at home in Cicada. If not, don't worry! We'll explain how it all works as we go. Before we begin, we'll set up Python's builtin logging functionality, which Cicada uses to provide feedback on running MPC programs:
###Code
import logging
logging.basicConfig(level=logging.INFO)
###Output
_____no_output_____
###Markdown
Next, we need to set up two players. That means we're going to have to start two processes, each running the same program, and the processes will need to communicate with each other. In Cicada, all communication is handled through *communicators*, which coordinate and pass messages between players. What this means is that every Cicada program must do (at a minimum) two things:

* Start a collection of processes, one per player.
* Create and initialize a communicator.

For this tutorial, we'll be using :any:`NNGCommunicator`, which uses the Python `pynng `_ module and TCP networking for communication.

.. note:: Advanced Cicada users can substitute their own custom communicators for use with alternate networking libraries or hardware.

Cicada provides several means to simplify an otherwise tricky bootstrapping process. For example, :any:`NNGCommunicator` can run both processes on the local machine and set up the communicator for us, which is ideal for development. Let's see how it works:
###Code
import cicada.communicator
@cicada.communicator.NNGCommunicator.run(world_size=2)
def main(communicator):
print("Hello, World!")
main()
###Output
INFO:cicada.communicator.nng:Player 0 rendezvous with tcp://127.0.0.1:57103 from tcp://127.0.0.1:57103.
INFO:cicada.communicator.nng:Player 1 rendezvous with tcp://127.0.0.1:57103 from tcp://127.0.0.1:57104.
INFO:cicada.communicator.nng:Comm 'world' player 0 communicator ready.
INFO:cicada.communicator.nng:Comm 'world' player 1 communicator ready.
###Markdown
There's a lot to unpack here, so we'll go over things in detail. First, we define a function named `main`, which prints a familiar message:: def main(communicator): print("Hello, World!") Note that `main` takes a single `communicator` argument, yet we aren't providing one when we call it:: main() Where does the communicator come from? That's the job of the :any:`NNGCommunicator.run` decorator:: @cicada.communicator.NNGCommunicator.run(world_size=2)If you aren't familiar, a Python decorator is a function that "wraps" another function, altering the wrapped function's behavior when called. In this case, :any:`NNGCommunicator.run` is doing a lot of work for us: it starts two separate processes (specified using the decorator's `world_size` parameter), sets-up an instance of :any:`NNGCommunicator`, and passes the communicator as the first argument to `main`.We can see all of this as it happens by examining the log output. First, the individual player processes wait to establish communications with each other:.. code-block:: bash INFO:cicada.communicator.nng:Player 0 rendezvous with tcp://127.0.0.1:65257 from tcp://127.0.0.1:65257. INFO:cicada.communicator.nng:Player 1 rendezvous with tcp://127.0.0.1:65257 from tcp://127.0.0.1:65258. Next, we see a confirmation that communications have been established:.. code-block:: bash INFO:cicada.communicator.nng:Comm 'world' player 0 communicator ready. INFO:cicada.communicator.nng:Comm 'world' player 1 communicator ready.Then, we see the output from the decorated function ... in this case, two copies of "Hello, World!" (one from each player):.. code-block:: bash Hello, World!Hello, World! The outputs appear on the same line because they're being printed by two separate processes running at the same time - if you run this notebook yourself, the output may look different, depending on random quirks of timing. We'll see in a moment how to prevent processes from stepping on each others' outputs.Once the decorated function ends, the communicators are automatically cleaned-up:.. code-block:: bash INFO:cicada.communicator.nng:Comm 'world' player 0 communicator freed. INFO:cicada.communicator.nng:Comm 'world' player 1 communicator freed.Finally, we see the value (if any) returned by each player from the decorated function:.. code-block:: bash INFO:cicada.communicator.nng:Player 0 returned: None INFO:cicada.communicator.nng:Player 1 returned: None Since our `main` function doesn't have a return statement, the returned value is :any:`None`.So here's what we've done so far: we defined a function named `main`, although the name isn't important - we could decorate any function and use it run an MPC program. Because we wrapped our function using the decorator provided by :any:`NNGCommunicator`, calling the function causes the function body to be run multiple times in separate processes (two in this case), using a communicator automatically created for us. Be sure you understand these steps before proceeding. Logging-------As we saw above, when multiple processes print to stdout at the same time, the results can step on each other. This is a significant problem when we start doing real MPC computation and need to debug programs and print results. Let's add Cicada's :any:`Logger` to the current example to tidy things up:
###Code
import cicada.logging
@cicada.communicator.NNGCommunicator.run(world_size=2)
def main(communicator):
log = cicada.logging.Logger(logger=logging.getLogger(), communicator=communicator)
log.info("Hello, World!")
main()
###Output
INFO:cicada.communicator.nng:Player 0 rendezvous with tcp://127.0.0.1:57120 from tcp://127.0.0.1:57120.
INFO:cicada.communicator.nng:Player 1 rendezvous with tcp://127.0.0.1:57120 from tcp://127.0.0.1:57121.
INFO:cicada.communicator.nng:Comm 'world' player 0 communicator ready.
INFO:cicada.communicator.nng:Comm 'world' player 1 communicator ready.
INFO:root:Hello, World!
INFO:root:Hello, World!
INFO:cicada.communicator.nng:Comm 'world' player 1 communicator freed.
INFO:cicada.communicator.nng:Comm 'world' player 0 communicator freed.
INFO:cicada.communicator.nng:Player 0 returned: None
INFO:cicada.communicator.nng:Player 1 returned: None
###Markdown
Now, the output messages are nicely printed on separate lines. Cicada's :any:`Logger` wraps a standard Python logger with a communicator, using the latter to coordinate among the players so that only one player generates output at a time.Next, it would be especially useful to know which message belongs to which player. In Cicada, each player has a zero-based integer identifier referred to as its `rank`, which we can use in our message:
###Code
@cicada.communicator.NNGCommunicator.run(world_size=2)
def main(communicator):
log = cicada.logging.Logger(logger=logging.getLogger(), communicator=communicator)
log.info(f"Hello from player {communicator.rank}!")
main()
###Output
INFO:cicada.communicator.nng:Player 0 rendezvous with tcp://127.0.0.1:57135 from tcp://127.0.0.1:57135.
INFO:cicada.communicator.nng:Player 1 rendezvous with tcp://127.0.0.1:57135 from tcp://127.0.0.1:57136.
INFO:cicada.communicator.nng:Comm 'world' player 0 communicator ready.
INFO:cicada.communicator.nng:Comm 'world' player 1 communicator ready.
INFO:root:Hello from player 0!
INFO:root:Hello from player 1!
INFO:cicada.communicator.nng:Comm 'world' player 0 communicator freed.
INFO:cicada.communicator.nng:Comm 'world' player 1 communicator freed.
INFO:cicada.communicator.nng:Player 0 returned: None
INFO:cicada.communicator.nng:Player 1 returned: None
###Markdown
Notice that the player's rank is accessed using the communicator (a concrete example of how communicators provide the organization for a group of players), and that the logger prints messages to the console in rank order. As you will see in the examples that follow, the player's rank is one of the most-used pieces of information in an MPC program - we will use rank extensively to change the players' behavior based on their identity, including targeting communications and MPC operations to specific players based on their rank.To round things out, a good MPC program should be written in such a way that it can be run using any number of players. Instead of hard-coding the number of players into your programs, you should use the communicator's `world_size` attribute to determine at runtime how many players are participating in the computation, and adjust player behavior as-needed. Note that the value of the `world_size` attribute will always match the value of the `world_size` parameter to :any:`NNGCommunicator.run`:
###Code
@cicada.communicator.NNGCommunicator.run(world_size=2)
def main(communicator):
log = cicada.logging.Logger(logger=logging.getLogger(), communicator=communicator)
log.info(f"Hello from player {communicator.rank} of {communicator.world_size}!")
main()
###Output
INFO:cicada.communicator.nng:Player 0 rendezvous with tcp://127.0.0.1:57150 from tcp://127.0.0.1:57150.
INFO:cicada.communicator.nng:Player 1 rendezvous with tcp://127.0.0.1:57150 from tcp://127.0.0.1:57151.
INFO:cicada.communicator.nng:Comm 'world' player 0 communicator ready.
INFO:cicada.communicator.nng:Comm 'world' player 1 communicator ready.
INFO:root:Hello from player 0 of 2!
INFO:root:Hello from player 1 of 2!
INFO:cicada.communicator.nng:Comm 'world' player 0 communicator freed.
INFO:cicada.communicator.nng:Comm 'world' player 1 communicator freed.
INFO:cicada.communicator.nng:Player 0 returned: None
INFO:cicada.communicator.nng:Player 1 returned: None
###Markdown
Encoders--------One of the trickiest topics when working with MPC is managing *encodings* - as users of MPC, we typically want to perform computation on real numbers, but most MPC protocols require integer operands with special properties - for example, "only positive integers", or "only integers mod :math:`p` where :math:`p` is a finite number".To manage this, Cicada provides *encoders*, which are objects that convert real numbers between encoded and unencoded representations. Every encoder has, at a minimum, a method called `encode` and a method called `decode` (encoders typically have additional attributes and operations that are encoding-specific). To see how they work, let's start by creating an instance of :any:`FixedFieldEncoder` (note that we don't need separate processes or a communicator to work with encoders):
###Code
import cicada.encoder
encoder = cicada.encoder.FixedFieldEncoder()
encoder
###Output
_____no_output_____
###Markdown
:any:`FixedFieldEncoder` encodes real values as integers in a field by reserving a fixed number of bits (16 bits by default) to store the fractional part of the original value. Now we'll create a real value to encode:
###Code
import numpy
value = numpy.array(numpy.pi)
value
###Output
_____no_output_____
###Markdown
.. note:: Cicada requires numpy arrays as arguments throughout the API. This greatly simplifies application and implementation code by eliminating error-prone loops, and provides important speedups. This is why we initialized our value using :any:`numpy.array` in the example above, even though it's a scalar value - you may not have known that numpy treats a scalar as "just another array", albeit one with zero dimensions, size equal to `1`, and shape equal to an empty :any:`tuple`. With our value initialized, we can encode it:
###Code
encoded_value = encoder.encode(value)
encoded_value
###Output
_____no_output_____
###Markdown
The encoder turns the unencoded array of real values into an array of integers with the same shape that encode the original values. 205887 may seem like an unlikely way to store the value of $\pi$, so let's try decoding it to see if the decoded value matches the original:
###Code
decoded_value = encoder.decode(encoded_value)
decoded_value
###Output
_____no_output_____
###Markdown
You can see that the result is a value that's *pretty close* to the original, but not an exact match. This is because the default 16 bits of precision used by FixedFieldEncoder to represent fractions can only approximate some values (of course, the original value was itself a finite approximation of $\pi$, so this shouldn't bother you too much). For many computations 16 bits of fractional precision is more than enough, but if you need more (or less) precision, you can create an encoder with custom parameters. For example, if we double the number of fractional bits:
###Code
encoder = cicada.encoder.FixedFieldEncoder(precision=32)
encoder.decode(encoder.encode(value))
###Output
_____no_output_____
###Markdown
... we get a much closer match to the original value.
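To make the fixed-point encoding concrete: the encoder (roughly speaking) stores the value scaled by :math:`2^{precision}` as an integer in the field. A quick back-of-the-envelope check, assuming the default 16 fractional bits::

    int(numpy.pi * 2**16)    # 205887 - the encoded integer we saw above
    205887 / 2**16           # 3.1415863037109375 - the approximation recovered by decoding

Doubling the precision to 32 bits shrinks that rounding error accordingly, which is why the round trip above lands so much closer to the original value.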
###Markdown
Secure Multiparty Computation (MPC)
-----------------------------------
OK, with the preliminaries out of the way, let's do some MPC! Recall that we have two players (the millionaires), each of whom has a secret (their fortune), and that they wish to identify whose secret is larger without revealing the secrets to each other.
The key technique provided by MPC to accomplish this is *secret sharing*, where each secret is split into pieces called *secret shares*, and the shares are distributed to the other players. Because MPC provides *protocols* that allow players to collaboratively perform mathematical operations on shares, and because it is provably impossible to reconstruct a secret unless a player has obtained all of the necessary shares, the players in our computation can perform arithmetic and logical operations on values without knowing what they are!
There is more than one way to generate secret shares, and there are many protocols that have been developed to manipulate them. For this tutorial we're going to focus on *additive secret sharing*, where the shares of a secret are randomly-chosen numbers that reveal the secret value when summed. Thus, a player can only learn (or reveal) the value of a secret if they have access to every share, in order to add them all together.
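As a toy illustration of the idea in plain Python (with a deliberately small, hypothetical modulus - real protocols use a much larger field), each share looks like noise on its own, yet the shares sum to the secret::

    modulus = 2**13 - 1                    # hypothetical small field, for illustration only
    secret = 1234

    share0 = 6789                          # chosen at random from the field
    share1 = (secret - share0) % modulus   # the "rest" of the secret

    (share0 + share1) % modulus            # 1234 - summing every share reveals the secret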
Let's see what this looks like in Cicada. To create and manipulate additive secret shares, we will create an instance of :any:`AdditiveProtocol`. In the following example, we create the protocol, and use it to create a set of secret shares that are distributed to all players:
###Code
import cicada.additive

@cicada.communicator.NNGCommunicator.run(world_size=2)
def main(communicator):
    log = cicada.logging.Logger(logger=logging.getLogger(), communicator=communicator)
    protocol = cicada.additive.AdditiveProtocol(communicator=communicator, seed=1234)

    secret = numpy.array(numpy.pi) if communicator.rank == 0 else None
    log.info(f"Player {communicator.rank} secret: {secret}")

    share = protocol.share(src=0, secret=protocol.encoder.encode(secret), shape=())
    log.info(f"Player {communicator.rank} share: {share}")

main()
###Output
_____no_output_____
###Markdown
Let's dig into this. Our first new addition is the creation of the protocol::
import cicada.additive
...
protocol = cicada.additive.AdditiveProtocol(communicator=communicator, seed=1234)
Much like the :any:`Logger` object created previously, we pass the communicator to our protocol object because it needs to communicate among players to implement its functionality.
Next, we initialize a secret value known only to player 0::
secret = numpy.array(numpy.pi) if communicator.rank == 0 else None
Note that, as we described earlier, we're using the player rank to change the behavior of our program depending on which player is executing it. In this case, player 0 sets the value of the `secret` variable to :math:`\pi` while the other player leaves it uninitialized.
.. note::
"But wait!" you may be thinking ... how is this value a "secret" if every player is executing the same code!? The other player may not be initializing the variable, but the code is the same everywhere, isn't it? Doesn't that mean that on some level every player "knows" the secret?
You are absolutely correct. In this example the secret is embedded into the program code as a literal value, which means it isn't really a secret. We do this frequently to keep these examples succinct. Whenever you see code like this that embeds a secret into code, you should have it in the back of your mind that a "real" program with "real" secrets would supply them in a privacy-preserving way, whether by prompting a human to enter a secret value, loading data from a player-specific file or database, reading a sensor that only that player has access to, etc.
The same goes for the logging output ... in real life, we wouldn't log secret values to stdout, where anyone can see them! In this case, we do it for strictly pedagogical purposes.
We can see from the log output that only player 0 is supplying a secret value:
.. code-block:: bash
INFO:root:Player 0 secret: 3.141592653589793
INFO:root:Player 1 secret: None
Next, player 0 shares the secret value with the other player using additive secret sharing::
share = protocol.share(src=0, secret=protocol.encoder.encode(secret), shape=())
Again, there's a lot of detail here to unpack. Remember that every player is running the code in parallel - :any:`AdditiveProtocol.share` is an example of a *collective operation*, one that **must** be called by every player and, except for the `secret` argument, must be called with the same arguments by every player. This is a common pattern in Cicada (and MPI, if you're familiar with HPC programming). In this case, the arguments indicate that player 0 will provide the secret (`src=0`), and that the secret is a scalar value (`shape=()` ... remember that Cicada can work with arrays of any shape as secrets). Every player has to provide a value for `secret`, but the value is ignored everywhere except for player 0.
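For example, sharing a small vector instead of a scalar might look something like this (a sketch following the same pattern as above; the length-3 shape is our own illustrative choice, not part of the original example)::

    # hypothetical variation: player 0 shares a length-3 vector
    secret = numpy.array([1.1, 2.2, 3.3]) if communicator.rank == 0 else None
    share = protocol.share(src=0, secret=protocol.encoder.encode(secret), shape=(3,))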
Note that the secret is encoded using an encoder provided by the protocol - this is because :any:`AdditiveProtocol` can only work with one type of encoder, which it creates for itself, so you *must* use the protocol's encoder when encoding and decoding values.
The protocol object takes the encoded secret provided by player 0, creates secret shares, and distributes them to all players, where they become the return value from :any:`AdditiveProtocol.share`. Note from the log output that *both* players receive a share of the secret, including the player that provided it:
.. code-block:: bash
INFO:root:Player 0 share: cicada.additive.AdditiveArrayShare(storage=5819376713855054490)
INFO:root:Player 1 share: cicada.additive.AdditiveArrayShare(storage=1452817035210155571)
The values of the shares are random numbers drawn from the encoder's field that, when summed using the encoder's modular arithmetic, equal the encoded representation of the original secret.
To see that this is so, let's re-run the experiment, but add a final step where the players combine their shares to reveal the original secret:
###Code
@cicada.communicator.NNGCommunicator.run(world_size=2)
def main(communicator):
    log = cicada.logging.Logger(logger=logging.getLogger(), communicator=communicator)
    protocol = cicada.additive.AdditiveProtocol(communicator=communicator, seed=1234)

    secret = numpy.array(numpy.pi) if communicator.rank == 0 else None
    log.info(f"Player {communicator.rank} secret: {secret}")

    share = protocol.share(src=0, secret=protocol.encoder.encode(secret), shape=())
    log.info(f"Player {communicator.rank} share: {share}")

    revealed = protocol.encoder.decode(protocol.reveal(share))
    log.info(f"Player {communicator.rank} revealed: {revealed}")

main()
###Output
_____no_output_____
###Markdown
Now, when every player passes their share of the secret to :any:`AdditiveProtocol.reveal` (which is another collective operation) the result is the encoded representation of the original secret. When we decode it using the encoder that the protocol object provides, we get the expected result:
.. code-block:: bash
INFO:root:Player 0 revealed: 3.1415863037109375
INFO:root:Player 1 revealed: 3.1415863037109375
(Remember that the value isn't an exact match because the encoding-decoding round trip is lossy.)
Now that we know how to share and reveal secrets, we're finally ready to do some computation. Let's get our millionaires an answer to their question. The trick is that, in addition to using :any:`AdditiveProtocol` to create and reveal shares, we can use it to perform mathematical operations on shares, which return shares of the result. For example, we can perform a less-than comparison on secret-shared values, and when we do so, we get back a share of the answer. If we reveal *that* to the players, then they will know which fortune is larger, without knowing what the original secrets were. Let's see what it looks like:
###Code
@cicada.communicator.NNGCommunicator.run(world_size=2)
def main(communicator):
    log = cicada.logging.Logger(logger=logging.getLogger(), communicator=communicator)
    protocol = cicada.additive.AdditiveProtocol(communicator=communicator, seed=1234)

    if communicator.rank == 0:
        fortune = numpy.array(10000000)
    elif communicator.rank == 1:
        fortune = numpy.array(12000000)
    log.info(f"Player {communicator.rank} fortune: {fortune}")

    fortune = protocol.encoder.encode(fortune)
    share0 = protocol.share(src=0, secret=fortune, shape=())
    share1 = protocol.share(src=1, secret=fortune, shape=())

    if protocol.reveal(protocol.less(share0, share1)):
        winner = 1
    else:
        winner = 0

    log.info(f"Winner revealed to player {communicator.rank}: {winner}")

main()
###Output
_____no_output_____
###Markdown
Note that both players provide a secret now, not just player 0, so that the `fortune` variable contains a different value on each process. Also, the same `fortune` variable is passed as the secret value to :any:`AdditiveProtocol.share` twice, once for each player. Remember that this works because the secret value is ignored by all players except the player specified in the `src` parameter.
We use :any:`AdditiveProtocol.less` to compare the shares of the two values, which returns a share of the result - either a share of 0 if the comparison is false, or a share of 1 if it is true. Because these are boolean values rather than numbers, they aren't encoded, so no decoding is necessary.
Looking at the results, we can verify that player 1 does have the largest fortune, so our millionaires finally have their answer!
Although this program works fine, it *is* hardcoded in such a way that it will *only* work with two players. As we mentioned above, it's usually a good idea to write MPC programs to work with any number of players - in addition to being more flexible, this approach can often make code more compact, and easier to understand. Let's rewrite our example to be agnostic about the number of players, and while we're at it, let's have every player get their input from a file, so that we're no longer embedding secrets in the code as literals:
###Code
@cicada.communicator.NNGCommunicator.run(world_size=4)
def main(communicator):
    log = cicada.logging.Logger(logger=logging.getLogger(), communicator=communicator)
    protocol = cicada.additive.AdditiveProtocol(communicator=communicator, seed=1234)

    fortune = numpy.loadtxt(f"../examples/millionaire-{communicator.rank}.txt")

    winner = None
    winning_share = protocol.share(src=0, secret=protocol.encoder.zeros(shape=()), shape=())
    for rank in communicator.ranks:
        fortune_share = protocol.share(src=rank, secret=protocol.encoder.encode(fortune), shape=())
        less_share = protocol.less(fortune_share, winning_share)
        less = protocol.reveal(less_share)
        if not less:
            winner = rank
            winning_share = fortune_share

    log.info(f"Winner revealed to player {communicator.rank}: {winner}")

main()
###Output
_____no_output_____
###Markdown
If we examine the contents of the individual players' files, we see that the choice of winner is correct:
###Code
fortunes = []
for rank in range(4):
    fortunes.append(numpy.loadtxt(f"../examples/millionaire-{rank}.txt"))

for rank, fortune in enumerate(fortunes):
    print(f"Player {rank} fortune: {fortune:>10}")
###Output
_____no_output_____
###Markdown
.. note::
Once again, you may be questioning how "secret" these files are - couldn't any of the players read the other players' secrets from the files? That's certainly true in this case, where all four players are running on the same machine. Just keep in mind that to be truly secure, an MPC program needs to be run by individual players on individual machines *that only those players control* - which is the only way to guarantee that secrets remain truly private.
That's it for this tutorial! Of course, real computation requires more than just comparisons - see the :doc:`user-guide` for individual articles with detailed topics on how to use Cicada for addition, multiplication, and more.
###Markdown
The sqlite-utils tutorial[sqlite-utils](https://sqlite-utils.datasette.io/en/stable/python-api.html) is a Python library (and [command-line tool](https://sqlite-utils.datasette.io/en/stable/cli.html)) for quickly creating and manipulating SQLite database files.This tutorial will show you how to use the Python library to manipulate data. InstallationTo install the library, run: pip install sqlite-utilsYou can run this in a Jupyter notebook cell by executing: %pip install sqlite-utils Or use `pip install -U sqlite-utils` to ensure you have upgraded to the most recent version.
###Code
%pip install -U sqlite_utils
import sqlite_utils
###Output
_____no_output_____
###Markdown
You can use the library with a database file on disk by running: db = sqlite_utils.Database("path/to/my/database.db")In this tutorial we will use an in-memory database. This is a quick way to try out new things, though you should note that when you close the notebook the data stored in the in-memory database will be lost.
###Code
db = sqlite_utils.Database(memory=True)
db
###Output
_____no_output_____
###Markdown
Creating a tableWe are going to create a new table in our database called `creatures` by passing in a Python list of dictionaries.`db[name_of_table]` will access a database table object with that name.Inserting data into that table will create it if it does not already exist.
###Code
db["creatures"].insert_all([{
"name": "Cleo",
"species": "dog",
"age": 6
}, {
"name": "Lila",
"species": "chicken",
"age": 0.8,
}, {
"name": "Bants",
"species": "chicken",
"age": 0.8,
}])
###Output
_____no_output_____
###Markdown
Let's grab a `table` reference to the new creatures table:
###Code
table = db["creatures"]
###Output
_____no_output_____
###Markdown
`sqlite-utils` automatically creates a table schema that matches the keys and data types of the dictionaries that were passed to `.insert_all()`.We can see that schema using `table.schema`:
###Code
print(table.schema)
###Output
CREATE TABLE [creatures] (
[name] TEXT,
[species] TEXT,
[age] FLOAT
)
###Markdown
Accessing dataThe `table.rows` property lets us loop through the rows in the table, returning each one as a Python dictionary:
###Code
for row in table.rows:
print(row)
###Output
{'name': 'Cleo', 'species': 'dog', 'age': 6.0}
{'name': 'Lila', 'species': 'chicken', 'age': 0.8}
{'name': 'Bants', 'species': 'chicken', 'age': 0.8}
###Markdown
The `db.query(sql)` method can be used to execute SQL queries and return the results as dictionaries:
###Code
list(db.query("select * from creatures"))
###Output
_____no_output_____
###Markdown
Or in a loop:
###Code
for row in db.query("select name, species from creatures"):
print(f'{row["name"]} is a {row["species"]}')
###Output
Cleo is a dog
Lila is a chicken
Bants is a chicken
###Markdown
SQL parametersYou can run a parameterized query using `?` as placeholders and passing a list of variables. The variables you pass will be correctly quoted, protecting your code from SQL injection vulnerabilities.
###Code
list(db.query("select * from creatures where age > ?", [1.0]))
###Output
_____no_output_____
###Markdown
As an alternative to question marks we can use `:name` parameters and feed in the values using a dictionary:
###Code
list(db.query("select * from creatures where species = :species", {"species": "chicken"}))
###Output
_____no_output_____
###Markdown
Primary keysWhen we created this table we did not specify a primary key. SQLite automatically creates a primary key called `rowid` if no other primary key is defined.We can run `select rowid, * from creatures` to see this hidden primary key:
###Code
list(db.query("select rowid, * from creatures"))
###Output
_____no_output_____
###Markdown
We can also see that using `table.pks_and_rows_where()`:
###Code
for pk, row in table.pks_and_rows_where():
print(pk, row)
###Output
1 {'rowid': 1, 'name': 'Cleo', 'species': 'dog', 'age': 6.0}
2 {'rowid': 2, 'name': 'Lila', 'species': 'chicken', 'age': 0.8}
3 {'rowid': 3, 'name': 'Bants', 'species': 'chicken', 'age': 0.8}
###Markdown
Let's recreate the table with our own primary key, which we will call `id`.`table.drop()` drops the table:
###Code
table.drop()
table
###Output
_____no_output_____
###Markdown
We can see a list of tables in the database using `db.tables`:
###Code
db.tables
###Output
_____no_output_____
###Markdown
We'll create the table again, this time with an `id` column.We use `pk="id"` to specify that the `id` column should be treated as the primary key for the table:
###Code
db["creatures"].insert_all([{
"id": 1,
"name": "Cleo",
"species": "dog",
"age": 6
}, {
"id": 2,
"name": "Lila",
"species": "chicken",
"age": 0.8,
}, {
"id": 3,
"name": "Bants",
"species": "chicken",
"age": 0.8,
}], pk="id")
print(table.schema)
###Output
CREATE TABLE [creatures] (
[id] INTEGER PRIMARY KEY,
[name] TEXT,
[species] TEXT,
[age] FLOAT
)
###Markdown
Inserting more recordsWe can call `.insert_all()` again to insert more records. Let's add two more chickens.
###Code
table.insert_all([{
"id": 4,
"name": "Azi",
"species": "chicken",
"age": 0.8,
}, {
"id": 5,
"name": "Snowy",
"species": "chicken",
"age": 0.9,
}], pk="id")
list(table.rows)
###Output
_____no_output_____
###Markdown
Since the `id` column is an integer primary key, we can insert a record without specifying an ID and one will be automatically added.Since we are only adding one record we will use `.insert()` instead of `.insert_all()`.
###Code
table.insert({"name": "Blue", "species": "chicken", "age": 0.9})
###Output
_____no_output_____
###Markdown
We can use `table.last_pk` to see the ID of the record we just added.
###Code
table.last_pk
###Output
_____no_output_____
###Markdown
Here's the full list of rows again:
###Code
list(table.rows)
###Output
_____no_output_____
###Markdown
If you try to add a new record with an existing ID, you will get an `IntegrityError`:
###Code
table.insert({"id": 6, "name": "Red", "species": "chicken", "age": 0.9})
###Output
_____no_output_____
###Markdown
You can use `replace=True` to replace the matching record with a new one:
###Code
table.insert({"id": 6, "name": "Red", "species": "chicken", "age": 0.9}, replace=True)
list(table.rows)
###Output
_____no_output_____
###Markdown
Updating a recordWe will rename that row back to `Blue`, this time using the `table.update(pk, updates)` method:
###Code
table.update(6, {"name": "Blue"})
list(db.query("select * from creatures where id = ?", [6]))
###Output
_____no_output_____
###Markdown
Extracting one of the columns into another tableOur current table has a `species` column with a string in it - let's pull that out into a separate table.We can do that using the [table.extract() method](https://sqlite-utils.datasette.io/en/stable/python-api.html#extracting-columns-into-a-separate-table).
###Code
table.extract("species")
###Output
_____no_output_____
###Markdown
We now have a new table called `species`, which we can see using the `db.tables` method:
###Code
db.tables
###Output
_____no_output_____
###Markdown
Our creatures table has been modified - instead of a `species` column it now has `species_id` which is a foreign key to the new table:
###Code
print(db["creatures"].schema)
print(list(db["creatures"].rows))
###Output
CREATE TABLE "creatures" (
[id] INTEGER PRIMARY KEY,
[name] TEXT,
[species_id] INTEGER,
[age] FLOAT,
FOREIGN KEY([species_id]) REFERENCES [species]([id])
)
[{'id': 1, 'name': 'Cleo', 'species_id': 1, 'age': 6.0}, {'id': 2, 'name': 'Lila', 'species_id': 2, 'age': 0.8}, {'id': 3, 'name': 'Bants', 'species_id': 2, 'age': 0.8}, {'id': 4, 'name': 'Azi', 'species_id': 2, 'age': 0.8}, {'id': 5, 'name': 'Snowy', 'species_id': 2, 'age': 0.9}, {'id': 6, 'name': 'Blue', 'species_id': 2, 'age': 0.9}]
###Markdown
The new `species` table has been created and populated too:
###Code
print(db["species"].schema)
print(list(db["species"].rows))
###Output
CREATE TABLE [species] (
[id] INTEGER PRIMARY KEY,
[species] TEXT
)
[{'id': 1, 'species': 'dog'}, {'id': 2, 'species': 'chicken'}]
###Markdown
We can use a join SQL query to combine data from these two tables:
###Code
list(db.query("""
select
creatures.id,
creatures.name,
creatures.age,
species.id as species_id,
species.species
from creatures
join species on creatures.species_id = species.id
"""))
###Output
_____no_output_____
###Markdown
Getting started with `tess-locator`The `tess-locator` package is a fast and user-friendly tool to query TESS pixel coordinates and FFI filenames without requiring internet access. It builds upon the existing [`tess-point`](https://github.com/christopherburke/tess-point) and `tess-cloud` packages. Example 1: Converting celestial to pixel coordinatesThe main feature of the package is the `locate()` function, which allows positions in the sky to be converted into TESS pixel coordinates.You can enter the Simbad name of an object:
###Code
from tess_locator import locate
locate("Alpha Cen")
###Output
_____no_output_____
###Markdown
You can pass an optional `time` or `sector` parameter if you are only interested in observations obtained at a specific time:
###Code
locate("Alpha Cen", time="2019-04-28")
locate("Alpha Cen", sector=12)
###Output
_____no_output_____
###Markdown
In addition to passing names, you can pass a custom `SkyCoord` object containing exact coordinates:
###Code
from astropy.coordinates import SkyCoord
locate(SkyCoord(ra=60, dec=70, unit='deg'), sector=19)
###Output
_____no_output_____
###Markdown
The *locate()* function returns a list of `TessCoord` objects which can be accessed using standard list and attribute syntax:
###Code
crd = locate("Alpha Cen")[0]
crd.sector, crd.camera, crd.ccd, crd.column, crd.row
###Output
_____no_output_____
###Markdown
You can also access the coordinates as a Pandas DataFrame:
###Code
locate("Alpha Cen").to_pandas()
###Output
_____no_output_____
###Markdown
Example 2: Accessing FFI filenames for a pixel coordinateWhen you have obtained a `TessCoord` object, you can use it to obtain a list of the TESS Full Frame Images (FFIs) which covered the position:
###Code
crd.list_images()
###Output
_____no_output_____
###Markdown
You can query the image list for a specific time:
###Code
crd.list_images(time="2019-04-28 00:00:00")
###Output
_____no_output_____
###Markdown
You can access the image attributes using standard syntax:
###Code
img = crd.list_images(time="2019-04-28 00:00:00")[0]
img.sector, img.camera, img.ccd, img.time
###Output
_____no_output_____
###Markdown
You can also obtain the full URL of the image:
###Code
img.url
###Output
_____no_output_____
###Markdown
You can export the image list as a Pandas DataFrame:
###Code
crd.list_images().to_pandas()
###Output
_____no_output_____
###Markdown
The full FFI file is often very big. For this reason, the `TessImage` class also provides convenience methods to access exactly those parts of an image you need.For example, the `read_header` method gives you fast access to a FITS header by reading only the first few kilobytes of the file from the cloud:
###Code
hdr = img.read_header(ext=0)
type(hdr)
###Output
_____no_output_____
###Markdown
You can download the WCS in a similar way:
###Code
wcs = img.read_wcs()
type(wcs)
###Output
_____no_output_____
###Markdown
You can download a specific part of the image using the `cutout` method:
###Code
img.cutout(column=200, row=300, shape=(3, 3)).flux
###Output
_____no_output_____
###Markdown
You do have the option to download the entire file using the `read` method, which returns an AstroPy FITS object:
###Code
img.read()
###Output
_____no_output_____
###Markdown
Example 3: Creating your own `TessCoord` objectNote that you don't have to use the `locate` function to obtain a `TessCoord` object. You can create your own as follows:
###Code
from tess_locator import TessCoord
TessCoord(sector=1, camera=1, ccd=1, column=50, row=70)
###Output
_____no_output_____
###Markdown
TutorialThis tutorial will teach the basics of how to use cirq. This tutorial will walk through how to use qubits, gates, and operations to create and simulate your first quantum circuit using cirq. It will briefly introduce devices, unitary matrices, decompositions, and optimizers.Note that this tutorial isn’t a quantum computing 101 tutorial, we assume familiarity of quantum computing at about the level of the textbook “Quantum Computation and Quantum Information” by Nielsen and Chuang.For more in-depth examples closer to those found in current work, check out our tutorials page. To begin, please follow the instructions for [installing Cirq](install.md).
###Code
!pip install cirq --quiet
###Output
_____no_output_____
###Markdown
QubitsThe first part of creating a quantum circuit is to define a set of qubits (also known as a quantum register) to act on.Cirq has three main ways of defining qubits:* `cirq.NamedQubit`: used to label qubits by an abstract name* `cirq.LineQubit`: qubits labelled by number in a linear array * `cirq.GridQubit`: qubits labelled by two numbers in a rectangular lattice.Here are some examples of defining each type of qubit.
###Code
import cirq
# Using named qubits can be useful for abstract algorithms
# as well as algorithms not yet mapped onto hardware.
q0 = cirq.NamedQubit('source')
q1 = cirq.NamedQubit('target')
# Line qubits can be created individually
q3 = cirq.LineQubit(3)
# Or created in a range
# This will create LineQubit(0), LineQubit(1), LineQubit(2)
q0, q1, q2 = cirq.LineQubit.range(3)
# Grid Qubits can also be referenced individually
q4_5 = cirq.GridQubit(4,5)
# Or created in bulk in a square
# This will create 16 qubits from (0,0) to (3,3)
qubits = cirq.GridQubit.square(4)
###Output
_____no_output_____
###Markdown
There are also pre-packaged sets of qubits called [Devices](devices.md). These are qubits along with a set of rules of how they can be used. A `cirq.Device` can be used to apply adjacency rules and other hardware constraints to a quantum circuit. For our example, we will use the `cirq.google.Foxtail` device that comes with cirq. It is a 2x11 grid that mimics early hardware released by Google.
###Code
print(cirq.google.Foxtail)
###Output
(0, 0)───(0, 1)───(0, 2)───(0, 3)───(0, 4)───(0, 5)───(0, 6)───(0, 7)───(0, 8)───(0, 9)───(0, 10)
│ │ │ │ │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ │ │ │
(1, 0)───(1, 1)───(1, 2)───(1, 3)───(1, 4)───(1, 5)───(1, 6)───(1, 7)───(1, 8)───(1, 9)───(1, 10)
###Markdown
Gates and OperationsThe next step is to use the qubits to create operations that can be used in our circuit. Cirq has two concepts that are important to understand here:* A `Gate` is an effect that can be applied to a set of qubits. * An `Operation` is a gate applied to a set of qubits.For instance, `cirq.H` is the quantum [Hadamard](https://en.wikipedia.org/wiki/Quantum_logic_gate#Hadamard_(H)_gate) and is a `Gate` object. `cirq.H(cirq.LineQubit(1))` is an `Operation` object and is the Hadamard gate applied to a specific qubit (line qubit number 1).Many textbook gates are included within cirq. `cirq.X`, `cirq.Y`, and `cirq.Z` refer to the single-qubit Pauli gates. `cirq.CZ`, `cirq.CNOT`, `cirq.SWAP` are a few of the common two-qubit gates. `cirq.measure` is a macro to apply a `MeasurementGate` to a set of qubits. You can find more, as well as instructions on how to create your own custom gates, on the [Gates documentation](gates.ipynb) page.Many arithmetic operations can also be applied to gates. Here are some examples:
###Code
# Example gates
not_gate = cirq.CNOT
pauli_z = cirq.Z
# Using exponentiation to get square root gates
sqrt_x_gate = cirq.X**0.5
sqrt_iswap = cirq.ISWAP**0.5
# Some gates can also take parameters
sqrt_sqrt_y = cirq.YPowGate(exponent=0.25)
# Example operations
q0, q1 = cirq.LineQubit.range(2)
z_op = cirq.Z(q0)
not_op = cirq.CNOT(q0, q1)
sqrt_iswap_op = sqrt_iswap(q0, q1)
###Output
_____no_output_____
###Markdown
Circuits and MomentsWe are now ready to construct a quantum circuit. A `Circuit` is a collection of `Moment`s. A `Moment` is a collection of `Operation`s that all act during the same abstract time slice. Each `Operation` must have a disjoint set of qubits from the other `Operation`s in the `Moment`. A `Moment` can be thought of as a vertical slice of a quantum circuit diagram.Circuits can be constructed in several different ways. By default, cirq will attempt to slide your operation into the earliest possible `Moment` when you insert it.
###Code
circuit = cirq.Circuit()
# You can create a circuit by appending to it
circuit.append(cirq.H(q) for q in cirq.LineQubit.range(3))
# All of the gates are put into the same Moment since none overlap
print(circuit)
# We can also create a circuit directly as well:
print(cirq.Circuit(cirq.SWAP(q, q+1) for q in cirq.LineQubit.range(3)))
###Output
0: ───×───────────
│
1: ───×───×───────
│
2: ───────×───×───
│
3: ───────────×───
###Markdown
Sometimes, you may not want cirq to automatically shift operations all the way to the left. To construct a circuit without doing this, you can create the circuit moment-by-moment or use a different `InsertStrategy`, explained more in the [Circuit documentation](circuits.ipynb).
###Code
# Creates each gate in a separate moment.
print(cirq.Circuit(cirq.Moment([cirq.H(q)]) for q in cirq.LineQubit.range(3)))
###Output
0: ───H───────────
1: ───────H───────
2: ───────────H───
###Markdown
Circuits and DevicesOne important consideration when using real quantum devices is that there are often hardware constraints on the circuit. Creating a circuit with a `Device` will allow you to capture some of these requirements. These `Device` objects will validate the operations you add to the circuit to make sure that no illegal operations are added.Let's look at an example using the Foxtail device.
###Code
q0 = cirq.GridQubit(0, 0)
q1 = cirq.GridQubit(0, 1)
q2 = cirq.GridQubit(0, 2)
adjacent_op = cirq.CZ(q0, q1)
nonadjacent_op = cirq.CZ(q0, q2)
# This is an unconstrained circuit with no device
free_circuit = cirq.Circuit()
# Both operations are allowed:
free_circuit.append(adjacent_op)
free_circuit.append(nonadjacent_op)
print('Unconstrained device:')
print(free_circuit)
print()
# This is a circuit on the Foxtail device
# only adjacent operations are allowed.
print('Foxtail device:')
foxtail_circuit = cirq.Circuit(device=cirq.google.Foxtail)
foxtail_circuit.append(adjacent_op)
try:
# Not allowed, will throw exception
foxtail_circuit.append(nonadjacent_op)
except ValueError as e:
print('Not allowed. %s' % e)
###Output
Unconstrained device:
(0, 0): ───@───@───
│ │
(0, 1): ───@───┼───
│
(0, 2): ───────@───
Foxtail device:
Not allowed. Non-local interaction: cirq.CZ.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 2)).
###Markdown
SimulationThe results of the application of a quantum circuit can be calculated by a `Simulator`. Cirq comes bundled with a simulator that can calculate the results of circuits up to about a limit of 20 qubits. It can be initialized with `cirq.Simulator()`.There are two different approaches to using a simulator:* `simulate()`: Since we are classically simulating a circuit, a simulator can directly access and view the resulting wave function. This is useful for debugging, learning, and understanding how circuits will function. * `run()`: When using actual quantum devices, we can only access the end result of a computation and must sample the results to get a distribution of results. Running the simulator as a sampler mimics this behavior and only returns bit strings as output.Let's try to simulate a 2-qubit "Bell State":
###Code
# Create a circuit to generate a Bell State:
# sqrt(2) * ( |00> + |11> )
bell_circuit = cirq.Circuit()
q0, q1 = cirq.LineQubit.range(2)
bell_circuit.append(cirq.H(q0))
bell_circuit.append(cirq.CNOT(q0,q1))
# Initialize Simulator
s=cirq.Simulator()
print('Simulate the circuit:')
results=s.simulate(bell_circuit)
print(results)
print()
# For sampling, we need to add a measurement at the end
bell_circuit.append(cirq.measure(q0, q1, key='result'))
print('Sample the circuit:')
samples=s.run(bell_circuit, repetitions=1000)
# Print a histogram of results
print(samples.histogram(key='result'))
###Output
Simulate the circuit:
measurements: (no measurements)
output vector: 0.707|00⟩ + 0.707|11⟩
Sample the circuit:
Counter({3: 537, 0: 463})
###Markdown
Using parameter sweepsCirq circuits allow for gates to have symbols as free parameters within the circuit. This is especially useful for variational algorithms, which vary parameters within the circuit in order to optimize a cost function, but it can be useful in a variety of circumstances.For parameters, cirq uses the library `sympy` to add `sympy.Symbol` as parameters to gates and operations. Once the circuit is complete, you can fill in the possible values of each of these parameters with a `Sweep`. There are several possibilities that can be used as a sweep:* `cirq.Points`: A list of manually specified values for one specific symbol as a sequence of floats* `cirq.Linspace`: A linear sweep from a starting value to an ending value.* `cirq.ListSweep`: A list of manually specified values for several different symbols, specified as a list of dictionaries.* `cirq.Zip` and `cirq.Product`: Sweeps can be combined list-wise by zipping them together or through their Cartesian product.A parameterized circuit and sweep together can be run using the simulator or other sampler by changing `run()` to `run_sweep()` and adding the sweep as a parameter.Here is an example of sweeping an exponent of a X gate:
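As an aside, the other sweep types mentioned above are constructed along the same lines - here is a quick sketch with purely illustrative values (these objects are not used in the example below):

    # A handful of explicit values for the symbol 't'
    points = cirq.Points('t', [0.0, 0.25, 0.5, 1.0])
    # The Cartesian product of sweeps over two different symbols
    grid = cirq.Product(cirq.Linspace('t', start=0, stop=1, length=5),
                        cirq.Points('s', [0.0, 1.0]))

With that in mind, the example itself sweeps the exponent with `cirq.Linspace`: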
###Code
import matplotlib.pyplot as plt
import sympy
# Perform an X gate with variable exponent
q = cirq.GridQubit(1,1)
circuit = cirq.Circuit(cirq.X(q) ** sympy.Symbol('t'),
cirq.measure(q, key='m'))
# Sweep exponent from zero (off) to one (on) and back to two (off)
param_sweep = cirq.Linspace('t', start=0, stop=2, length=200)
# Simulate the sweep
s = cirq.Simulator()
trials = s.run_sweep(circuit, param_sweep, repetitions=1000)
# Plot all the results
x_data = [trial.params['t'] for trial in trials]
y_data = [trial.histogram(key='m')[1] / 1000.0 for trial in trials]
plt.scatter('t','p', data={'t': x_data, 'p': y_data})
###Output
_____no_output_____
###Markdown
Unitary matrices and decompositionsMost quantum operations have a unitary matrix representation. This matrix can be accessed by applying `cirq.unitary()`. This can be applied to gates, operations, and circuits that support this protocol and will return the unitary matrix that represents the object.
###Code
print('Unitary of the X gate')
print(cirq.unitary(cirq.X))
print('Unitary of SWAP operator on two qubits.')
q0, q1 = cirq.LineQubit.range(2)
print(cirq.unitary(cirq.SWAP(q0, q1)))
print('Unitary of a sample circuit')
print(cirq.unitary(cirq.Circuit(cirq.X(q0), cirq.SWAP(q0, q1))))
###Output
Unitary of the X gate
[[0.+0.j 1.+0.j]
[1.+0.j 0.+0.j]]
Unitary of SWAP operator on two qubits.
[[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 1.+0.j 0.+0.j]
[0.+0.j 1.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 1.+0.j]]
Unitary of a sample circuit
[[0.+0.j 0.+0.j 1.+0.j 0.+0.j]
[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 1.+0.j]
[0.+0.j 1.+0.j 0.+0.j 0.+0.j]]
###Markdown
Decompositions Many gates can be decomposed into an equivalent circuit with simpler operations and gates. This is called decomposition and can be accomplished with the `cirq.decompose` protocol. For instance, a Hadamard H gate can be decomposed into X and Y gates:
###Code
print(cirq.decompose(cirq.H(cirq.LineQubit(0))))
###Output
[(cirq.Y**0.5).on(cirq.LineQubit(0)), cirq.XPowGate(exponent=1.0, global_shift=-0.25).on(cirq.LineQubit(0))]
###Markdown
Another example is the 3-qubit Toffoli gate, which is equivalent to a controlled-controlled-X gate. Many devices do not support a three-qubit gate, so it is important to be able to decompose it into one- and two-qubit gates:
###Code
q0, q1, q2 = cirq.LineQubit.range(3)
print(cirq.Circuit(cirq.decompose(cirq.TOFFOLI(q0, q1, q2))))
###Output
0: ───T────────────────@─────────────────────────────────@─────────────────────────────@────────────────────────────@───────────────────────────────────────
│ │ │ │
1: ───T───────Y^-0.5───@───Y^0.5────@───T^-1────Y^-0.5───@────────Y^0.5───@───Y^-0.5───@──────Y^0.5────@───Y^-0.5───@──────Y^0.5────@───────────────────────
│ │ │ │
2: ───Y^0.5───X────────T───Y^-0.5───@───Y^0.5───T────────Y^-0.5───────────@───Y^0.5────T^-1───Y^-0.5───@───Y^0.5────T^-1───Y^-0.5───@───Y^0.5───Y^0.5───X───
###Markdown
The above decomposes the Toffoli into a simpler set of one-qubit gates and CZ gates at the cost of lengthening the circuit considerably.Some devices will automatically decompose gates that they do not support. For instance, if we use the `Foxtail` device from above, we can see this in action by adding an unsupported SWAP gate:
###Code
swap = cirq.SWAP(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1))
print(cirq.Circuit(swap, device=cirq.google.Foxtail))
###Output
(0, 0): ───S^-1───Y^-0.5───@───S^-1───Y^0.5───X^0.5───@───S^-1───X^-0.5───@───S^-1───Z───
│ │ │
(0, 1): ───Z──────Y^-0.5───@───S^-1───Y^0.5───X^0.5───@───S^-1───X^-0.5───@───S^-1───S───
###Markdown
OptimizersThe last concept in this tutorial is the optimizer. An optimizer can take a circuit and modify it. Usually, this will entail combining or modifying operations to make it more efficient and shorter, though an optimizer can, in theory, do any sort of circuit manipulation.For example, the `MergeSingleQubitGates` optimizer will take consecutive single-qubit operations and merge them into a single `PhasedXZ` operation.
###Code
q=cirq.GridQubit(1, 1)
optimizer=cirq.MergeSingleQubitGates()
c=cirq.Circuit(cirq.X(q) ** 0.25, cirq.Y(q) ** 0.25, cirq.Z(q) ** 0.25)
print(c)
optimizer.optimize_circuit(c)
print(c)
###Output
(1, 1): ───X^0.25───Y^0.25───T───
┌ ┐
(1, 1): ───│ 0.5 +0.707j -0. -0.5j │───────────
│ 0.354+0.354j 0.146+0.854j│
└ ┘
###Markdown
TutorialThis tutorial will teach the basics of how to use cirq. This tutorial will walk through how to use qubits, gates, and operations to create and simulate your first quantum circuit using cirq. It will briefly introduce devices, unitary matrices, decompositions, and optimizers.Note that this tutorial isn’t a quantum computing 101 tutorial, we assume familiarity of quantum computing at about the level of the textbook “Quantum Computation and Quantum Information” by Nielsen and Chuang.For more in-depth examples closer to those found in current work, check out our case studies page. To begin, please follow the instructions for [installing Cirq](install.md).
###Code
!pip install cirq --quiet
###Output
_____no_output_____
###Markdown
QubitsThe first part of creating a quantum circuit is to define a set of qubits (also known as a quantum registers) to act on.Cirq has three main ways of defining qubits:* `cirq.NamedQubit`: used to label qubits by an abstract name* `cirq.LineQubit`: qubits labelled by number in a linear array * `cirq.GridQubit`: qubits labelled by two numbers in a rectangular lattice.Here are some examples of defining each type of qubit.
###Code
import cirq
# Using named qubits can be useful for abstract algorithms
# as well as algorithms not yet mapped onto hardware.
q0 = cirq.NamedQubit('source')
q1 = cirq.NamedQubit('target')
# Line qubits can be created individually
q3 = cirq.LineQubit(3)
# Or created in a range
# This will create LineQubit(0), LineQubit(1), LineQubit(2)
q0, q1, q2 = cirq.LineQubit.range(3)
# Grid Qubits can also be referenced individually
q4_5 = cirq.GridQubit(4,5)
# Or created in bulk in a square
# This will create 16 qubits from (0,0) to (3,3)
qubits = cirq.GridQubit.square(4)
###Output
_____no_output_____
###Markdown
There are also pre-packaged sets of qubits called [Devices](devices.md). These are qubits along with a set of rules of how they can be used. A `cirq.Device` can be used to apply adjacency rules and other hardware constraints to a quantum circuit. For our example, we will use the `cirq.google.Foxtail` device that comes with cirq. It is a 2x11 grid that mimics early hardware released by Google.
###Code
print(cirq.google.Foxtail)
###Output
(0, 0)───(0, 1)───(0, 2)───(0, 3)───(0, 4)───(0, 5)───(0, 6)───(0, 7)───(0, 8)───(0, 9)───(0, 10)
│ │ │ │ │ │ │ │ │ │ │
│ │ │ │ │ │ │ │ │ │ │
(1, 0)───(1, 1)───(1, 2)───(1, 3)───(1, 4)───(1, 5)───(1, 6)───(1, 7)───(1, 8)───(1, 9)───(1, 10)
###Markdown
Gates and OperationsThe next step is to use the qubits to create operations that can be used in our circuit. Cirq has two concepts that are important to understand here:* A `Gate` is an effect that can be applied to a set of qubits. * An `Operation` is a gate applied to a set of qubits.For instance, `cirq.H` is the quantum [Hadamard](https://en.wikipedia.org/wiki/Quantum_logic_gateHadamard_(H)_gate) and is a `Gate` object. `cirq.H(cirq.LineQubit(1))` is an `Operation` object and is the Hadamard gate applied to a specific qubit (line qubit number 1).Many textbook gates are included within cirq. `cirq.X`, `cirq.Y`, and `cirq.Z` refer to the single-qubit Pauli gates. `cirq.CZ`, `cirq.CNOT`, `cirq.SWAP` are a few of the common two-qubit gates. `cirq.measure` is a macro to apply a `MeasurementGate` to a set of qubits. You can find more, as well as instructions on how to creats your own custom gates, on the [Gates documentation](gates.md) page.Many arithmetic operations can also be applied to gates. Here are some examples:
###Code
# Example gates
not_gate = cirq.CNOT
pauli_z = cirq.Z
# Using exponentiation to get square root gates
sqrt_x_gate = cirq.X**0.5
sqrt_iswap = cirq.ISWAP**0.5
# Some gates can also take parameters
sqrt_sqrt_y = cirq.YPowGate(exponent=0.25)
# Example operations
q0, q1 = cirq.LineQubit.range(2)
z_op = cirq.Z(q0)
not_op = cirq.CNOT(q0, q1)
sqrt_iswap_op = sqrt_iswap(q0, q1)
###Output
_____no_output_____
###Markdown
Circuits and MomentsWe are now ready to construct a quantum circuit. A `Circuit` is a collection of `Moment`s. A `Moment` is a collection of `Operation`s that all act during the same abstract time slice. Each `Operation` must have a disjoint set of qubits from the other `Operation`s in the `Moment`. A `Moment` can be thought of as a vertical slice of a quantum circuit diagram.Circuits can be constructed in several different ways. By default, cirq will attempt to slide your operation into the earliest possible `Moment` when you insert it.
###Code
circuit = cirq.Circuit()
# You can create a circuit by appending to it
circuit.append(cirq.H(q) for q in cirq.LineQubit.range(3))
# All of the gates are put into the same Moment since none overlap
print(circuit)
# We can also create a circuit directly as well:
print(cirq.Circuit(cirq.SWAP(q, q+1) for q in cirq.LineQubit.range(3)))
###Output
0: ───×───────────
│
1: ───×───×───────
│
2: ───────×───×───
│
3: ───────────×───
###Markdown
Sometimes, you may not want cirq to automatically shift operations all the way to the left. To construct a circuit without doing this, you can create the circuit moment-by-moment or use a different `InsertStrategy`, explained more in the [Circuit documentation](circuits.md).
###Code
# Creates each gate in a separate moment.
print(cirq.Circuit(cirq.Moment([cirq.H(q)]) for q in cirq.LineQubit.range(3)))
###Output
0: ───H───────────
1: ───────H───────
2: ───────────H───
###Markdown
Circuits and DevicesOne important comnsideration when using real quantum devices is that there are often hardware constraints on the circuit. Creating a circuit with a `Device` will allow you to capture some of these requirements. These `Device` objects will validate the operations you add to the circuit to make sure that no illegal operations are added.Let's look at an example using the Foxtail device.
###Code
q0 = cirq.GridQubit(0, 0)
q1 = cirq.GridQubit(0, 1)
q2 = cirq.GridQubit(0, 2)
adjacent_op = cirq.CZ(q0, q1)
nonadjacent_op = cirq.CZ(q0, q2)
# This is an unconstrained circuit with no device
free_circuit = cirq.Circuit()
# Both operations are allowed:
free_circuit.append(adjacent_op)
free_circuit.append(nonadjacent_op)
print('Unconstrained device:')
print(free_circuit)
print()
# This is a circuit on the Foxtail device
# only adjacent operations are allowed.
print('Foxtail device:')
foxtail_circuit = cirq.Circuit(device=cirq.google.Foxtail)
foxtail_circuit.append(adjacent_op)
try:
# Not allowed, will throw exception
foxtail_circuit.append(nonadjacent_op)
except ValueError as e:
print('Not allowed. %s' % e)
###Output
Unconstrained device:
(0, 0): ───@───@───
│ │
(0, 1): ───@───┼───
│
(0, 2): ───────@───
Foxtail device:
Not allowed. Non-local interaction: cirq.CZ.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 2)).
###Markdown
SimulationThe results of the application of a quantum circuit can be calculated by a `Simulator`. Cirq comes bundled with a simulator that can calculate the results of circuits up to about a limit of 20 qubits. It can be initialized with `cirq.Simulator()`.There are two different approaches to using a simulator:* `simulate()`: Since we are classically simulating a circuit, a simulator can directly access and view the resulting wave function. This is useful for debugging, learning, and understanding how circuits will function. * `run()`: When using actual quantum devices, we can only access the end result of a computation and must sample the results to get a distribution of results. Running the simulator as a sampler mimics this behavior and only returns bit strings as output.Let's try to simulate a 2-qubit "Bell State":
###Code
# Create a circuit to generate a Bell State:
# sqrt(2) * ( |00> + |11> )
bell_circuit = cirq.Circuit()
q0, q1 = cirq.LineQubit.range(2)
bell_circuit.append(cirq.H(q0))
bell_circuit.append(cirq.CNOT(q0,q1))
# Initialize Simulator
s=cirq.Simulator()
print('Simulate the circuit:')
results=s.simulate(bell_circuit)
print(results)
print()
# For sampling, we need to add a measurement at the end
bell_circuit.append(cirq.measure(q0, q1, key='result'))
print('Sample the circuit:')
samples=s.run(bell_circuit, repetitions=1000)
# Print a histogram of results
print(samples.histogram(key='result'))
###Output
Simulate the circuit:
measurements: (no measurements)
output vector: 0.707|00⟩ + 0.707|11⟩
Sample the circuit:
Counter({3: 537, 0: 463})
###Markdown
Using parameter sweepsCirq circuits allow for gates to have symbols as free parameters within the circuit. This is especially useful for variational algorithms, which vary parameters within the circuit in order to optimize a cost function, but it can be useful in a variety of circumstances.For parameters, cirq uses the library `sympy` to add `sympy.Symbol` as parameters to gates and operations. Once the circuit is complete, you can fill in the possible values of each of these parameters with a `Sweep`. There are several possibilities that can be used as a sweep:* `cirq.Points`: A list of manually specified values for one specific symbol as a sequence of floats* `cirq.Linspace`: A linear sweep from a starting value to an ending value.* `cirq.ListSweep`: A list of manually specified values for several different symbols, specified as a list of dictionaries.* `cirq.Zip` and `cirq.Product`: Sweeps can be combined list-wise by zipping them together or through their Cartesian product.A parameterized circuit and sweep together can be run using the simulator or other sampler by changing `run()` to `run_sweep()` and adding the sweep as a parameter.Here is an example of sweeping an exponent of a X gate:
###Code
import matplotlib.pyplot as plt
import sympy
# Perform an X gate with variable exponent
q = cirq.GridQubit(1,1)
circuit = cirq.Circuit(cirq.X(q) ** sympy.Symbol('t'),
cirq.measure(q, key='m'))
# Sweep exponent from zero (off) to one (on) and back to two (off)
param_sweep = cirq.Linspace('t', start=0, stop=2, length=200)
# Simulate the sweep
s = cirq.Simulator()
trials = s.run_sweep(circuit, param_sweep, repetitions=1000)
# Plot all the results
x_data = [trial.params['t'] for trial in trials]
y_data = [trial.histogram(key='m')[1] / 1000.0 for trial in trials]
plt.scatter('t','p', data={'t': x_data, 'p': y_data})
###Output
_____no_output_____
###Markdown
Unitary matrices and decompositionsMost quantum operations have a unitary matrix representation. This matrix can be accessed by applying `cirq.unitary()`. This can be applied to gates, operations, and circuits that support this protocol and will return the unitary matrix that represents the object.
###Code
print('Unitary of the X gate')
print(cirq.unitary(cirq.X))
print('Unitary of SWAP operator on two qubits.')
q0, q1 = cirq.LineQubit.range(2)
print(cirq.unitary(cirq.SWAP(q0, q1)))
print('Unitary of a sample circuit')
print(cirq.unitary(cirq.Circuit(cirq.X(q0), cirq.SWAP(q0, q1))))
###Output
Unitary of the X gate
[[0.+0.j 1.+0.j]
[1.+0.j 0.+0.j]]
Unitary of SWAP operator on two qubits.
[[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 1.+0.j 0.+0.j]
[0.+0.j 1.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 1.+0.j]]
Unitary of a sample circuit
[[0.+0.j 0.+0.j 1.+0.j 0.+0.j]
[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
[0.+0.j 0.+0.j 0.+0.j 1.+0.j]
[0.+0.j 1.+0.j 0.+0.j 0.+0.j]]
###Markdown
Decompositions Many gates can be decomposed into an equivalent circuit with simpler operations and gates. This is called decomposition and can be accomplished with the `cirq.decompose` protocol. For instance, a Hadamard H gate can be decomposed into X and Y gates:
###Code
print(cirq.decompose(cirq.H(cirq.LineQubit(0))))
###Output
[(cirq.Y**0.5).on(cirq.LineQubit(0)), cirq.XPowGate(exponent=1.0, global_shift=-0.25).on(cirq.LineQubit(0))]
###Markdown
Another example is the 3-qubit Toffoli gate, which is equivalent to a controlled-controlled-X gate. Many devices do not support a three qubit gate, so it is important
###Code
q0, q1, q2 = cirq.LineQubit.range(3)
print(cirq.Circuit(cirq.decompose(cirq.TOFFOLI(q0, q1, q2))))
###Output
0: ───T────────────────@─────────────────────────────────@─────────────────────────────@────────────────────────────@───────────────────────────────────────
│ │ │ │
1: ───T───────Y^-0.5───@───Y^0.5────@───T^-1────Y^-0.5───@────────Y^0.5───@───Y^-0.5───@──────Y^0.5────@───Y^-0.5───@──────Y^0.5────@───────────────────────
│ │ │ │
2: ───Y^0.5───X────────T───Y^-0.5───@───Y^0.5───T────────Y^-0.5───────────@───Y^0.5────T^-1───Y^-0.5───@───Y^0.5────T^-1───Y^-0.5───@───Y^0.5───Y^0.5───X───
###Markdown
The above decomposes the Toffoli into a simpler set of one-qubit gates and CZ gates at the cost of lengthening the circuit considerably. Some devices will automatically decompose gates that they do not support. For instance, if we use the `Foxtail` device from above, we can see this in action by adding an unsupported SWAP gate:
###Code
swap = cirq.SWAP(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1))
print(cirq.Circuit(swap, device=cirq.google.Foxtail))
###Output
(0, 0): ───S^-1───Y^-0.5───@───S^-1───Y^0.5───X^0.5───@───S^-1───X^-0.5───@───S^-1───Z───
│ │ │
(0, 1): ───Z──────Y^-0.5───@───S^-1───Y^0.5───X^0.5───@───S^-1───X^-0.5───@───S^-1───S───
###Markdown
OptimizersThe last concept in this tutorial is the optimizer. An optimizer can take a circuit and modify it. Usually, this will entail combining or modifying operations to make the circuit more efficient and shorter, though an optimizer can, in theory, do any sort of circuit manipulation. For example, the `MergeSingleQubitGates` optimizer will take consecutive single-qubit operations and merge them into a single equivalent operation (shown below as a one-qubit matrix gate).
###Code
q=cirq.GridQubit(1, 1)
optimizer=cirq.MergeSingleQubitGates()
c=cirq.Circuit(cirq.X(q) ** 0.25, cirq.Y(q) ** 0.25, cirq.Z(q) ** 0.25)
print(c)
optimizer.optimize_circuit(c)
print(c)
###Output
(1, 1): ───X^0.25───Y^0.25───T───
┌ ┐
(1, 1): ───│ 0.5 +0.707j -0. -0.5j │───────────
│ 0.354+0.354j 0.146+0.854j│
└ ┘
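###Markdown
A small, hedged sketch (not from the original tutorial): since the optimizer modifies the circuit in place, we can rebuild the original circuit and check that the merged operation implements the same unitary:
###Code
import numpy as np
q = cirq.GridQubit(1, 1)
original = cirq.Circuit(cirq.X(q) ** 0.25, cirq.Y(q) ** 0.25, cirq.Z(q) ** 0.25)
optimized = cirq.Circuit(cirq.X(q) ** 0.25, cirq.Y(q) ** 0.25, cirq.Z(q) ** 0.25)
cirq.MergeSingleQubitGates().optimize_circuit(optimized)
# The merged matrix gate is the literal product of the three rotations, so the unitaries should match
print(np.allclose(cirq.unitary(original), cirq.unitary(optimized)))
###Output
_____no_output_____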
###Markdown
Rechunker TutorialThis tutorial notebook explains how to use rechunker with real datasets. We will also use xarray to make some things easier and prettier, but we note that xarray is not a dependency for rechunker. Toy Example Create Example DataHere we load one of xarray's tutorial datasets and write it to Zarr. This is not actually a big dataset, so rechunker is not really needed here. But it's a convenient example.
###Code
import xarray as xr
xr.set_options(display_style='text')
import zarr
import dask.array as dsa
ds = xr.tutorial.open_dataset("air_temperature")
# create initial chunk structure
ds = ds.chunk({'time': 100})
ds.air.encoding = {} # helps when writing to zarr
ds
###Output
_____no_output_____
###Markdown
We can examine the chunk structure of the data variable using Dask's pretty Array repr.
###Code
ds.air.data
! rm -rf *.zarr # clean up any existing temporary data
ds.to_zarr('air_temperature.zarr')
###Output
_____no_output_____
###Markdown
Now we open up a Zarr Group and Array that we will use as inputs to rechunker.
###Code
source_group = zarr.open('air_temperature.zarr')
print(source_group.tree())
source_array = source_group['air']
source_array.info
###Output
_____no_output_____
###Markdown
Rechunk a single ArrayThe original array has chunks of (100, 25, 53). Let's rechunk it to be contiguous in time, but chunked in space. We specify a small value of `max_mem` in order to force rechunker to create an intermediate dataset. We also have to specify a place to store the final and intermediate data. We use the [rechunk](api.rst#rechunker.rechunk) function, which returns a [Rechunked](api.rst#rechunker.Rechunked) object.
###Code
from rechunker import rechunk
target_chunks = (2920, 25, 1)
max_mem = '1MB'
target_store = 'air_rechunked.zarr'
temp_store = 'air_rechunked-tmp.zarr'
array_plan = rechunk(source_array, target_chunks, max_mem, target_store, temp_store)
array_plan
###Output
_____no_output_____
###Markdown
Since this array has dimensions, we can also specify the chunks using a dictionary syntax.
###Code
target_chunks_dict = {'time': 2920, 'lat': 25, 'lon': 1}
# need to remove the existing stores or it won't work
!rm -rf air_rechunked.zarr air_rechunked-tmp.zarr
array_plan = rechunk(source_array, target_chunks_dict, max_mem, target_store, temp_store)
array_plan
###Output
_____no_output_____
###Markdown
The `array_plan` is a `Rechunked` object. It has not actually performed the rechunking yet. To do this, we need to call the `execute` method. This will use Dask to perform the rechunking.
###Code
result = array_plan.execute()
result.chunks
###Output
_____no_output_____
###Markdown
By default, Dask will use the multi-threaded scheduler. Since rechunking can take a long time, we might want to use a progress bar.
###Code
from dask.diagnostics import ProgressBar
with ProgressBar():
array_plan.execute()
###Output
[########################################] | 100% Completed | 6.2s
###Markdown
If we create a distributed cluster, then rechunker will use that when it executes.
###Code
from dask.distributed import Client, LocalCluster, progress
cluster = LocalCluster()
client = Client(cluster)
future = array_plan.persist()
progress(future)
###Output
_____no_output_____
###Markdown
Now that it is written to disk, we can open the rechunked array however we please. Using Zarr...
###Code
target_array = zarr.open('air_rechunked.zarr')
target_array
###Output
_____no_output_____
###Markdown
...or Dask
###Code
target_array_dask = dsa.from_zarr('air_rechunked.zarr')
target_array_dask
###Output
_____no_output_____
###Markdown
Rechunk a GroupIn the example above, we only rechunked a single array. We can open it with Dask, but not Xarray, because it doesn't contain any coordinates or metadata. Rechunker also supports rechunking entire groups. In this case, `target_chunks` must be a dictionary.
###Code
target_chunks = {
'air': {'time': 2920, 'lat': 25, 'lon': 1},
'time': None, # don't rechunk this array
'lon': None,
'lat': None,
}
max_mem = '1MB'
target_store = 'group_rechunked.zarr'
temp_store = 'group_rechunked-tmp.zarr'
array_plan = rechunk(source_group, target_chunks, max_mem, target_store, temp_store)
array_plan
array_plan.execute()
###Output
_____no_output_____
###Markdown
Now that we have written a group, we can open it back up with Xarray.
###Code
xr.open_zarr('group_rechunked.zarr')
###Output
_____no_output_____
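###Markdown
As a small, hedged check (assuming the writes above succeeded), we can confirm directly with Zarr that the `air` array in the rechunked group carries the requested chunking:
###Code
rechunked_group = zarr.open('group_rechunked.zarr')
# The chunk shape should match the target_chunks we requested for 'air'
print(rechunked_group['air'].chunks)  # expected: (2920, 25, 1)
###Output
_____no_output_____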
###Markdown
Cloud ExampleIn this example we use real data from Pangeo's [Cloud Data Catalog](http://catalog.pangeo.io/). This dataset is stored in Google Cloud Storage. We also use a [Dask Gateway](https://gateway.dask.org/) distributed cluster to scale up our processing. This part of the tutorial won't work for you unless you are in a [Pangeo Cloud](http://pangeo.io/cloud.html) environment or binder.
###Code
from dask_gateway import GatewayCluster
cluster = GatewayCluster()
cluster.scale(20)
cluster
from dask.distributed import Client
client = Client(cluster)
client
import gcsfs
# a zarr group lives here
url = 'gs://pangeo-cmems-duacs'
gcs = gcsfs.GCSFileSystem(requester_pays=True)
source_store = gcs.get_mapper(url)
###Output
_____no_output_____
###Markdown
Open Zarr Array
###Code
group = zarr.open_consolidated(source_store, mode='r')
source_array = group['sla']
source_array
source_array.chunks
###Output
_____no_output_____
###Markdown
Make a Rechunking Plan
###Code
max_mem = '1GB'
target_chunks = (8901, 72, 72)
# you must have write access to this location
store_tmp = gcs.get_mapper('pangeo-scratch/rabernat/rechunker_demo/temp.zarr')
store_target = gcs.get_mapper('pangeo-scratch/rabernat/rechunker_demo/target.zarr')
r = rechunk(source_array, target_chunks, max_mem,
store_target, store_tmp)
r
###Output
_____no_output_____
###Markdown
Execute the Plan
###Code
result = r.execute()
result
dsa.from_zarr(result)
###Output
_____no_output_____
###Markdown
Match-3 Game Algorithm
###Code
import random
def show_image(puzzle):
import matplotlib.pyplot as plt
import numpy as np
plt.matshow(np.array(puzzle))
plt.show()
###Output
_____no_output_____
###Markdown
Initialize the board
###Code
length = 9
width = 9
puzzle = [[0 for point_x in range(length)] for point_y in range(width)]
puzzle
###Output
_____no_output_____
###Markdown
Fill the cells from top to bottom and left to right. To avoid creating a run of three, when the two cells to the left of the cell being filled share the same color, that color is removed from the random choices; the same rule applies to the two cells above.
###Code
color = 6
def fill_block(puzzle, point_x, point_y):
    # colors still available to choose from
ready_color = set(range(color))
    # the two cells above share a color, so remove that color
if point_x-2 >= 0 and puzzle[point_x-1][point_y] == puzzle[point_x-2][point_y]:
ready_color.remove(puzzle[point_x-1][point_y])
    # the two cells to the left share a color
if point_y-2 >= 0 and puzzle[point_x][point_y-1] == puzzle[point_x][point_y-2]:
        # if the previous step did not already remove this color, remove it now
if puzzle[point_x][point_y-1] in ready_color:
ready_color.remove(puzzle[point_x][point_y-1])
    # fill the cell with a randomly chosen color
puzzle[point_x][point_y] = random.choice(list(ready_color))
return puzzle
def random_init(puzzle):
    # fill the board row by row, column by column
for point_x in range(width):
for point_y in range(length):
puzzle = fill_block(puzzle, point_x, point_y)
return puzzle
puzzle = random_init(puzzle)
show_image(puzzle)
###Output
_____no_output_____
###Markdown
Computing the removable cells: for a given cell, check whether the cells adjacent to it share its color and can therefore be cleared. We do not compute full connectivity of same-colored cells here. For the cell's row and column, slide a three-cell-wide window and check whether all three cells have the same color. If they do, record the coordinates of those three cells in a list. Finally, deduplicate the list to obtain the coordinates of all cells to be cleared.
###Code
def match_blocks(puzzle, point_x, point_y):
matched = list()
for i in range(length-2):
if puzzle[point_x][i] - puzzle[point_x][i+1] == puzzle[point_x][i+1] - puzzle[point_x][i+2] == 0:
matched += [(point_x, i), (point_x, i+1), (point_x, i+2)]
for i in range(width-2):
if puzzle[i+1][point_y] - puzzle[i][point_y] == puzzle[i+2][point_y] - puzzle[i+1][point_y] == 0:
matched += [(i, point_y), (i+1, point_y), (i+2, point_y)]
return matched
###Output
_____no_output_____
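###Markdown
Before moving on, here is a small, hedged sanity check (not part of the original notebook): because `fill_block` avoids creating runs of three, scanning every cell of the freshly initialized board with `match_blocks` should find nothing.
###Code
initial_matches = set()
for x in range(width):
    for y in range(length):
        initial_matches.update(match_blocks(puzzle, x, y))
print(len(initial_matches))  # expected: 0
###Output
_____no_output_____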
###Markdown
Given the coordinates of the cell to move and the direction to move it in, swap the two cells and run the match check on both cells involved in the swap.
###Code
def match_puzzle(puzzle, point_x, point_y, arrow):
if arrow == 'left':
x1, y1, x2, y2 = point_x, point_y, point_x, point_y-1
elif arrow == 'right':
x1, y1, x2, y2 = point_x, point_y, point_x, point_y+1
elif arrow == 'up':
x1, y1, x2, y2 = point_x-1, point_y, point_x, point_y
elif arrow == 'down':
x1, y1, x2, y2 = point_x+1, point_y, point_x, point_y
puzzle[x1][y1], puzzle[x2][y2] = puzzle[x2][y2], puzzle[x1][y1]
matched = match_blocks(puzzle, x1, y1) + match_blocks(puzzle, x2, y2)
if len(matched) == 0:
puzzle[x1][y1], puzzle[x2][y2] = puzzle[x2][y2], puzzle[x1][y1]
return list(set(matched))
point_x = 8
point_y = 3
arrow = 'up'
matched = match_puzzle(puzzle, point_x, point_y, arrow)
print(matched)
show_image(puzzle)
###Output
[(7, 3), (7, 2), (7, 1)]
###Markdown
Dead-lock detection: scan the current board; if a group of clearable cells can be produced, the board is not a dead lock. There are two patterns that can be turned into a run of three: two adjacent same-colored cells, and two same-colored cells separated by one cell.
###Code
# generate a fresh puzzle
puzzle = random_init(puzzle)
show_image(puzzle)
###Output
_____no_output_____
###Markdown
If a group is found, return `False` immediately and skip the remaining checks; if none is found, return `True`.
###Code
def is_dead(puzzle):
    # to cover the other direction, mirror the board and run the check again
puzzle_mirror = [row[::-1] for row in puzzle]
    # to cover the other orientation, rotate the board 90 degrees (transpose) and check again
puzzle_transfor = list(map(list,zip(*puzzle)))
    # look for two adjacent same-colored cells: [X X]
if block_status_1(puzzle):
return False
if block_status_1(puzzle_mirror):
return False
if block_status_1(puzzle_transfor):
return False
    # look for two same-colored cells separated by one cell: [X _ X]
if block_status_2(puzzle):
return False
if block_status_2(puzzle_transfor):
return False
return True
###Output
_____no_output_____
###Markdown
Check whether the cells around these patterns can complete a group; return `True` if they can, `False` otherwise.
###Code
def block_status_1(puzzle):
for point_x in range(width):
for point_y in range(length-1):
if puzzle[point_x][point_y] == puzzle[point_x][point_y+1]:
if point_x >= 1 and point_y >= 1 and puzzle[point_x-1][point_y-1] == puzzle[point_x][point_y]:
return True
if point_y >= 2 and puzzle[point_x][point_y-2] == puzzle[point_x][point_y]:
return True
                if point_x+1 < width and point_y >= 1 and puzzle[point_x+1][point_y-1] == puzzle[point_x][point_y]:  # candidate cell below and to the left of the pair
return True
return False
def block_status_2(puzzle):
for point_x in range(width):
for point_y in range(1, length-1):
if puzzle[point_x][point_y-1] == puzzle[point_x][point_y+1]:
if point_x >= 1 and puzzle[point_x-1][point_y] == puzzle[point_x][point_y-1]:
return True
if point_x+1 < length and puzzle[point_x+1][point_y] == puzzle[point_x][point_y-1]:
return True
return False
status = is_dead(puzzle)
status
###Output
_____no_output_____
###Markdown
xDSL tutorial Imports and setup
###Code
from xdsl import *
from xdsl.ir import *
from xdsl.irdl import *
from xdsl.dialects.std import *
from xdsl.dialects.builtin import *
from xdsl.parser import *
from xdsl.printer import *
from xdsl.util import *
# MLContext, containing information about the registered dialects
context = MLContext()
# Some useful dialects
std = Std(context)
builtin = Builtin(context)
# Printer used to pretty-print MLIR data structures
printer = Printer()
###Output
_____no_output_____
###Markdown
High-level presentation (TODO)Base ideas of what xDSL is. Example of a small program, and SSA. Base IR features Dialects Dialects are namespaces that contain a collection of attributes and operations. For instance, the Standard dialect contains (but not exclusively) the attribute `!std.i32` and the operation `std.constant`. A dialect is usually a single level of abstraction in the IR, and multiple dialects can be used together in the same MLIR program. Dialects are currently Python classes registering operations and attributes, and providing simple accessors to their attributes and operations. This will however change in the near future to provide a better interface to dialects. Attributes Attributes represent compile-time information. In particular, each SSA-value is associated with an attribute, representing its type. Each attribute type has a name and belongs in a dialect. The textual representation of attributes is prefixed with `!` and the dialect name. For instance, the `vector` attribute has the format `!builtin.vector<T>`, where `T` is the expected parameter of the attribute. In Python, attributes are always expected to be immutable objects inheriting from either `Data` or `ParametrizedAttribute`. Data attributes `Data` attributes are used to wrap python data structures. For instance, the `IntAttr` is an attribute containing an `int`, and the `StringAttr` is an attribute containing a `str`. `Data` attributes are parsed and printed with the format `dialect_name.attr_name<custom_format>`, where `custom_format` is the format defined by the parser and printer of each `Data` attribute. Note that some attributes, such as `StringAttr`, are shortened by the printer, and do not require the use of `dialect_name.attr_name`. For instance, `builtin.str` is shortened to `"foo"`. Here is an example of how to create and print an `IntAttr` attribute:
###Code
# Attribute definitions usually define a `get` method to create the attribute
my_int = IntAttr.get(42)
printer.print_attribute(my_int)
###Output
!int<42>
###Markdown
Note that here, the `IntAttr` does not print a dialect prefix. This will be fixed soon-ish.
###Code
# Access the data in the IntAttr:
print(my_int.data)
###Output
42
###Markdown
Parametrized attributesParametrized attributes are attributes that optionally contain multiple attributes as parameters. For instance, the `integer` attribute from `builtin` is a parametrized attribute and expects two attributes as parameters. Parametrized attributes are printed with the format `!dialect.attr_name<attr_1, ..., attr_N>`, where the `attr_i` are the attribute parameters. Here is an example of how to create and inspect an `integer_type` attribute, which represents a machine integer type. It is parametrized by a single `IntAttr` parameter, representing the bitwidth.
###Code
# Get the int that will be passed as parameter to the integer_type
int_64 = IntAttr.get(64)
i64 = IntegerType([int_64])
printer.print_attribute(i64)
# Get back the parameters of IntegerType
printer.print_attribute(i64.parameters[0])
# Use a custom `get` method from IntegerType to construct it
assert IntegerType.get(64) == i64
###Output
_____no_output_____
###Markdown
Note that parametrized attributes may define invariants that need to be respected. For instance, constructing an `integer_type` with wrong parameters will trigger an error:
###Code
# Try to create an IntegerType with wrong parameters
try:
bad_attr = IntegerType([i64])
except Exception as err:
print(err)
###Output
IntegerType(name='integer_type', parameters=[IntAttr(name='int', data=64)]) should be of base attribute int
###Markdown
Operations Operations represent the computation that a program can do. They span all abstraction levels, and can be domain-specific. For instance, `std.addi` will add two integers, while `scf.if` represents an if/else structure. Operations are composed of:* A base operation type, which represents the semantics of the operation;* Operands, which are SSA-values previously defined;* Results, which are new SSA-values defined by the operation;* Attributes, which encode compile-time information about the operation;* Regions, which contain operations, and are used to represent more complex control-flow;* Successors, which are basic block names to which the operation can give control. The format of an operation is: `results = dialect_name.op_name(operands) (successors) [attributes] regions`. Here is, for example, how to create a constant operation, representing a constant value:
###Code
const_op = Constant.create([], [std.i64], attributes={"value": IntegerAttr.get(62, std.i64)})
printer.print_op(const_op)
###Output
%0 : !i64 = std.constant() ["value" = 62 : !i64]
###Markdown
Note that dialects usually define methods to ease the definition of such operations:
###Code
const_op2 = std.constant_from_attr(IntegerAttr.get(62, std.i64), std.i64)
printer.print_op(const_op2)
###Output
%1 : !i64 = std.constant() ["value" = 62 : !i64]
###Markdown
We can use the results from the operation to pass them as operands for a later operation. For instance, we will add the constant to itself using the `std.addi` operation:
###Code
add_op = Addi.create([const_op.results[0], const_op.results[0]], [std.i32], {})
printer.print_op(const_op)
print()
printer.print_op(add_op)
###Output
%2 : !i64 = std.constant() ["value" = 62 : !i64]
%3 : !i32 = std.addi(%2 : !i64, %2 : !i64)
###Markdown
We can also put the operations in regions, which can then be used by other operations (such as func)
###Code
my_region = Region.from_operation_list([const_op, add_op])
printer._print_region(my_region)
###Output
{
%4 : !i64 = std.constant() ["value" = 62 : !i64]
%5 : !i32 = std.addi(%4 : !i64, %4 : !i64)
}
###Markdown
Functions are created using the `std.func` op, which contains a single region:
###Code
my_func = func2("my_function", [], [], my_region)
printer.print_op(my_func)
###Output
builtin.func() ["sym_name" = "my_function", "type" = !fun<[], []>, "sym_visibility" = "private"]{
%6 : !i64 = std.constant() ["value" = 62 : !i64]
%7 : !i32 = std.addi(%6 : !i64, %6 : !i64)
}
|
git_notebooks/ALL_MODELS_comparison_46A_dir1.ipynb | ###Markdown
Dataset
###Code
import json
import pandas as pd
import pymysql
from sqlalchemy import create_engine
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn releases
from sklearn import metrics
pd.set_option('display.max_columns', 500)
with open("credentials.json") as f:
credentials = json.loads(f.read())
host = credentials["host"]
user = credentials["db_user"]
password = credentials["db_pass"]
db = credentials["db_name"]
engine = create_engine(f"mysql+pymysql://{user}:{password}@{host}:3306/{db}")
df = pd.read_sql_query('SELECT * FROM trips_2017 WHERE lineid = "46A" AND direction = 1', engine)
df.head()
# Replace missing actual time departure values with timetable values
df.actualtime_dep.fillna(df.plannedtime_dep, inplace=True)
df.head()
# Remove rows with missing values for actual time arrival as we cannot safely assume these are as per timetable
df = df[pd.notnull(df['actualtime_arr'])]
df.head()
# Create a new column for trip duration
df['trip_duration'] = df['actualtime_arr'] - df['actualtime_dep']
df.head()
# Create a new column with the hour of the day the trip took place
df['actualtime_dep_H'] = round(df['actualtime_dep']/3600)
df.head()
# Hour of actual time arrival
df['actualtime_arr_H'] = round(df['actualtime_arr']/3600)
df.head()
# Average hour of the day of the journey
df['avg_H'] = (df['actualtime_dep_H'] + df['actualtime_arr_H']) / 2
df.head()
df['avg_H'] = df['avg_H'].astype(int)
df.head()
# Creating column solely for the dates to correlate with the dates column on the historical weather data table
df['time'] = df['timestamp'] + df['avg_H'] * 3600
df.time
# Removing suppressed rows where suppressed=1.0
df = df.query('suppressed != 1.0')
df.index = range(len(df))
# Creating columns from timestamp for further processing
df['dayofweek'] = df['timestamp']
df['monthofyear'] = df['timestamp']
# Converting the unix time to datetime format
df.dayofweek = pd.to_datetime(df['dayofweek'], unit='s')
df.monthofyear = pd.to_datetime(df['monthofyear'], unit='s')
# Converting datetime to name of weekday, and to name of month (in separate columns)
df['dayofweek'] = df['dayofweek'].dt.weekday_name
df['monthofyear'] = df['monthofyear'].dt.month
# Creating dummy variables for weekday names and name of month
df_dayofweek_dummies = pd.get_dummies(df['dayofweek'])
# Removing rows not in the month of March and up to Easter in April
# Easter Sunday was on the 16th of April and the timestamp is Monday, April 10th, which is the first Monday of the Easter break
df = df.query('monthofyear == 2 or monthofyear == 3 or monthofyear == 4 and time >= 1487548800 and time < 1491782400')
df.head()
df.shape
df1 = pd.concat([df, df_dayofweek_dummies], axis=1, join_axes=[df.index])
df1
# Pull weather data from database
df2 = pd.read_sql_query('SELECT * FROM DarkSky_historical_weather_data WHERE year = 2017', engine)
df2.head()
d = {'clear-day':'clear','clear-night':'clear','partly-cloudy-day':'partly-cloudy','partly-cloudy-night':'partly-cloudy'}
df2 = df2.replace(d)
df2.rename(columns={'day_of_week': 'dayofweek', 'month': 'monthofyear'}, inplace=True)
df3 = pd.merge(df1, df2, on=['time'])
df3.head()
df3 = df3[['avg_H', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'temp', 'precip_intensity','trip_duration']]
# Trip duration is in seconds, convert to minutes and round to the nearest integer
df3['trip_duration'] = round(df3['trip_duration']/60)
df3['trip_duration'] = df3['trip_duration'].astype(int)
df3['temp'] = round(df3['temp'])
df3['temp'] = df3['temp'].astype(int)
#df3 = df3[['avg_H', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'temp','trip_duration']]
df3.head()
df3.shape
###Output
_____no_output_____
###Markdown
PreprocessingYou can see that our dataset has eleven columns. The task is to predict the trip duration (last column) based on the day of the week, the time of the day and the weather conditions (temperature and rain intensity). The next step is to split our dataset into attributes and labels.
###Code
# Assign the first ten columns (the features) to the X variable
X = df3.iloc[:, 0:10]
# Assign the trip_duration column to the y variable
y = df3['trip_duration']
y.head()
# Split the dataset 80/20
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)
###Output
_____no_output_____
###Markdown
Gradient Boosting Regression http://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_regression.html
###Code
from sklearn import ensemble
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
#n_estimators : int (default=100)
#The number of boosting stages to perform.
#Gradient boosting is fairly robust to over-fitting so a large number usually results in better performance.
#max_depth : integer, optional (default=3)
#maximum depth of the individual regression estimators.
#The maximum depth limits the number of nodes in the tree.
#Tune this parameter for best performance; the best value depends on the interaction of the input variables.
#min_samples_split : int, float, optional (default=2)
#The minimum number of samples required to split an internal node:
#If int, then consider min_samples_split as the minimum number.
#If float, then min_samples_split is a percentage and ceil(min_samples_split * n_samples) are the minimum number of samples for each split.
#Changed in version 0.18: Added float values for percentages.
#learning_rate : float, optional (default=0.1)
#learning rate shrinks the contribution of each tree by learning_rate.
#There is a trade-off between learning_rate and n_estimators.
#loss : {‘deviance’, ‘exponential’}, optional (default=’deviance’)
#loss function to be optimized.
#‘deviance’ refers to deviance (= logistic regression) for classification with probabilistic outputs.
#For loss ‘exponential’ gradient boosting recovers the AdaBoost algorithm.
# Fit regression model
params = {'n_estimators': 600, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.02, 'loss': 'ls'}
clf = ensemble.GradientBoostingRegressor(**params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
print("MSE: %.4f" % mse)
# Compute the importance of each feature based on the model
pd.DataFrame({'feature': X.columns, 'importance': clf.feature_importances_})
import numpy as np
import matplotlib.pyplot as plt
# Plot training deviance
# compute test set deviance
test_score = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_predict(X_test)):
test_score[i] = clf.loss_(y_test, y_pred)
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title('Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, clf.train_score_, 'b-',
label='Training Set Deviance')
plt.plot(np.arange(params['n_estimators']) + 1, test_score, 'r-',
label='Test Set Deviance')
plt.legend(loc='upper right')
plt.xlabel('Boosting Iterations')
plt.ylabel('Deviance')
# predict for 9 am on a Tuesday with 0.0 rain and 12 degrees
print(round(clf.predict([[9, 0, 1, 0, 0, 0, 0, 0, 12, 0.0]])[0]),"minutes")
pred = clf.predict(X_test)
predictions = pd.DataFrame(pred)
predictions.rename(columns={0:'estimated_time'}, inplace=True )
predictions['estimated_time'] = round(predictions['estimated_time'])
predictions['estimated_time'] = predictions['estimated_time'].astype(int)
predictions.head()
print(metrics.mean_absolute_error(y_test,predictions))
###Output
_____no_output_____
###Markdown
KNN Regression n_neighbors : int, optional (default = 5). Number of neighbors to use by default for kneighbors queries. weights : str or callable. Weight function used in prediction. Possible values: ‘uniform’ : uniform weights, all points in each neighborhood are weighted equally. ‘distance’ : weight points by the inverse of their distance; in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. Uniform weights are used by default. algorithm : {‘auto’, ‘ball_tree’, ‘kd_tree’, ‘brute’}, optional. Algorithm used to compute the nearest neighbors: ‘ball_tree’ will use BallTree, ‘kd_tree’ will use KDTree, ‘brute’ will use a brute-force search, and ‘auto’ will attempt to decide the most appropriate algorithm based on the values passed to the fit method.
###Code
from sklearn.neighbors import KNeighborsRegressor
knn = KNeighborsRegressor(n_neighbors=5, weights = "uniform", algorithm = "auto")
knn.fit(X_train, y_train)
# predict for 9 am on a Tuesday with 0.0 rain and 12 degrees
print(round(knn.predict([[9, 0, 1, 0, 0, 0, 0, 0, 12, 0.0]])[0]),"minutes")
pred2 = knn.predict(X_test)
predictions2 = pd.DataFrame(pred2)
predictions2.rename(columns={0:'estimated_time'}, inplace=True )
predictions2['estimated_time'] = round(predictions2['estimated_time'])
predictions2['estimated_time'] = predictions2['estimated_time'].astype(int)
predictions2.head()
# around 9.4 with 2 neighbours
# around 8.6 with 5 neighbours
# around 8.4 with 5 neightbours and uniform distance
print(metrics.mean_absolute_error(y_test,predictions2))
###Output
_____no_output_____
###Markdown
Random Forest Regression
###Code
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
regr = RandomForestRegressor(n_estimators=100, max_depth=3, random_state=0)
regr.fit(X_train, y_train)
# predict for 9 am on a Tuesday with 0.0 rain and 12 degrees
print(round(regr.predict([[9, 0, 1, 0, 0, 0, 0, 0, 12, 0.0]])[0]),"minutes")
pred3 = regr.predict(X_test)
pred3
predictions3 = pd.DataFrame(pred3)
predictions3.rename(columns={0:'estimated_time'}, inplace=True )
predictions3['estimated_time'] = round(predictions3['estimated_time'])
predictions3['estimated_time'] = predictions3['estimated_time'].astype(int)
predictions3.head()
print(metrics.mean_absolute_error(y_test,predictions3))
###Output
_____no_output_____
###Markdown
GBR with XGBoosthttps://machinelearningmastery.com/develop-first-xgboost-model-python-scikit-learn/
###Code
import xgboost as xgb
from numpy import loadtxt
from xgboost import XGBRegressor
# Train the model on the training data
boost = XGBRegressor()
boost.fit(X_train, y_train)
y_pred = boost.predict(X_test)
#print(round(boost.predict([[9, 0, 1, 0, 0, 0, 0, 0, 12, 0.0]])[0]),"minutes")
predictions6 = [round(value) for value in y_pred]
print(metrics.mean_absolute_error(y_test,predictions6))
###Output
_____no_output_____
###Markdown
ANN Regression
###Code
# Feature scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Train the NN model
from sklearn.neural_network import MLPRegressor
mlp = MLPRegressor(hidden_layer_sizes=(100, 100, 100), max_iter=2000)
mlp.fit(X_train, y_train.values.ravel())
# predict for 9 am on a Tuesday with 0.0 rain and 12 degrees
print(round(mlp.predict([[-1.35814288, -0.42520414, 2.35526298, -0.46323037, -0.46323037,
-0.42270958, -0.3360006 , -0.31017723, -1.88838929, -0.29194067]])[0]),"minutes")
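# A hedged alternative (assumption: the same raw feature vector used for the other models):
# scale the raw features with the fitted scaler instead of hard-coding pre-scaled values
print(round(mlp.predict(scaler.transform([[9, 0, 1, 0, 0, 0, 0, 0, 12, 0.0]]))[0]),"minutes")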
pred4 = mlp.predict(X_test)
predictions4 = pd.DataFrame(pred4)
predictions4.rename(columns={0:'estimated_time'}, inplace=True )
predictions4['estimated_time'] = round(predictions4['estimated_time'])
predictions4['estimated_time'] = predictions4['estimated_time'].astype(int)
predictions4.head()
print(metrics.mean_absolute_error(y_test,predictions4))
###Output
_____no_output_____
###Markdown
Overall summary
###Code
# GBR Gradient Boost Regression
print(metrics.mean_absolute_error(y_test,predictions))
# ANN - Regression
print(metrics.mean_absolute_error(y_test,predictions4))
# RFR Random Forest Regression
print(metrics.mean_absolute_error(y_test,predictions3))
# KNN K-Nearest Neighbours
print(metrics.mean_absolute_error(y_test,predictions2))
# XGB GBR with XGBoost
print(metrics.mean_absolute_error(y_test,predictions6))
# test_time takes: hour[0], day of week[1:8], temp[8], rain[9]
test_time = [[9, 0, 0, 0, 1, 0, 0, 0, 7, 0.0]]
test_time_nn = [[ 1.1752721 , -0.4158191 , -0.41393183, 2.10666004, -0.46504427,
-0.42333376, -0.34778442, -0.30425241, 0.32245413, -0.30440758]]
# Please, note, test_time_nn is not necessarily the same data as test_time
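# A hedged alternative: derive the scaled vector directly from test_time with the fitted scaler,
# so that both predictions refer to the same observation
# test_time_nn = scaler.transform(test_time)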
print("%.2f" % clf.predict(test_time)[0],"minutes") #GBR
print("%.2f" % mlp.predict(test_time_nn)[0],"minutes") #ANN - R
print("%.2f" % regr.predict(test_time)[0],"minutes") # RFR
print("%.2f" % knn.predict(test_time)[0],"minutes") #KNN
df3[2364:]
X_test[1]
###Output
_____no_output_____ |
how-to-use-azureml/automated-machine-learning/classification-local-azuredatabricks/auto-ml-classification-local-azuredatabricks.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved.Licensed under the MIT License. Automated Machine Learning: Classification local on Azure DataBricksIn this example we use the scikit-learn's [digit dataset](http://scikit-learn.org/stable/datasets/index.htmloptical-recognition-of-handwritten-digits-dataset) to showcase how you can use AutoML for a simple classification problem.In this notebook you will learn how to:1. Create Azure Machine Learning Workspace object and initialize your notebook directory to easily reload this object from a configuration file.2. Create an `Experiment` in an existing `Workspace`.3. Configure AutoML using `AutoMLConfig`.4. Train the model using AzureDataBricks.5. Explore the results.6. Test the best fitted model.Prerequisites:Before running this notebook, run the install instructions described in README.md. Register Machine Learning Services Resource ProviderMicrosoft.MachineLearningServices only needs to be registed once in the subscription. To register it:Start the Azure portal.Select your All services and then Subscription.Select the subscription that you want to use.Click on Resource providersClick the Register link next to Microsoft.MachineLearningServices Check the Azure ML Core SDK Version to Validate Your Installation
###Code
import azureml.core
print("SDK Version:", azureml.core.VERSION)
###Output
_____no_output_____
###Markdown
Initialize an Azure ML Workspace What is an Azure ML Workspace and Why Do I Need One? An Azure ML workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, operationalization, and the monitoring of operationalized models. What do I Need? To create or access an Azure ML workspace, you will need to import the Azure ML library and specify the following information:* A name for your workspace. You can choose one.* Your subscription id. Use the `id` value from the `az account show` command output above.* The resource group name. The resource group organizes Azure resources and provides a default region for the resources in the group. The resource group will be created if it doesn't exist. Resource groups can be created and viewed in the [Azure portal](https://portal.azure.com).* Supported regions include `eastus2`, `eastus`, `westcentralus`, `southeastasia`, `westeurope`, `australiaeast`, `westus2`, `southcentralus`.
###Code
subscription_id = "<SubscriptionId>"
resource_group = "myrg"
workspace_name = "myws"
workspace_region = "eastus2"
###Output
_____no_output_____
###Markdown
Creating a WorkspaceIf you already have access to an Azure ML workspace you want to use, you can skip this cell. Otherwise, this cell will create an Azure ML workspace for you in the specified subscription, provided you have the correct permissions for the given `subscription_id`.This will fail when:1. The workspace already exists.2. You do not have permission to create a workspace in the resource group.3. You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription.If workspace creation fails for any reason other than already existing, please work with your IT administrator to provide you with the appropriate permissions or to provision the required resources.**Note:** Creation of a new workspace can take several minutes.
###Code
# Import the Workspace class and check the Azure ML SDK version.
from azureml.core import Workspace
ws = Workspace.create(name = workspace_name,
subscription_id = subscription_id,
resource_group = resource_group,
location = workspace_region,
exist_ok=True)
ws.get_details()
###Output
_____no_output_____
###Markdown
Configuring Your Local EnvironmentYou can validate that you have access to the specified workspace and write a configuration file to the default configuration location, `./aml_config/config.json`.
###Code
from azureml.core import Workspace
ws = Workspace(workspace_name = workspace_name,
subscription_id = subscription_id,
resource_group = resource_group)
# Persist the subscription id, resource group name, and workspace name in aml_config/config.json.
ws.write_config()
###Output
_____no_output_____
###Markdown
Create a Folder to Host Sample ProjectsFinally, create a folder where all the sample projects will be hosted.
###Code
import os
sample_projects_folder = './sample_projects'
if not os.path.isdir(sample_projects_folder):
os.mkdir(sample_projects_folder)
print('Sample projects will be created in {}.'.format(sample_projects_folder))
###Output
_____no_output_____
###Markdown
Create an ExperimentAs part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
###Code
import logging
import os
import random
import time
from matplotlib import pyplot as plt
from matplotlib.pyplot import imshow
import numpy as np
import pandas as pd
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
from azureml.train.automl.run import AutoMLRun
ws = Workspace.from_config()
# Choose a name for the experiment and specify the project folder.
experiment_name = 'automl-local-classification'
project_folder = './sample_projects/automl-local-classification'
experiment = Experiment(ws, experiment_name)
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace Name'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Project Directory'] = project_folder
output['Experiment Name'] = experiment.name
pd.set_option('display.max_colwidth', -1)
pd.DataFrame(data = output, index = ['']).T
###Output
_____no_output_____
###Markdown
DiagnosticsOpt-in diagnostics for better experience, quality, and security of future releases.
###Code
from azureml.telemetry import set_diagnostics_collection
set_diagnostics_collection(send_diagnostics = True)
###Output
_____no_output_____
###Markdown
Load Training Data Using DataPrep
###Code
import azureml.dataprep as dprep
# You can use `auto_read_file` which intelligently figures out delimiters and datatypes of a file.
# The data referenced here was pulled from `sklearn.datasets.load_digits()`.
simple_example_data_root = 'https://dprepdata.blob.core.windows.net/automl-notebook-data/'
X = dprep.auto_read_file(simple_example_data_root + 'X.csv').skip(1) # Remove the header row.
# You can also use `read_csv` and `to_*` transformations to read (with overridable delimiter)
# and convert column types manually.
# Here we read a comma delimited file and convert all columns to integers.
y = dprep.read_csv(simple_example_data_root + 'y.csv').to_long(dprep.ColumnSelector(term='.*', use_regex = True))
###Output
_____no_output_____
###Markdown
Review the Data Preparation ResultYou can peek the result of a Dataflow at any range using skip(i) and head(j). Doing so evaluates only j records for all the steps in the Dataflow, which makes it fast even against large datasets.
###Code
X.skip(1).head(5)
###Output
_____no_output_____
###Markdown
Configure AutoMLInstantiate an `AutoMLConfig` object to specify the settings and data used to run the experiment.
|Property|Description|
|-|-|
|**task**|classification or regression|
|**primary_metric**|This is the metric that you want to optimize. Classification supports the following primary metrics: accuracy, AUC_weighted, average_precision_score_weighted, norm_macro_recall, precision_score_weighted|
|**iteration_timeout_minutes**|Time limit in minutes for each iteration.|
|**iterations**|Number of iterations. In each iteration AutoML trains a specific pipeline with the data.|
|**n_cross_validations**|Number of cross validation splits.|
|**spark_context**|Spark Context object.|
|**max_concurrent_iterations**|Maximum number of iterations to execute in parallel. This should be less than the number of cores on the ADB cluster.|
|**X**|(sparse) array-like, shape = [n_samples, n_features]|
|**y**|(sparse) array-like, shape = [n_samples, ] or [n_samples, n_classes]. Multi-class targets. An indicator matrix turns on multilabel classification. This should be an array of integers.|
|**path**|Relative path to the project folder. AutoML stores configuration files for the experiment under this folder. You can specify a new empty folder.|
###Code
automl_settings = {
"iteration_timeout_minutes": 10,
"iterations": 10,
"n_cross_validations": 5,
"primary_metric": 'AUC_weighted',
"preprocess": False,
"max_concurrent_iterations": 2,
"verbosity": logging.INFO,
"spark_context": sc
}
automl_config = AutoMLConfig(task = 'classification',
debug_log = 'automl_errors.log',
path = project_folder,
X = X,
y = y,
**automl_settings
)
###Output
_____no_output_____
###Markdown
Train the ModelsCall the `submit` method on the experiment object and pass the run configuration. Execution of local runs is synchronous. Depending on the data and the number of iterations this can run for a while.In this example, we specify `show_output = True` to print currently running iterations to the console.
###Code
local_run = experiment.submit(automl_config, show_output = False)
###Output
_____no_output_____
###Markdown
Explore the Results Portal URL for Monitoring RunsThe following will provide a link to the web interface to explore individual run details and status.
###Code
print(local_run.get_portal_url())
###Output
_____no_output_____
###Markdown
The following will show the child runs and waits for the parent run to complete.
###Code
local_run.wait_for_completion(show_output = True)
###Output
_____no_output_____
###Markdown
Retrieve All Child RunsYou can also use SDK methods to fetch all the child runs and see individual metrics that we log.
###Code
children = list(local_run.get_children())
metricslist = {}
for run in children:
properties = run.get_properties()
metrics = {k: v for k, v in run.get_metrics().items() if isinstance(v, float)}
metricslist[int(properties['iteration'])] = metrics
rundata = pd.DataFrame(metricslist).sort_index(axis=1)
rundata
###Output
_____no_output_____
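###Markdown
As a hedged sketch (assuming the primary metric appears under the name 'AUC_weighted' in the logged metrics), we can also plot how the primary metric evolved across iterations:
###Code
# Each column of rundata is an iteration and each row a metric, so .loc selects one metric across iterations
rundata.loc['AUC_weighted'].plot(marker='o', title='AUC_weighted per iteration')
###Output
_____no_output_____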
###Markdown
Retrieve the Best ModelBelow we select the best pipeline from our iterations. The `get_output` method returns the best run and the fitted model. The Model includes the pipeline and any pre-processing. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*.
###Code
best_run, fitted_model = local_run.get_output()
print(best_run)
print(fitted_model)
###Output
_____no_output_____
###Markdown
Best Model Based on Any Other MetricShow the run and the model that has the smallest `log_loss` value:
###Code
lookup_metric = "log_loss"
best_run, fitted_model = local_run.get_output(metric = lookup_metric)
print(best_run)
print(fitted_model)
###Output
_____no_output_____
###Markdown
Model from a Specific IterationShow the run and the model from the third iteration:
###Code
iteration = 3
third_run, third_model = local_run.get_output(iteration = iteration)
print(third_run)
print(third_model)
###Output
_____no_output_____
###Markdown
Test the Best Fitted Model Load Test Data
###Code
from sklearn import datasets  # import needed for load_digits
digits = datasets.load_digits()
X_test = digits.data[:10, :]
y_test = digits.target[:10]
images = digits.images[:10]
###Output
_____no_output_____
###Markdown
Testing Our Best Fitted ModelWe will try to predict 2 digits and see how our model works.
###Code
# Randomly select digits and test.
for index in np.random.choice(len(y_test), 2, replace = False):
print(index)
predicted = fitted_model.predict(X_test[index:index + 1])[0]
label = y_test[index]
title = "Label value = %d Predicted value = %d " % (label, predicted)
fig = plt.figure(1, figsize = (3,3))
ax1 = fig.add_axes((0,0,.8,.8))
ax1.set_title(title)
plt.imshow(images[index], cmap = plt.cm.gray_r, interpolation = 'nearest')
display(fig)
###Output
_____no_output_____ |
Inf141278.ipynb | ###Markdown
Joachim Mąkowski INF141278 Statistical analysis and tests illustrated on GDP data Preparing and cleaning the data
###Code
# import the required libraries
library(tidyverse)
library(reshape)
library(ggplot2)
region <- read.csv(file = 'Metadata_Country_API_NY.GDP.PCAP.KD_DS2_en_csv_v2_1068878.csv')
#head(region)
gdp <- read.csv('API_NY.GDP.PCAP.KD_DS2_en_csv_v2_1068878.csv')
#head(gdp)
###Output
_____no_output_____
###Markdown
The data are observational and describe gross domestic product per capita. I downloaded them from: https://data.worldbank.org/indicator/NY.GDP.PCAP.KD The values are expressed in US dollars and adjusted for inflation/deflation of the US dollar. I will only analyse data from 1990 onwards.
###Code
gdp <- gdp %>% select(Country.Name,X1990:X2018) # drop unnecessary columns
#head(gdp)
names(gdp) <- c('Country_Name',1990:2018) # rename columns to something more convenient
#head(gdp)
gdp <- na.omit(gdp) # remove countries with NA values
#head(gdp)
gdp <- merge(x = gdp,y = region,by.x = 'Country_Name',by.y = 'TableName') # join that gives us access to
# information about each country's region and the income group it belongs to
#head(gdp)
gdp <- gdp %>% select('Country_Name':'2018',IncomeGroup,Region) # drop unnecessary columns
#head(gdp)
gdp <- filter(gdp,Region != '') # drop region-level rows (the dataset also contained GDP figures for whole regions)
###Output
_____no_output_____
###Markdown
Exploratory analysis
###Code
od1990do2018 <- gdp[c(1:30)] # keep only the country name and the GDP values
od1990do2018 <- melt(od1990do2018,id = c('Country_Name')) # melt the year columns into rows (easier to work with)
plot(od1990do2018$variable,od1990do2018$value,xlab = 'year',ylab = 'GDP per capita',main = 'GDP per capita of all countries, 1990-2018')
###Output
_____no_output_____
###Markdown
The plot shows only a slight change in the median over 1990-2018, while the interquartile range changes (it is largest in 2008-2010), as does the number of outliers (largest in 2018). This suggests that more and more nations are becoming wealthy, while poor countries are not changing their material status.
###Code
od1990do2018$variable <- as.integer(as.character(od1990do2018$variable)) # convert the year to an integer
oddoregion <- merge(x = od1990do2018,y = region,by.x = 'Country_Name',by.y = 'TableName')
oddoregion <- oddoregion %>% select('Country_Name':'value','Region','IncomeGroup')
ggplot(oddoregion,aes(x = reorder(IncomeGroup,value, FUN = median), y = value,color = IncomeGroup),las = 3) + geom_boxplot()+ labs(x = 'income group',y ='GDP in $',title = 'Differences in GDP by income group')
###Output
_____no_output_____
###Markdown
The plot shows that the higher a country's GDP, the better the income group it belongs to.
###Code
ggplot(oddoregion,aes(x = reorder(Region,value, FUN = median), y = value,color = Region),las = 3) + geom_boxplot()+ labs(x = 'Region',y ='GDP in $',title = 'GDP by region')+theme(axis.text.x = element_text(angle = 90))
###Output
_____no_output_____
###Markdown
The plot shows that the highest median is in North America, while the richest countries are in Europe and Central Asia. The poorest region is Sub-Saharan Africa, and South Asia has no rich country at all. Statistical test Snedecor's F-test. H0: time has no effect on GDP (GDP does not depend on time), B1 = 0. H1: there is a relationship between time and GDP, B1 =/= 0.
###Code
set.seed(5)
proba <- od1990do2018[sample(c(1:length(od1990do2018$value)),100),]
model <- lm(value ~ variable, proba)
model
plot(model)
###Output
_____no_output_____
###Markdown
The data are not linear, the quantiles do not follow a normal distribution, and the standard deviation depends on the year.
###Code
Yp <- model$fitted.values
#Yp
srednia <- mean(proba$value)
sst <- sum((proba$value - srednia)^2)
sse <- sum((proba$value - Yp)^2)
ssr <- sum((Yp - srednia)^2)
ssr
test_f <- (ssr/sse)*(nrow(proba)-2) # F statistic; the denominator has n-2 = 98 degrees of freedom
test_f
###Output
_____no_output_____
###Markdown
For a significance level of alpha = 0.05, the critical value of the F(1; 98) test is about 3.94, so the critical region is (3.94; inf). We cannot reject H0, which means the variables may be independent.
###Code
ggplot(proba,aes(x=variable,y=value),las = 1)+geom_point()+geom_smooth(method = 'lm')+ labs(x = 'year',y ='GDP in $',title = 'GDP of random sample 1')
cor(proba$variable,proba$value)
set.seed(50)
proba <- od1990do2018[sample(c(1:length(od1990do2018$value)),100),]
ggplot(proba,aes(x=variable,y=value),las = 1)+geom_point()+geom_smooth(method = 'lm')+ labs(x = 'year',y ='GDP in $',title = 'GDP of random sample 2')
cor(proba$variable,proba$value)
###Output
_____no_output_____
###Markdown
Neither the first nor the second sample is suitable for fitting a linear regression, because the differences between countries are too large and GDP does not grow linearly in all of them. Regressions: in both random samples the correlation coefficient is small. This is because countries differ substantially from one another, and countries from Africa and Western Europe should not end up in the same sample, since the GDP of, say, Germany heavily inflates all the statistics. It is therefore worth looking at each country's GDP separately.
###Code
wyciaganie_danych <- function(country){
kraj <- od1990do2018[od1990do2018$Country_Name == country,]
    ggplot(kraj,aes(x = variable, y= value),xlab = 'year',ylab = 'GDP per capita')+geom_point()+geom_smooth(method = 'lm')
return (kraj)
}
niemcy <- wyciaganie_danych('Germany')
ggplot(niemcy,aes(x = variable, y= value),xlab = 'year',ylab = 'GDP per capita')+geom_point()+geom_smooth(method = 'lm')+ labs(x = 'year',y ='GDP in $',title = 'GDP of Germany')
cor(niemcy$variable,niemcy$value)
polska <- wyciaganie_danych('Poland')
ggplot(polska,aes(x = variable, y= value),xlab = 'year',ylab = 'GDP per capita')+geom_point()+geom_smooth(method = 'lm') + labs(x = 'year',y ='GDP in $',title = 'GDP of Poland')
cor(polska$variable,polska$value)
###Output
_____no_output_____
###Markdown
We can see that both Germany and Poland show almost linear GDP growth.
###Code
zimbabwe <- wyciaganie_danych('Zimbabwe')
ggplot(zimbabwe,aes(x = variable, y= value),xlab = 'year',ylab = 'GDP per capita')+geom_point()+geom_smooth(method = 'lm')+ labs(x = 'year',y ='GDP in $',title = 'GDP of Zimbabwe')
cor(zimbabwe$value,zimbabwe$variable)
###Output
_____no_output_____
###Markdown
By contrast, the shape of Zimbabwe's curve looks more like a sine wave, so we can conclude that some countries have GDP growth strongly correlated with time, while for others regression cannot be used to predict future GDP.
###Code
polska_niemcy <- od1990do2018[od1990do2018$Country_Name == 'Poland' | od1990do2018$Country_Name == 'Germany' ,]
ggplot(polska_niemcy,aes(x = variable, y= value,color = Country_Name))+geom_point()+geom_smooth(method = 'lm') + labs(x = 'year',y ='GDP in $',title = 'Comparison of Germany and Poland')
ggplot(polska_niemcy,aes(x = variable, y= value,color = Country_Name))+geom_point()+geom_smooth(method = 'lm',se=FALSE,fullrange = TRUE)+xlim(1990,2080) + labs(x = 'year',y ='GDP per capita',title = 'Comparison of Germany and Poland')
###Output
_____no_output_____ |
probability.ipynb | ###Markdown
Probability This IPy notebook acts as supporting material for topics covered in **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning**, **Chapter 15 Probabilistic Reasoning over Time**, **Chapter 16 Making Simple Decisions** and parts of **Chapter 25 Robotics** of the book *Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in the probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
from utils import print_table
from notebook import psource, pseudocode, heatmap
###Output
_____no_output_____
###Markdown
CONTENTS- Probability Distribution - Joint probability distribution - Inference using full joint distributions- Bayesian Networks - BayesNode - BayesNet - Exact Inference in Bayesian Networks - Enumeration - Variable elimination - Approximate Inference in Bayesian Networks - Prior sample - Rejection sampling - Likelihood weighting - Gibbs sampling- Hidden Markov Models - Inference in Hidden Markov Models - Forward-backward - Fixed lag smoothing - Particle filtering- Monte Carlo Localization- Decision Theoretic Agent- Information Gathering Agent PROBABILITY DISTRIBUTIONLet us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities to the values works similar to that of using a dictionary with keys being the Value and we assign to it the probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
psource(ProbDist)
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
###Markdown
The first parameter of the constructor, **varname**, has a default value of '?'. So if the name is not passed, it defaults to ?. The keyword argument **freqs** can be a dictionary of values of the random variable: probability. These are then normalized such that the probability values sum up to 1 using the **normalize** method.
###Code
p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})
p.varname
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides **prob** and **varname**, the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability, it is appended to this list. This is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
###Markdown
It is also possible to display the approximate values, rounded to a fixed number of decimal places, using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability DistributionThe helper function **event_values** returns a tuple of the values of the variables in an event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the values of those variables. Variables are specified with a list. The ordering of the returned tuple is the same as that of the variables. Alternatively, if the event is specified by a list or tuple of the same length as the variables, the event tuple is returned as it is.
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables.
###Code
psource(JointProbDist)
###Output
_____no_output_____
###Markdown
Values for a Joint Distribution are an ordered tuple in which each item corresponds to the value associated with a particular variable. For a Joint Distribution of X, Y where X and Y take integer values, this can be something like (18, 19). To specify a Joint Distribution we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class, **JointProbDist** also employs magic methods to assign probability to different values. The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint DistributionsIn this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values. This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book:$$\textbf{P}(X | \textbf{e}) = \alpha \textbf{P}(X, \textbf{e}) = \alpha \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in evidence or query variable), i.e. all possible combinations of **y**. We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
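###Markdown
As a quick sanity check (an added illustration, not from the book), the eight entries of a full joint distribution should sum to 1. We can verify this for the distribution we just defined.
###Code
sum(full_joint[dict(Cavity=c, Toothache=t, Catch=k)]
    for c in (True, False) for t in (True, False) for k in (True, False))
###Output
_____no_output_____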
###Markdown
Let us now look at the **enumerate_joint** function, which returns the sum of those entries in P consistent with e, provided **variables** is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation. The first parameter **variables** refers to the remaining variables. In each recursive call the function keeps one variable constant while varying the others.
###Code
psource(enumerate_joint)
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
###Output
_____no_output_____
###Markdown
You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find the sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)**, as we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and the denominator.
###Code
ans2/ans1
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
psource(enumerate_joint_ask)
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
###Output
_____no_output_____
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation. BAYESIAN NETWORKSA Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements.A Bayes Network is implemented as the class **BayesNet**. It consists of a collection of nodes implemented by the class **BayesNode**. The implementation in the above-mentioned classes focuses only on boolean variables. Each node is associated with a variable and it contains a **conditional probability table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents **P(X | parents)**.Let us dive into the **BayesNode** implementation.
###Code
psource(BayesNode)
###Output
_____no_output_____
###Markdown
The constructor takes in the name of the **variable**, its **parents** and a **cpt**. Here **variable** is the name of the variable, like 'Earthquake'. **parents** should be a list or a space-separated string with the variable names of the parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, giving the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combinations of boolean values that the parents take. The length and order of the values in the keys should be the same as in the supplied **parents** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true).The example below, where we implement the network shown in **Figure 14.3** of the book, will make this clearer.The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalent to the john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can also simply pass a single number as the **cpt**:
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
It is possible to look up conditional probabilities on a node using the **p** method. The method takes in two arguments, **value** and **event**. The event must be a dict of the type {variable: value, ...} and the value corresponds to the value of the variable we are interested in (False or True). The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of the parents in the event. (The event must assign each parent a value.)
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
###Output
_____no_output_____
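###Markdown
As a small added check, the probabilities returned by **p** for True and False always sum to one, since the probability of the variable being False is only stored implicitly.
###Code
john_node.p(True, {'Alarm': True}) + john_node.p(False, {'Alarm': True})
###Output
_____no_output_____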
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
psource(BayesNet)
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds the node to the net. Its parents must already be in the net, and its variable must not. Thus **add** allows us to grow a **BayesNet** given its parents are already present.The **burglary** global is an instance of **BayesNet** corresponding to the above example.

    T, F = True, False
    burglary = BayesNet([
        ('Burglary', '', 0.001),
        ('Earthquake', '', 0.002),
        ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
        ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
        ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
    ])
###Code
burglary
###Output
_____no_output_____
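###Markdown
To see the constructor in action, we can also build the same network ourselves from **node_specs** (this mirrors the definition shown above; the name my_burglary is used only for illustration).
###Code
T, F = True, False
my_burglary = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake',
     {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
])
my_burglary.variables
###Output
_____no_output_____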
###Markdown
The **BayesNet** method **variable_node** allows us to access **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
###Markdown
Exact Inference in Bayesian NetworksA Bayes Network is a more compact representation of the full joint distribution and like full joint distributions allows us to do inference i.e. answer questions about probability distributions of random variables given some evidence.Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section. Inference by EnumerationWe apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
psource(enumerate_all)
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of **Equation 14.4** in the book,$$\textbf{P}(X | \textbf{e}) = \alpha \textbf{P}(X, \textbf{e}) = \alpha \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$ such that **P(X, e, y)** is written as a product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network.**enumeration_ask** calls **enumerate_all** on each value of the query variable **X** and finally normalizes the results.
###Code
psource(enumeration_ask)
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network. **enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
###Output
_____no_output_____
###Markdown
Variable EliminationThe enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we effectively build the joint over all hidden variables, which is of exponential size in the number of hidden variables. Variable elimination instead interleaves joining and marginalization.Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. In general we call a multidimensional array of the type P(Y1 ... Yn | X1 ... Xm) a factor, where some of the Xs and Ys may be assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and a **cpt**. Helper FunctionsThere are certain helper functions that help create the **cpt** for a Factor given the evidence. Let us explore them one by one.
###Code
psource(make_factor)
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, **bn** the bayes network.Here **variables** for each node refers to a list consisting of the variable itself and the parents minus any variables that are part of the evidence. This is created by finding the **node.parents** and filtering out those that are not part of the evidence.The **cpt** created is the one similar to the original **cpt** of the node with only rows that agree with the evidence.
###Code
psource(all_events)
###Output
_____no_output_____
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** which is part of the node. It works by extending the evidence related to the node, so all the output from **all_events** only includes events that are consistent with the evidence. Since **all_events** is a generator function, one such event is returned on every call. We can try this out using the example on **Page 524** of the book. We will make **f**5(A) = P(m | A)
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability **P(MaryCalls=True | Alarm=False)**. Due to our representation, where we only store probabilities for the cases where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book where the evidence is that Alarm = True
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between the two cases. Again the only rows included are those consistent with the evidence. Operations on FactorsWe are interested in two kinds of operations on factors: **Pointwise Product**, which is used to create joint distributions, and **Summing Out**, which is used for marginalization.
###Code
psource(Factor.pointwise_product)
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint by combining two factors. We take the union of the **variables** of both factors and then generate the **cpt** for the new factor using the **all_events** function. Note that the rows not consistent with the evidence have already been eliminated. The pointwise product assigns new probabilities by multiplying rows, similar to a database join.
###Code
psource(pointwise_product)
###Output
_____no_output_____
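###Markdown
As a small added illustration (using the Factor API shown above; the names f_alarm and f_mary are just for this example), we can multiply the factor for **Alarm** with the factor for **MaryCalls** and inspect the variables and **cpt** of the resulting joint factor.
###Code
f_alarm = make_factor('Alarm', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f_mary = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
joint_factor = f_alarm.pointwise_product(f_mary, burglary)
joint_factor.variables, joint_factor.cpt
###Output
_____no_output_____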
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
###Code
psource(Factor.sum_out)
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a factor eliminating a variable by summing over its values. Again **all_events** is used to generate combinations for the rest of the variables.
###Code
psource(sum_out)
###Output
_____no_output_____
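###Markdown
A quick added illustration (again assuming the API shown above): summing **Earthquake** out of the factor for **Alarm** leaves a factor over the remaining variables.
###Code
f_alarm = make_factor('Alarm', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f_alarm.sum_out('Earthquake', burglary).variables
###Output
_____no_output_____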
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values. Elimination AskThe algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes in 3 arguments: **X**, the query variable, **e**, the evidence, and **bn**, the Bayes network. The algorithm creates factors out of the Bayes Nodes in reverse order and eliminates the hidden variables using **sum_out**. Finally it takes a pointwise product of all factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
psource(elimination_ask)
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
_____no_output_____
###Markdown
Elimination Ask Optimizations`elimination_ask` has some critical points to consider, and some optimizations could be performed:

- **Operations on factors**: the `sum_out` and `pointwise_product` functions used in `elimination_ask` are where the space and time complexity of the variable elimination algorithm arise (AIMA3e pg. 526).
> The only trick is to notice that any factor that does not depend on the variable to be summed out can be moved outside the summation.
- **Variable ordering**: Elimination ordering is important; every choice of ordering yields a valid algorithm, but different orderings cause different intermediate factors to be generated during the calculation (AIMA3e pg. 527). In this case the algorithm applies a reversed order.
> In general, the time and space requirements of variable elimination are dominated by the size of the largest factor constructed during the operation of the algorithm. This in turn is determined by the order of elimination of variables and by the structure of the network. It turns out to be intractable to determine the optimal ordering, but several good heuristics are available. One fairly effective method is a greedy one: eliminate whichever variable minimizes the size of the next factor to be constructed.
- **Variable relevance**: Some variables can be irrelevant to a query (i.e. they sum to 1). A variable elimination algorithm can therefore remove all these variables before evaluating the query (AIMA3e pg. 528).
> An optimization is to remove 'every variable that is not an ancestor of a query variable or evidence variable is irrelevant to the query'.

Runtime comparisonLet's see how the runtimes of these two algorithms compare.We expect variable elimination to outperform enumeration by a large margin as we reduce the number of repetitive calculations significantly.
###Code
%%timeit
enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
%%timeit
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
262 µs ± 54.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
In this test case we observe that variable elimination is slower than what we expected. This is mostly implementation overhead rather than the algorithm itself, and it happens because the network is very small, with just 5 nodes. As seen above, `elimination_ask` has some critical points and some optimizations could be performed.Of course, for more complicated networks, variable elimination will be significantly faster and runtime will drop not just by a constant factor, but by a polynomial factor proportional to the number of nodes, due to the reduction in repeated calculations. Approximate Inference in Bayesian NetworksExact inference fails to scale for very large and complex Bayesian Networks. This section covers the implementation of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
psource(BayesNode.sample)
###Output
_____no_output_____
###Markdown
Before we consider the different algorithms in this section, let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on the event's values for the parent variables. That is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it. Prior SamplingThe idea of Prior Sampling is to sample from the Bayesian Network in topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
###Code
psource(prior_sample)
###Output
_____no_output_____
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in topological order, and the partially-built event is passed along so that a node's parent values are available as evidence when it is sampled. We will use the Bayesian Network in **Figure 14.12** to try out **prior_sample**.Traversing the graph in topological order is important.There are two possible topological orderings for this particular directed acyclic graph.

1. `Cloudy -> Sprinkler -> Rain -> Wet Grass`
2. `Cloudy -> Rain -> Sprinkler -> Wet Grass`

We can follow either of the two orderings to sample from the network.Any ordering other than these two, however, cannot be used.One way to think about this is that `Cloudy` can be seen as a precondition of both `Rain` and `Sprinkler` and, just like we have seen in planning, preconditions need to be satisfied before a certain action can be executed.We store the samples as a list of observations. Let us find **P(Rain=True)** by taking 1000 random samples from the network.
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
0.503
###Markdown
Sampling this another time might give different results as we have no control over the distribution of the random samples
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
answer = len(rain_true) / N
print(answer)
###Output
0.519
###Markdown
To evaluate a conditional distribution we can use a two-step filtering process. We first separate out the samples that are consistent with the evidence. Then for each value of the query variable, we can find probabilities. For example, to find **P(Cloudy=True | Rain=True)**: we have already filtered out the samples consistent with our evidence in **rain_true**. Now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
0.8265895953757225
###Markdown
Rejection SamplingRejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. Rejection sampling is advantageous only when we know the query beforehand.While prior sampling generally works for any query, it might fail in some scenarios.Let's say we have a generic Bayesian network and we have evidence `e`, and we want to know how many times a state `A` is true, given evidence `e` is true.Normally, prior sampling can answer this question, but let's assume that the probability of evidence `e` being true in our actual probability distribution is very small.In this situation, it might be possible that sampling never encounters a data-point where `e` is true.If our sampled data has no instance of `e` being true, `P(e) = 0`, and therefore `P(A | e) = P(A, e) / P(e) = 0/0`, which is undefined.We cannot find the required value using this sample.We can definitely increase the number of sample points, but we can never guarantee that we will encounter a case where `e` is true (assuming our actual probability distribution has at least one case where `e` is true).To guarantee this, we would have to consider every single data point, which means we lose the speed advantage that approximation provides us and we essentially have to calculate the exact inference model of the Bayesian network.Rejection sampling will be useful in this situation, as we already know the query.While sampling from the network, we will reject any sample which is inconsistent with the evidence variables of the given query (in this example, the only evidence variable is `e`).We will only consider samples that do not violate **any** of the evidence variables.In this way, we will have enough data with the required evidence to infer queries involving a subset of that evidence.The function **rejection_sampling** implements the algorithm described by **Figure 14.14**
###Code
psource(rejection_sampling)
###Output
_____no_output_____
###Markdown
The function keeps counts of each of the possible values of the Query variable and increases the count when we see an observation consistent with the evidence. It takes in input parameters **X** - The Query Variable, **e** - evidence, **bn** - Bayes net and **N** - number of prior samples to generate.**consistent_with** is used to check consistency.
###Code
psource(consistent_with)
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
###Output
_____no_output_____
###Markdown
Likelihood WeightingRejection sampling takes a long time to run when the probability of finding consistent evidence is low. It is also slow for larger networks and more evidence variables.Rejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
psource(weighted_sample)
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from the Bayesian Network that's consistent with the evidence **e** and returns the event and its weight: the likelihood that the event accords with the evidence. It takes in two parameters, **bn** the Bayesian Network and **e** the evidence.The weight is obtained by multiplying **P(xi | parents(xi))** for each node in the evidence. We set the values of **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
psource(likelihood_weighting)
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling**, but instead of adding one for each sample we add the weight obtained from **weighted_sample**.
###Code
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Gibbs SamplingIn likelihood weighting, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood weighting.Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**
###Code
psource(gibbs_ask)
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values, and then repeatedly pick a non-evidence variable and sample it from **P(Variable | current values of all remaining variables)**. In practice, we speed this up by using **markov_blanket_sample** instead, which conditions only on the variable's Markov blanket. This works because terms not involving the variable cancel in the calculation. The arguments for **gibbs_ask** are similar to **likelihood_weighting**
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
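###Markdown
The **markov_blanket_sample** helper mentioned above can also be called on its own. A hedged illustration: it returns a single True/False sample for a variable given values for its Markov blanket (here, the children of Cloudy).
###Code
markov_blanket_sample('Cloudy', dict(Rain=True, Sprinkler=False), sprinkler)
###Output
_____no_output_____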
###Markdown
Runtime analysisLet's take a look at how much time each algorithm takes.
###Code
%%timeit
all_observations = [prior_sample(sprinkler) for x in range(1000)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
len([observation for observation in rain_true if observation['Cloudy'] == True]) / len(rain_true)
%%timeit
rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
%%timeit
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200)
%%timeit
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200)
###Output
14.4 ms ± 2.16 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
As expected, all algorithms have a very similar runtime.However, rejection sampling would be a lot faster and more accurate when the probability of finding data-points consistent with the required evidence is small.Likelihood weighting is the fastest out of all as it doesn't involve rejecting samples, but it also has quite a high variance. HIDDEN MARKOV MODELS Often, we need to carry out probabilistic inference on temporal data or a sequence of observations where the order of observations matters.We require a model similar to a Bayesian Network, but one that grows over time to keep up with the latest evidence.If you are familiar with the `mdp` module or Markov models in general, you can probably guess that a Markov model might come close to representing our problem accurately.A Markov model is basically a chain-structured Bayesian Network in which there is one state for each time step and each node has an identical probability distribution.The first node, however, has a different distribution, called the prior distribution, which models the initial state of the process.A state in a Markov model depends only on the previous state and the latest evidence and not on the states before it.A **Hidden Markov Model** or **HMM** is a special case of a Markov model in which the state of the process is described by a single discrete random variable.The possible values of the variable are the possible states of the world.But what if we want to model a process with two or more state variables?In that case, we can still fit the process into the HMM framework by redefining our state variables as a single "megavariable".We do this because HMMs come with standard, optimized inference algorithms.An HMM is very similar to an MDP, but we don't have the option of taking actions as in MDPs; instead, the process carries on as new evidence appears.If an HMM is truncated at a fixed length, it becomes a Bayesian network and general BN inference can be used on it to answer queries.Before we start, it will be helpful to understand the structure of a temporal model. We will use the example of the book with the guard and the umbrella. In this example, the state $\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) on Day $\textbf{t}$. In the sensor or observation model, the observation or evidence $\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\textbf{t}$. Based on that, the transition model is

| $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}| X_{t-1})$|
| ------------- |------------- | ----------------------------------|
| ***${False}$*** | ***${False}$*** | 0.7 |
| ***${False}$*** | ***${True}$*** | 0.3 |
| ***${True}$*** | ***${False}$*** | 0.3 |
| ***${True}$*** | ***${True}$*** | 0.7 |

And the sensor model will be,

| $X_{t}$ | $U_{t}$ | **P**$(U_{t}|X_{t})$|
| :-------------: |:-------------: | :------------------------:|
| ***${False}$*** | ***${True}$*** | 0.2 |
| ***${False}$*** | ***${False}$*** | 0.8 |
| ***${True}$*** | ***${True}$*** | 0.9 |
| ***${True}$*** | ***${False}$*** | 0.1 |

HMMs are implemented in the **`HiddenMarkovModel`** class.Let's have a look.
###Code
psource(HiddenMarkovModel)
###Output
_____no_output_____
###Markdown
We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model.
###Code
hmm.sensor_dist(ev=True)
###Output
_____no_output_____
###Markdown
Now that we have defined an HMM object, our task here is to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$ given evidence **U** at each time step **t**.The basic inference tasks that must be solved are:

1. **Filtering**: Computing the posterior probability distribution over the most recent state, given all the evidence up to the current time step.
2. **Prediction**: Computing the posterior probability distribution over the future state.
3. **Smoothing**: Computing the posterior probability distribution over a past state. Smoothing provides a better estimation as it incorporates more evidence.
4. **Most likely explanation**: Finding the most likely sequence of states for a given observation.
5. **Learning**: The transition and sensor models can be learnt, if not yet known, just like in an information gathering agent.

There are three primary methods to carry out inference in Hidden Markov Models:

1. The Forward-Backward algorithm
2. Fixed lag smoothing
3. Particle filtering

Let's have a look at how we can carry out inference and answer queries based on our umbrella HMM using these algorithms. FORWARD-BACKWARDThis is a general algorithm that works for all Markov models, not just HMMs.In the filtering task (inference) we are given evidence **U** at each time **t** and we want to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$. We can think of it as a three step process:

1. In every step we start with the current belief $P(X_{t}|e_{1:t})$
2. We update it for time
3. We update it for evidence

The forward algorithm performs steps 2 and 3 at once. It updates, or rather reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume that rain and no rain are equally likely. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are by default [0.5, 0.5]. The observation update is calculated with the **`forward()`** function. Basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**.
###Code
psource(forward)
umbrella_prior = [0.5, 0.5]
belief_day_1 = forward(hmm, umbrella_prior, ev=True)
print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))
###Output
The probability of raining on day 1 is 0.82
###Markdown
On **Day 2** our initial belief is the updated belief of **Day 1**.Again using the **`forward()`** function we can compute the probability of raining on **Day 2**
###Code
belief_day_2 = forward(hmm, belief_day_1, ev=True)
print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))
###Output
The probability of raining in day 2 is 0.88
###Markdown
In the smoothing part we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for a time **k**, for $0\leq k<t $; the computation can be divided into two parts:

1. The forward message will be computed up to **k** by filtering forward from 1 to **k**.
2. The backward message can be computed by a recursive process that runs from **k** to **t**. Rather than starting at time 1, the algorithm starts at time **t**.

In the umbrella example, we can compute the backward message from **Day 2** to **Day 1** by using the `backward` function. The `backward` function has as parameters the object created by the **`HiddenMarkovModel`** class, the evidence for **Day 2** (in our case it is **True**), and the initial probabilities of being in each state at time t+1. Since no observation is available there, it will be [1, 1]. The `backward` function will return a list with the conditional probabilities.
###Code
psource(backward)
b = [1, 1]
backward(hmm, b, ev=True)
###Output
_____no_output_____
###Markdown
Some may notice that the result is not the same as in the book. The main reason is that in the book the normalization step is not used. If we want to normalize the result, we can use the **`normalize()`** helper function.In order to find the smoothed estimate for rain on **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed on both days and the prior distribution is [0.5, 0.5]
###Code
pseudocode('Forward-Backward')
umbrella_prior = [0.5, 0.5]
prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)
print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))
###Output
The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88
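###Markdown
As an added aside, the un-normalized backward message computed earlier can be normalized with the **`normalize()`** helper mentioned above (assuming it is available through the star imports at the top of the notebook), which brings it in line with the book's presentation.
###Code
normalize(backward(hmm, [1, 1], ev=True))
###Output
_____no_output_____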
###Markdown
Since HMMs are represented as single variable systems, we can represent the transition model and sensor model as matrices.The `forward_backward` algorithm can be easily carried out on this representation (as we have done here) with a time complexity of $O({S}^{2} t)$ where t is the length of the sequence and each step multiplies a vector of size $S$ with a matrix of dimensions $S \times S$.Additionally, the forward pass stores $t$ vectors of size $S$ which makes the auxiliary space requirement equivalent to $O(St)$.Is there any way we can improve the time or space complexity?Fortunately, the matrix representation of HMM properties allows us to do so.If $f$ and $b$ represent the forward and backward messages respectively, we can modify the smoothing algorithm by first running the standard forward pass to compute $f_{t:t}$ (forgetting all the intermediate results) and then running the backward pass for both $b$ and $f$ together, using them to compute the smoothed estimate at each step.This optimization reduces the auxiliary space requirement to constant (irrespective of the length of the sequence) provided the transition matrix is invertible and the sensor model has no zeros (which is sometimes hard to accomplish).Let's look at another algorithm that carries out smoothing in a more optimized way. FIXED LAG SMOOTHINGThe matrix formulation allows us to optimize online smoothing with a fixed lag.Since smoothing can be done in constant space, there should exist an algorithm whose time complexity is independent of the length of the lag.For smoothing a time slice $t - d$ where $d$ is the lag, we need to compute $\alpha f_{1:t-d} \times b_{t-d+1:t}$ incrementally.As we already know, the forward equation is$$f_{1:t+1} = \alpha O_{t+1}{T}^{T}f_{1:t}$$and the backward equation is$$b_{k+1:t} = TO_{k+1}b_{k+2:t}$$where $T$ and $O$ are the transition and sensor models respectively.For smoothing, the forward message is easy to compute but there exists no simple relation between the backward message of this time step and the one at the previous time step, hence we apply the backward equation $d$ times to get$$b_{t-d+1:t} = \left ( \prod_{i=t-d+1}^{t}{TO_i} \right )b_{t+1:t} = B_{t-d+1:t}1$$where $B_{t-d+1:t}$ is the product of the sequence of $T$ and $O$ matrices.Here's how the `probability` module implements `fixed_lag_smoothing`.
###Code
psource(fixed_lag_smoothing)
###Output
_____no_output_____
###Markdown
This algorithm applies `forward` as usual and optimizes the smoothing step by using the equations above.This optimization could be achieved only because HMM properties can be represented as matrices.`vector_to_diagonal`, `matrix_multiplication` and `inverse_matrix` are matrix manipulation functions to simplify the implementation.`normalize` is used to normalize the output before returning it. Here's how we can use `fixed_lag_smoothing` for inference on our umbrella HMM.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
Given evidence T, F, T, F and T, we want to calculate the probability distribution for the fourth day with a fixed lag of 2 days.Let `e_t = False`
###Code
e_t = F
evidence = [T, F, T, F, T]
fixed_lag_smoothing(e_t, hmm, d=2, ev=evidence, t=4)
e_t = T
evidence = [T, T, F, T, T]
fixed_lag_smoothing(e_t, hmm, d=1, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
We cannot calculate probability distributions when $t$ is less than $d$
###Code
fixed_lag_smoothing(e_t, hmm, d=5, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
As expected, the output is `None` PARTICLE FILTERINGThe filtering problem is too expensive to solve using the previous methods for problems with large or continuous state spaces.Particle filtering is a method that can solve the same problem for much larger state spaces, where we wouldn't be able to do these computations in a reasonable amount of time as new evidence arrives, and we want to keep track of things as they happen.The downside is that it is a sampling method and hence isn't exact, but the more samples we're willing to take, the more accurate we get.In this method, instead of keeping track of the probability distribution, we drop particles in a similar proportion at the required regions.The internal representation of this distribution is usually a list of particles with coordinates in the state-space.A particle is just a new name for a sample.Particle filtering can be divided into four steps:

1. __Initialization__: If we have some idea about the prior probability distribution, we drop the initial particles accordingly, or else we just drop them uniformly over the state space.
2. __Forward pass__: As time goes by and measurements come in, we are going to move the selected particles into the grid squares that make the most sense in terms of representing the distribution that we are trying to track.When time goes by, we just loop through all our particles and try to simulate what could happen to each one of them by sampling its next position from the transition model.This is like prior sampling - samples' frequencies reflect the transition probabilities.If we have enough samples we are pretty close to exact values.We work through the list of particles, one particle at a time; all we do is stochastically simulate what the outcome might be.If we had no dimension of time, and we had no new measurements come in, this would be exactly the same as what we did in prior sampling.
3. __Reweight__: As observations come in, don't sample the observations, fix them and downweight the samples based on the evidence just like in likelihood weighting.$$w(x) = P(e\mid x)$$$$B(X) \propto P(e\mid X)B'(X)$$As before, the probabilities don't sum to one, since most have been downweighted.They sum to an approximation of $P(e)$.To normalize the resulting distribution, we can divide by $P(e)$.Likelihood weighting wasn't the best thing for Bayesian networks, because we were not accounting for the incoming evidence, so we were getting samples from the prior distribution, in some sense not the right distribution, and we might end up with a lot of particles with low weights. These samples were very uninformative and the way we fixed it then was by using __Gibbs sampling__.Theoretically, Gibbs sampling can be run on an HMM, but as we iterated over the process infinitely many times in a Bayesian network, we cannot do that here as we have new incoming evidence and we also need computational cycles to propagate through time.We end up with a lot of samples with very low weight that are not representative of the _actual probability distribution_.So if we keep running likelihood weighting, we keep propagating the samples with smaller weights and carry out computations for them even though these samples have no significant contribution to the actual probability distribution.This is why we require the last step.
4.
__Resample__:Rather than tracking weighted samples, we _resample_.We choose from our weighted sample distribution as many times as the number of particles we initially had and we replace these particles too, so that we have a constant number of particles.This is equivalent to renormalizing the distribution.The samples with low weight are rarely chosen in the new distribution after resampling.This newer set of particles after resampling is in some sense more representative of the actual distribution and so we are better allocating our computational cycles.Now the update is complete for this time step, continue with the next one.Let's see how this is implemented in the module.
###Code
psource(particle_filtering)
###Output
_____no_output_____
###Markdown
Here, `scalar_vector_product` and `vector_add` are helper functions to help with vector math and `weighted_sample_with_replacement` resamples from a weighted sample and replaces the original sample, as is obvious from the name.This implementation considers two state variables with generic names 'A' and 'B'. Here's how we can use `particle_filtering` on our umbrella HMM, though it doesn't make much sense using particle filtering on a problem with such a small state space.It is just to get familiar with the syntax.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
particle_filtering(T, 10, hmm)
###Output
_____no_output_____
###Markdown
We got 5 samples from state `A` and 5 samples from state `B`
###Code
particle_filtering([F, T, F, F, T], 10, hmm)
###Output
_____no_output_____
###Markdown
This time we got 2 samples from state `A` and 8 samples from state `B` Comparing runtimes for these algorithms will not be useful, as each solves the filtering task efficiently for a different scenario.`forward_backward` calculates the exact probability distribution.`fixed_lag_smoothing` calculates an approximate distribution and its runtime will depend on the value of the lag chosen.`particle_filtering` is an efficient method for approximating distributions for a very large or continuous state space. MONTE CARLO LOCALIZATIONIn the domain of robotics, particle filtering is used for _robot localization_.__Localization__ is the problem of finding out where things are; in this case, we want to find the position of a robot in a continuous state space.__Monte Carlo Localization__ is an algorithm for robots to _localize_ using a _particle filter_.Given a map of the environment, the algorithm estimates the position and orientation of a robot as it moves and senses the environment.Initially, particles are distributed uniformly over the state space, i.e. the robot has no information about where it is and assumes it is equally likely to be at any point in space.When the robot moves, it analyses the incoming evidence to shift and change the probability to better approximate the probability distribution of its position.The particles are then resampled based on their weights.Gradually, as more evidence comes in, the robot gets better at approximating its location and the particles converge towards the actual position of the robot.The pose of a robot is defined by its two Cartesian coordinates with values $x$ and $y$ and its direction with value $\theta$.We use the kinematic equations of motion to model a deterministic state prediction.This is our motion model (or transition model).Next, we need a sensor model.There can be two kinds of sensor models: the first assumes that the sensors detect _stable_, _recognizable_ features of the environment called __landmarks__.The robot senses the location and bearing of each landmark and updates its belief according to that.We can also assume the noise in measurements to be Gaussian, to simplify things.Another kind of sensor model is used for an array of range sensors, each of which has a fixed bearing relative to the robot.These sensors provide a set of range values in each direction.This will also be corrupted by Gaussian noise, but we can assume that the errors for different beam directions are independent and identically distributed.After evidence comes in, the robot updates its belief state and reweights the particle distribution to better approximate the actual distribution.Let's have a look at how this algorithm is implemented in the module.
###Code
psource(monte_carlo_localization)
###Output
_____no_output_____
###Markdown
Our implementation of Monte Carlo Localization uses the range scan method. The `ray_cast` helper function casts rays in different directions and stores the range values.

- `a` stores the `v` and `w` components of the robot's velocity.
- `z` is a range scan.
- `P_motion_sample` is the motion or transition model.
- `P_sensor` is the range sensor noise model.
- `m` is the 2D map of the environment.
- `S` is a vector of samples of size N.

We'll now define a simple 2D map to run Monte Carlo Localization on. Let's say this is the map we want
###Code
m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])
heatmap(m.m, cmap='binary')
###Output
_____no_output_____
###Markdown
Let's define the motion model as a function `P_motion_sample`.
###Code
def P_motion_sample(kin_state, v, w):
"""Sample from possible kinematic states.
    Returns from a single element distribution (no uncertainty in motion)"""
pos = kin_state[:2]
orient = kin_state[2]
# for simplicity the robot first rotates and then moves
orient = (orient + w)%4
for _ in range(orient):
v = (v[1], -v[0])
pos = vector_add(pos, v)
return pos + (orient,)
###Output
_____no_output_____
###Markdown
Define the sensor model as a function `P_sensor`.
###Code
def P_sensor(x, y):
"""Conditional probability for sensor reading"""
# Need not be exact probability. Can use a scaled value.
if x == y:
return 0.8
elif abs(x - y) <= 2:
return 0.05
else:
return 0
###Output
_____no_output_____
###Markdown
Initializing variables.
###Code
a = {'v': (0, 0), 'w': 0}
z = (2, 4, 1, 6)
###Output
_____no_output_____
###Markdown
Let's run `monte_carlo_localization` with these parameters to find a sample distribution S.
###Code
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)
###Output
_____no_output_____
###Markdown
Let's plot the values in the sample distribution `S`.
###Code
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
GRID:
0 0 12 0 143 14 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 17 52 201 6 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 3 5 19 9 3 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 6 166 0 21 0 0 0 0 0 0 0 0 0 0 0
0 0 0 1 11 75 0 0 0 0 0 0 0 0 0 0 0
73 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0
124 0 0 0 0 0 0 1 0 3 0 0 0 0 0 0 0
0 0 0 14 4 15 1 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
###Markdown
The distribution is highly concentrated at `(5, 3)`, but the robot is not very confident about its position as some other cells also have high probability values. Let's look at another scenario.
###Code
a = {'v': (0, 1), 'w': 0}
z = (2, 3, 5, 7)
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
GRID:
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 1000 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
###Markdown
In this case, the robot is 99.9% certain that it is at position `(6, 7)`. DECISION THEORETIC AGENTWe now move into the domain of probabilistic decision making.To make choices between different possible plans in a certain situation in a given environment, an agent must have _preference_ between the possible outcomes of the various plans.__Utility theory__ is used to represent and reason with preferences.The agent prefers states with a higher _utility_.While constructing multi-agent systems, one major element in the design is the mechanism the agents use for making decisions about which actions to adopt in order to achieve their goals.What is usually required is a mechanism which ensures that the actions adopted lead to benefits for both individual agents, and the community of which they are part.The utility of a state is _relative_ to an agent.Preferences, as expressed by utilities, are combined with probabilities in the general theory of rational decisions called __decision theory__.An agent is said to be _rational_ if and only if it chooses the action that yields the highest expected utility, averaged over all the possible outcomes of the action. Here we'll see how a decision-theoretic agent is implemented in the module.
###Code
psource(DTAgentProgram)
###Output
_____no_output_____
###Markdown
The `DTAgentProgram` function is pretty self-explanatory.It encapsulates a function `program` that takes in an observation or a `percept`, updates its `belief_state` and returns the action that maximizes the `expected_outcome_utility`. INFORMATION GATHERING AGENTBefore we discuss what an information gathering agent is, we'll need to know what decision networks are.For an agent in an environment, a decision network represents information about the agent's current state, its possible actions, the state that will result from the agent's action, and the utility of that state.Decision networks have three primary kinds of nodes, which are:

1. __Chance nodes__: These represent random variables, just like in Bayesian networks.
2. __Decision nodes__: These represent points where the decision-maker has a choice between different actions, and the decision maker tries to find the optimal decision at these nodes with regard to the cost, safety and resulting utility.
3. __Utility nodes__: These represent the agent's utility function.A description of the agent's utility as a function is associated with a utility node.

To evaluate a decision network, we do the following:

1. Initialize the evidence variables according to the current state.
2. Calculate posterior probabilities for each possible value of the decision node and calculate the utility resulting from that action.
3. Return the action with the highest utility.

Let's have a look at the implementation of the `DecisionNetwork` class.
###Code
psource(DecisionNetwork)
###Output
_____no_output_____
###Markdown
The `DecisionNetwork` class inherits from `BayesNet` and has a few extra helper methods.`best_action` returns the best action in the network.`get_utility` is an abstract method which is supposed to return the utility of a particular action and state in the network.`get_expected_utility` computes the expected utility, given an action and evidence. Before we proceed, we need to know a few more terms.Having __perfect information__ refers to a state of being fully aware of the current state, the cost functions and the outcomes of actions.This in turn allows an agent to find the exact utility value of each state.If an agent has perfect information about the environment, maximum expected utility calculations are exact and can be computed with absolute certainty.In decision theory, the __value of perfect information__ (VPI) is the price that an agent would be willing to pay in order to gain access to _perfect information_.VPI calculations are extensively used to calculate expected utilities for nodes in a decision network.For a random variable $E_j$ whose value is currently unknown, the value of discovering $E_j$, given current information $e$ must average over all possible values $e_{jk}$ that we might discover for $E_j$, using our _current_ beliefs about its value.The VPI of $E_j$ is then given by:$$VPI_e(E_j) = \left(\sum_{k}P(E_j=e_{jk}\ |\ e) EU(\alpha_{e_{jk}}\ |\ e, E_j=e_{jk})\right) - EU(\alpha\ |\ e)$$VPI is _non-negative_, _non-additive_ and _order-independent_. An information gathering agent is an agent with certain properties that explores decision networks as and when required with heuristics driven by VPI calculations of nodes.A sensible agent should ask questions in a reasonable order, should avoid asking irrelevant questions, should take into account the importance of each piece of information in relation to its cost and should stop asking questions when that is appropriate._VPI_ is used as the primary heuristic to consider all these points in an information gathering agent as the agent ultimately wants to maximize the utility and needs to find the optimal cost and extent of finding the required information.As an overview, an information gathering agent works by repeatedly selecting the observations with the highest information value, until the cost of the next observation is greater than its expected benefit.The `InformationGatheringAgent` class is an abstract class that inherits from `Agent` and works on the principles discussed above.Let's have a look.
###Code
psource(InformationGatheringAgent)
###Output
_____no_output_____
###Markdown
Coin Flips and Die RollsUse NumPy to create simulations and compute proportions for the following outcomes. The first one is done for you.**Please note again that we are using 0 to represent heads, and 1 to represent tails.**
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
1. Two fair coin flips produce exactly two heads
###Code
# simulate 1 million tests of two fair coin flips
tests = np.random.randint(2, size=(int(1e6), 2))
# sums of all tests
test_sums = tests.sum(axis=1)
test_sums
# proportion of tests that produced exactly two heads
(test_sums == 0).mean()
###Output
_____no_output_____
###Markdown
2. Three fair coin flips produce exactly one head
###Code
# simulate 1 million tests of three fair coin flips
tests = np.random.randint(2, size=(int(1e6),3))
# sums of all tests
test_sums = tests.sum(axis=1)
# proportion of tests that produced exactly one head
(test_sums == 2).mean()
###Output
_____no_output_____
###Markdown
3. Three biased coin flips with P(H) = 0.6 produce exactly one head
###Code
# simulate 1 million tests of three biased coin flips
# hint: use np.random.choice()
tests = np.random.choice([0, 1], size=(int(1e6), 3), p=[0.6, 0.4])
# sums of all tests
test_sums = tests.sum(axis=1)
# proportion of tests that produced exactly one head
(test_sums == 2).mean()
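# analytic check (not part of the original exercise):
# P(exactly one head) = C(3,1) * 0.6 * 0.4**2 = 0.288, which the simulation above should approximate
3 * 0.6 * 0.4**2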
###Output
_____no_output_____
###Markdown
4. A die rolls an even number
###Code
# simulate 1 million tests of one die roll
tests = np.random.choice(np.arange(1,7), size = int(1e6))
# proportion of tests that produced an even number
(tests%2==0).mean()
###Output
_____no_output_____
###Markdown
5. Two dice roll a double
###Code
# simulate 1 million rolls of the first die (values 0-5 are fine here, since we only check for a match)
first = np.random.choice(np.arange(6), size=int(1e6))
# simulate 1 million rolls of the second die
second = np.random.choice(np.arange(6), size=int(1e6))
# proportion of tests where the 1st and 2nd die rolled the same number
(first == second).mean()
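# analytic check (not part of the original exercise): two fair dice match with probability 1/6
1 / 6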
###Output
_____no_output_____
###Markdown
Probability This IPy notebook acts as supporting material for **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning** and **Chapter 15 Probabilistic Reasoning over Time** of the book *Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in the probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
from notebook import *
###Output
_____no_output_____
###Markdown
Probability DistributionLet us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities to the values works similar to that of using a dictionary with keys being the Value and we assign to it the probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
%psource ProbDist
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
###Markdown
The first parameter of the constructor **varname** has a default value of '?'. So if the name is not passed, it defaults to '?'. The keyword argument **freqs** can be a dictionary mapping values of the random variable to probabilities. These are then normalized so that the probability values sum up to 1 using the **normalize** method.
###Code
p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})
p.varname
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides the **prob** and **varname**, the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability, it is appended to this list; this is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
###Markdown
It is also possible to display the approximate values, rounded to a few decimals, using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability Distribution The helper function **event_values** returns a tuple of the values of the variables in an event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the values of those variables. Variables are specified with a list. The ordering of the returned tuple is the same as that of the variables. Alternatively, if the event is specified by a list or tuple of the same length as the variables, the event tuple is returned as-is.
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables.
###Code
%psource JointProbDist
###Output
_____no_output_____
###Markdown
The values for a Joint Distribution form an ordered tuple in which each item corresponds to the value associated with a particular variable. For a Joint Distribution of X, Y where X, Y take integer values this can be something like (18, 19). To specify a Joint distribution we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class, **JointProbDist** also employs magic methods to assign probability to different values. The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint DistributionsIn this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values.This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in evidence or query variable) i.e. all possible combinations of **y**We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
###Markdown
Let us now look at the **enumerate_joint** function, which returns the sum of those entries in P consistent with e, provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation. The first parameter **variables** refers to the remaining variables. In each recursive call the function keeps one variable constant while varying the others.
###Code
psource(enumerate_joint)
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
###Output
_____no_output_____
###Markdown
You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)** as we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and denominator.
###Code
ans2/ans1
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
psource(enumerate_joint_ask)
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
###Output
_____no_output_____
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation. Bayesian Networks A Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements. A Bayes Network is implemented as the class **BayesNet**. It consists of a collection of nodes implemented by the class **BayesNode**. The implementation in the above-mentioned classes focuses only on boolean variables. Each node is associated with a variable and it contains a **conditional probability table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents **P(X | parents)**. Let us dive into the **BayesNode** implementation.
###Code
psource(BayesNode)
###Output
_____no_output_____
###Markdown
The constructor takes in the name of **variable**, **parents** and **cpt**. Here **variable** is the name of the variable, like 'Earthquake'. **parents** should be a list or space-separated string with the variable names of the parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combinations of boolean values that the parents take. The length and order of the values in the keys should be the same as the supplied **parent** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true). The example below, where we implement the network shown in **Figure 14.3** of the book, will make this more clear. The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalent to the john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can also use the following format.
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
It is possible to use the node for lookup function using the **p** method. The method takes in two arguments **value** and **event**. Event must be a dict of the type {variable:values, ..} The value corresponds to the value of the variable we are interested in (False or True).The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of parents in event. (event must assign each parent a value.)
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
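john_node.p(True, {'Alarm': True, 'Burglary': True})  # P(JohnCalls=True | Alarm=True) = 0.90, the complementary lookup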
###Output
_____no_output_____
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
psource(BayesNet)
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds node to the net. Its parents must already be in the net, and its variable must not. Thus add allows us to grow a **BayesNet** given its parents are already present.**burglary** global is an instance of **BayesNet** corresponding to the above example. T, F = True, False burglary = BayesNet([ ('Burglary', '', 0.001), ('Earthquake', '', 0.002), ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}), ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}), ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01}) ])
###Code
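# For illustration, the same network can be rebuilt by hand from the node_specs listed above
# (the preloaded global `burglary` is what we actually inspect below):
T, F = True, False
burglary_manual = BayesNet([
    ('Burglary', '', 0.001),
    ('Earthquake', '', 0.002),
    ('Alarm', 'Burglary Earthquake',
     {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
    ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
    ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})])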
burglary
###Output
_____no_output_____
###Markdown
The **BayesNet** method **variable_node** allows us to reach **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
###Markdown
Exact Inference in Bayesian NetworksA Bayes Network is a more compact representation of the full joint distribution and like full joint distributions allows us to do inference i.e. answer questions about probability distributions of random variables given some evidence.Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section. Inference by EnumerationWe apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
psource(enumerate_all)
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of **Equation 14.4** in the book,$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$ such that **P(X, e, y)** is written as a product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network. **enumeration_ask** calls **enumerate_all** on each value of the query variable **X** and finally normalizes the results.
###Code
psource(enumeration_ask)
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network.**enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
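ans_dist.show_approx()  # the full normalized distribution, using show_approx introduced earlier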
###Output
_____no_output_____
###Markdown
Variable Elimination The enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we effectively build the joint over all the hidden variables, which is of exponential size in the number of hidden variables. Variable elimination employs interleaving of join and marginalization. Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. In general we call a multidimensional array of type P(Y1 ... Yn | X1 ... Xm) a factor, where some of the Xs and Ys may be assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and **cpt**. Helper Functions There are certain helper functions that help create the **cpt** for the Factor given the evidence. Let us explore them one by one.
###Code
psource(make_factor)
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, **bn** the bayes network.Here **variables** for each node refers to a list consisting of the variable itself and the parents minus any variables that are part of the evidence. This is created by finding the **node.parents** and filtering out those that are not part of the evidence.The **cpt** created is the one similar to the original **cpt** of the node with only rows that agree with the evidence.
###Code
psource(all_events)
###Output
_____no_output_____
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** which is part of the node. This works by extending evidence related to the node, thus all the output from **all_events** only includes events that support the evidence. Given **all_events** is a generator function one such event is returned on every call. We can try this out using the example on **Page 524** of the book. We will make **f**5(A) = P(m | A)
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability **P(MaryCalls=True | Alarm = False)**. Because our representation only stores probabilities for the cases where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book where the evidence is that Alarm = True
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between the two cases. Again the only rows included are those consistent with the evidence. Operations on Factors We are interested in two kinds of operations on factors: **Pointwise Product**, which is used to create joint distributions, and **Summing Out**, which is used for marginalization.
###Code
psource(Factor.pointwise_product)
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint by combining two factors. We take the union of the **variables** of both factors and then generate the **cpt** for the new factor using the **all_events** function. Note that we have already eliminated rows that are not consistent with the evidence. Pointwise product assigns new probabilities by multiplying rows, similar to a database join.
###Code
psource(pointwise_product)
###Output
_____no_output_____
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
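As a quick illustration (reusing **f5** and **new_factor** from above, and assuming the two-argument form shown in the source listing), the product of two factors contains the union of their variables:
###Code
product_factor = f5.pointwise_product(new_factor, burglary)
product_factor.variables
###Output
_____no_output_____
###Markdown
The other operation we need is **Summing Out**.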
###Code
psource(Factor.sum_out)
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a factor that eliminates a variable by summing over its values. Again **all_events** is used to generate combinations for the rest of the variables.
###Code
psource(sum_out)
###Output
_____no_output_____
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values. Elimination Ask The algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes in 3 arguments: **X** the query variable, **e** the evidence, and **bn** the Bayes network. The algorithm creates factors out of Bayes Nodes in reverse order and eliminates hidden variables using **sum_out**. Finally it takes a pointwise product of all factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
psource(elimination_ask)
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
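# for comparison, exact inference by enumeration should give the same posterior
enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()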
###Output
_____no_output_____
###Markdown
Approximate Inference in Bayesian NetworksExact inference fails to scale for very large and complex Bayesian Networks. This section covers implementation of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
psource(BayesNode.sample)
###Output
_____no_output_____
###Markdown
Before we consider the different algorithms in this section, let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on the event's values for parent_variables. That is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it. Prior Sampling The idea of Prior Sampling is to sample from the Bayesian Network in topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
###Code
psource(prior_sample)
###Output
_____no_output_____
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in topological order. The sampled value of each node is stored in the event so that it can serve as the parent value for its children. We will use the Bayesian Network in **Figure 14.12** to try out **prior_sample**. We store the samples as observations. Let us find **P(Rain=True)**
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
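# each sample is a complete assignment of the network's variables,
# something like {'Cloudy': True, 'Sprinkler': False, 'Rain': True, 'WetGrass': True}
all_observations[0]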
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
0.508
###Markdown
To evaluate a conditional distribution, we can use a two-step filtering process. We first separate out the samples that are consistent with the evidence. Then for each value of the query variable, we can find probabilities. For example, to find **P(Cloudy=True | Rain=True)**: we have already filtered out the samples consistent with our evidence in **rain_true**. Now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
0.7755905511811023
###Markdown
Rejection SamplingRejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. The function **rejection_sampling** implements the algorithm described by **Figure 14.14**
###Code
psource(rejection_sampling)
###Output
_____no_output_____
###Markdown
The function keeps counts of each of the possible values of the Query variable and increases the count when we see an observation consistent with the evidence. It takes in input parameters **X** - The Query Variable, **e** - evidence, **bn** - Bayes net and **N** - number of prior samples to generate.**consistent_with** is used to check consistency.
###Code
psource(consistent_with)
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
###Output
_____no_output_____
###Markdown
Likelihood WeightingRejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
psource(weighted_sample)
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from Bayesian Network that's consistent with the evidence **e** and returns the event and its weight, the likelihood that the event accords to the evidence. It takes in two parameters **bn** the Bayesian Network and **e** the evidence.The weight is obtained by multiplying **P(xi | parents(xi))** for each node in evidence. We set the values of **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
psource(likelihood_weighting)
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling** but instead of adding one for each sample we add the weight obtained from **weighted_sample**.
###Code
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Gibbs Sampling In likelihood weighting, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood weighting. Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**
###Code
psource(gibbs_ask)
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values. We then repeatedly select a non-evidence variable and sample it from **P(Variable | values in the current state of all remaining vars)**. In practice, we speed this up by using **markov_blanket_sample** instead. This works because terms not involving the variable cancel in the calculation. The arguments for **gibbs_ask** are similar to those of **likelihood_weighting**
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Inference in Temporal Models Before we start, it will be helpful to understand the structure of a temporal model. We will use the example of the book with the guard and the umbrella. In this example, the state $\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) at Day $\textbf{t}$. In the sensor or observation model, the observation or evidence $\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\textbf{t}$. Based on that, the transition model is | $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}| X_{t-1})$| | ------------- |------------- | ----------------------------------|| ***${False}$*** | ***${False}$*** | 0.7 || ***${False}$*** | ***${True}$*** | 0.3 || ***${True}$*** | ***${False}$*** | 0.3 || ***${True}$*** | ***${True}$*** | 0.7 |And the sensor model will be,| $X_{t}$ | $U_{t}$ | **P**$(U_{t}|X_{t})$| | :-------------: |:-------------: | :------------------------:|| ***${False}$*** | ***${True}$*** | 0.2 || ***${False}$*** | ***${False}$*** | 0.8 || ***${True}$*** | ***${True}$*** | 0.9 || ***${True}$*** | ***${False}$*** | 0.1 | In the filtering task we are given evidence **U** at each time **t** and we want to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$. We can think of it as a three-step process: 1. In every step we start with the current belief $P(X_{t}|e_{1:t})$ 2. We update it for time 3. We update it for evidence The forward algorithm performs steps 2 and 3 at once. It updates, or rather reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume equal probabilities of rain and no rain. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are by default [0.5, 0.5].
###Code
%psource HiddenMarkovModel
###Output
_____no_output_____
###Markdown
We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model.
###Code
hmm.sensor_dist(ev=True)
###Output
_____no_output_____
###Markdown
The observation update is calculated with the **`forward()`** function. Basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**.
###Code
psource(forward)
umbrella_prior = [0.5, 0.5]  # the default Day 0 prior mentioned above; defined here so the cell runs on its own
belief_day_1 = forward(hmm, umbrella_prior, ev=True)
print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))
###Output
The probability of raining on day 1 is 0.82
###Markdown
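As a sanity check, we can reproduce this number by hand from the transition and sensor tables above (a small plain-Python sketch; the 0.7/0.3 and 0.9/0.2 values come from those tables and the prior is the default [0.5, 0.5]): predict with the transition model, weight by the evidence, then normalize.
###Code
# one step of filtering by hand: time update followed by evidence update
prior_rain, prior_dry = 0.5, 0.5
pred_rain = 0.7 * prior_rain + 0.3 * prior_dry      # P(Rain_1) after the time update
pred_dry = 0.3 * prior_rain + 0.7 * prior_dry
unnorm_rain = 0.9 * pred_rain                       # weight by P(Umbrella=True | Rain=True)
unnorm_dry = 0.2 * pred_dry                         # weight by P(Umbrella=True | Rain=False)
round(unnorm_rain / (unnorm_rain + unnorm_dry), 2)  # 0.82, matching forward()
###Output
_____no_output_____
###Markdown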
In **Day 2** our initial belief is the updated belief of **Day 1**. Again using the **`forward()`** function we can compute the probability of raining in **Day 2**
###Code
belief_day_2 = forward(hmm, belief_day_1, ev=True)
print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))
###Output
The probability of raining in day 2 is 0.88
###Markdown
In the smoothing part we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for time **k**, for $0\leq k<t $; the computation can be divided in two parts: 1. The forward message is computed up to **k** by filtering forward from 1 to **k**. 2. The backward message is computed by a recursive process that runs from **k** to **t**. Rather than starting at time 1, the algorithm starts at time **t**. In the umbrella example, we can compute the backward message from **Day 2** to **Day 1** by using the `backward` function. The `backward` function has as parameters the object created by the **`HiddenMarkovModel`** class, the evidence on **Day 2** (in our case **True**), and the initial probabilities of being in each state at time t+1. Since no observation is available there, this is [1, 1]. The `backward` function will return a list with the conditional probabilities.
###Code
psource(backward)
b = [1, 1]
backward(hmm, b, ev=True)
###Output
_____no_output_____
###Markdown
Some may notice that the result is not the same as in the book. The main reason is that the book does not apply the normalization step. If we want to normalize the result, we can use the **`normalize()`** helper function. In order to find the smoothed estimate for raining on **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed on both days and the prior distribution is [0.5, 0.5]
###Code
pseudocode('Forward-Backward')
umbrella_prior = [0.5, 0.5]
T, F = True, False  # boolean shorthands for the evidence sequence below
prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)
print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))
###Output
The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88
###Markdown
Probability This IPy notebook acts as supporting material for **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning** and **Chapter 15 Probabilistic Reasoning over Time** of the book *Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in the probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
###Output
_____no_output_____
###Markdown
Probability DistributionLet us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities to the values works similar to that of using a dictionary with keys being the Value and we assign to it the probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
%psource ProbDist
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
###Markdown
The first parameter of the constructor **varname** has a default value of '?'. So if the name is not passed, it defaults to '?'. The keyword argument **freqs** can be a dictionary mapping values of the random variable to probabilities. These are then normalized so that the probability values sum up to 1 using the **normalize** method.
###Code
p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})
p.varname
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides the **prob** and **varname**, the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability, it is appended to this list; this is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
###Markdown
It is also possible to display the approximate values, rounded to a few decimals, using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability Distribution The helper function **event_values** returns a tuple of the values of the variables in an event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the values of those variables. Variables are specified with a list. The ordering of the returned tuple is the same as that of the variables. Alternatively, if the event is specified by a list or tuple of the same length as the variables, the event tuple is returned as-is.
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables.
###Code
%psource JointProbDist
###Output
_____no_output_____
###Markdown
The values for a Joint Distribution form an ordered tuple in which each item corresponds to the value associated with a particular variable. For a Joint Distribution of X, Y where X, Y take integer values this can be something like (18, 19). To specify a Joint distribution we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class, **JointProbDist** also employs magic methods to assign probability to different values. The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint DistributionsIn this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values.This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in evidence or query variable) i.e. all possible combinations of **y**We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
###Markdown
Let us now look at the **enumerate_joint** function, which returns the sum of those entries in P consistent with e, provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation. The first parameter **variables** refers to the remaining variables. In each recursive call the function keeps one variable constant while varying the others.
###Code
%psource enumerate_joint
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
###Output
_____no_output_____
###Markdown
You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)** as we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and denominator.
###Code
ans2/ans1
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
%psource enumerate_joint_ask
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
###Output
_____no_output_____
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation. Bayesian Networks A Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements. A Bayes Network is implemented as the class **BayesNet**. It consists of a collection of nodes implemented by the class **BayesNode**. The implementation in the above-mentioned classes focuses only on boolean variables. Each node is associated with a variable and it contains a **conditional probability table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents **P(X | parents)**. Let us dive into the **BayesNode** implementation.
###Code
%psource BayesNode
###Output
_____no_output_____
###Markdown
The constructor takes in the name of **variable**, **parents** and **cpt**. Here **variable** is the name of the variable, like 'Earthquake'. **parents** should be a list or space-separated string with the variable names of the parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combinations of boolean values that the parents take. The length and order of the values in the keys should be the same as the supplied **parent** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true). The example below, where we implement the network shown in **Figure 14.3** of the book, will make this more clear. The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalent to the john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can also use the following format.
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
It is possible to use the node for lookup function using the **p** method. The method takes in two arguments **value** and **event**. Event must be a dict of the type {variable:values, ..} The value corresponds to the value of the variable we are interested in (False or True).The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of parents in event. (event must assign each parent a value.)
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
###Output
_____no_output_____
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
%psource BayesNet
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds node to the net. Its parents must already be in the net, and its variable must not. Thus add allows us to grow a **BayesNet** given its parents are already present.**burglary** global is an instance of **BayesNet** corresponding to the above example. T, F = True, False burglary = BayesNet([ ('Burglary', '', 0.001), ('Earthquake', '', 0.002), ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}), ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}), ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01}) ])
###Code
burglary
###Output
_____no_output_____
###Markdown
The **BayesNet** method **variable_node** allows us to reach **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
###Markdown
Exact Inference in Bayesian NetworksA Bayes Network is a more compact representation of the full joint distribution and like full joint distributions allows us to do inference i.e. answer questions about probability distributions of random variables given some evidence.Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section. Inference by EnumerationWe apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
%psource enumerate_all
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of **Equation 14.4** in the book,$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$ such that **P(X, e, y)** is written as a product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network. **enumeration_ask** calls **enumerate_all** on each value of the query variable **X** and finally normalizes the results.
###Code
%psource enumeration_ask
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network.**enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
###Output
_____no_output_____
###Markdown
Variable Elimination The enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we effectively build the joint over all the hidden variables, which is of exponential size in the number of hidden variables. Variable elimination employs interleaving of join and marginalization. Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. In general we call a multidimensional array of type P(Y1 ... Yn | X1 ... Xm) a factor, where some of the Xs and Ys may be assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and **cpt**. Helper Functions There are certain helper functions that help create the **cpt** for the Factor given the evidence. Let us explore them one by one.
###Code
%psource make_factor
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, **bn** the bayes network.Here **variables** for each node refers to a list consisting of the variable itself and the parents minus any variables that are part of the evidence. This is created by finding the **node.parents** and filtering out those that are not part of the evidence.The **cpt** created is the one similar to the original **cpt** of the node with only rows that agree with the evidence.
###Code
%psource all_events
###Output
_____no_output_____
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** which is part of the node. This works by extending evidence related to the node, thus all the output from **all_events** only includes events that support the evidence. Given **all_events** is a generator function one such event is returned on every call. We can try this out using the example on **Page 524** of the book. We will make **f**5(A) = P(m | A)
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability **P(MaryCalls=True | Alarm = False)**. Because our representation only stores probabilities for the cases where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book where the evidence is that Alarm = True
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between the two cases. Again the only rows included are those consistent with the evidence. Operations on Factors We are interested in two kinds of operations on factors: **Pointwise Product**, which is used to create joint distributions, and **Summing Out**, which is used for marginalization.
###Code
%psource Factor.pointwise_product
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint by combining two factors. We take the union of the **variables** of both factors and then generate the **cpt** for the new factor using the **all_events** function. Note that we have already eliminated rows that are not consistent with the evidence. Pointwise product assigns new probabilities by multiplying rows, similar to a database join.
###Code
%psource pointwise_product
###Output
_____no_output_____
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
###Code
%psource Factor.sum_out
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a factor that eliminates a variable by summing over its values. Again **all_events** is used to generate combinations for the rest of the variables.
###Code
%psource sum_out
###Output
_____no_output_____
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values. Elimination Ask The algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes in 3 arguments: **X** the query variable, **e** the evidence, and **bn** the Bayes network. The algorithm creates factors out of Bayes Nodes in reverse order and eliminates hidden variables using **sum_out**. Finally it takes a pointwise product of all factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
%psource elimination_ask
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
_____no_output_____
###Markdown
Approximate Inference in Bayesian NetworksExact inference fails to scale for very large and complex Bayesian Networks. This section covers implementation of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
%psource BayesNode.sample
###Output
_____no_output_____
###Markdown
Before we consider the different algorithms in this section, let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on the event's values for parent_variables. That is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it. Prior Sampling The idea of Prior Sampling is to sample from the Bayesian Network in topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
###Code
%psource prior_sample
###Output
_____no_output_____
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in topological order. The sampled value of each node is stored in the event so that it can serve as the parent value for its children. We will use the Bayesian Network in **Figure 14.12** to try out **prior_sample**. We store the samples as observations. Let us find **P(Rain=True)**
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
_____no_output_____
###Markdown
To evaluate a conditional distribution, we can use a two-step filtering process. We first separate out the samples that are consistent with the evidence. Then for each value of the query variable, we can find probabilities. For example, to find **P(Cloudy=True | Rain=True)**: we have already filtered out the samples consistent with our evidence in **rain_true**. Now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
_____no_output_____
###Markdown
Rejection SamplingRejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. The function **rejection_sampling** implements the algorithm described by **Figure 14.14**
###Code
%psource rejection_sampling
###Output
_____no_output_____
###Markdown
The function keeps counts of each of the possible values of the Query variable and increases the count when we see an observation consistent with the evidence. It takes in input parameters **X** - The Query Variable, **e** - evidence, **bn** - Bayes net and **N** - number of prior samples to generate.**consistent_with** is used to check consistency.
###Code
%psource consistent_with
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
###Output
_____no_output_____
###Markdown
Likelihood WeightingRejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
%psource weighted_sample
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from the Bayesian Network that's consistent with the evidence **e** and returns the event and its weight, the likelihood that the event accords with the evidence. It takes two parameters: **bn**, the Bayesian Network, and **e**, the evidence. The weight is obtained by multiplying **P(xi | parents(xi))** for each node in the evidence. We set **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
%psource likelihood_weighting
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling** but instead of adding one for each sample we add the weight obtained from **weighted_sample**.
###Code
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Gibbs SamplingIn likelihood weighting, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood weighting. Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**
###Code
%psource gibbs_ask
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values. We then repeatedly select a non-evidence variable and sample it from **P(Variable | values of all remaining variables in the current state)**. In practice, we speed this up by using **markov_blanket_sample** instead. This works because terms not involving the variable get canceled in the calculation. The arguments for **gibbs_ask** are similar to those of **likelihood_weighting**
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Probability This IPy notebook acts as supporting material for topics covered in **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning**, **Chapter 15 Probabilistic Reasoning over Time**, **Chapter 16 Making Simple Decisions** and parts of **Chapter 25 Robotics** of the book *Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in the probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
from utils import print_table
from notebook import psource, pseudocode, heatmap
###Output
_____no_output_____
###Markdown
CONTENTS- Probability Distribution - Joint probability distribution - Inference using full joint distributions- Bayesian Networks - BayesNode - BayesNet - Exact Inference in Bayesian Networks - Enumeration - Variable elimination - Approximate Inference in Bayesian Networks - Prior sample - Rejection sampling - Likelihood weighting - Gibbs sampling- Hidden Markov Models - Inference in Hidden Markov Models - Forward-backward - Fixed lag smoothing - Particle filtering- Monte Carlo Localization- Decision Theoretic Agent- Information Gathering Agent PROBABILITY DISTRIBUTIONLet us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities works like using a dictionary: the keys are the values of the random variable and we assign each of them a probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _**, which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
psource(ProbDist)
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
###Markdown
The first parameter of the constructor, **varname**, has a default value of '?'. So if the name is not passed it defaults to '?'. The keyword argument **freqs** can be a dictionary mapping values of the random variable to probabilities. These are then normalized so that the probability values sum up to 1 using the **normalize** method.
###Code
p = ProbDist(freq={'low': 125, 'medium': 375, 'high': 500})
p.var_name
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides **prob** and **varname**, the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability it is appended to this list. This is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
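###Markdown
A quick sanity check: after calling **normalize** the stored probabilities sum to 1 (using the **values** list described above).
###Code
sum(p[value] for value in p.values)
###Output
_____no_output_____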
###Markdown
It is also possible to display the values rounded to a fixed number of decimal places using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability DistributionThe helper function **event_values** returns a tuple of the values of variables in an event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the values of those variables. Variables are specified with a list. The ordering of the returned tuple is the same as that of the variables. Alternatively, if the event is specified by a tuple of the same length as the variables, the event tuple is returned as it is.
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
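###Markdown
The alternative format mentioned above can be checked as well: if the event is already a tuple of the same length as the variables, it is returned unchanged (a small sketch).
###Code
# the event is passed as a tuple of the same length as the variables, so it is returned as-is
event_values((10, 9, 8), ['C', 'A', 'X'])
###Output
_____no_output_____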
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables.
###Code
psource(JointProbDist)
###Output
_____no_output_____
###Markdown
A value for a Joint Distribution is an ordered tuple in which each item corresponds to the value associated with a particular variable. For a Joint Distribution of X, Y where X, Y take integer values this can be something like (18, 19). To specify a Joint Distribution we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class, **JointProbDist** also employs magic methods to assign probabilities to different values. The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint DistributionsIn this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a Python dictionary with variables as dict keys and dict values representing the values. This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.$$\textbf{P}(X | \textbf{e}) = \alpha \textbf{P}(X, \textbf{e}) = \alpha \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate over the remaining variables **y** (those not in the evidence and not the query variable), i.e. all possible combinations of **y**. We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
###Markdown
Let us now look at the **enumerate_joint** function. It returns the sum of those entries in P consistent with e, provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation. The first parameter **variables** refers to the remaining variables. In each recursive call the function keeps one variable constant while varying the others.
###Code
psource(enumerate_joint)
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
###Output
_____no_output_____
###Markdown
You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)** as we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and denominator.
###Code
ans2/ans1
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
psource(enumerate_joint_ask)
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
###Output
_____no_output_____
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation. BAYESIAN NETWORKSA Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements. A Bayes Network is implemented as the class **BayesNet**. It consists of a collection of nodes implemented by the class **BayesNode**. The implementation in the above-mentioned classes focuses only on boolean variables. Each node is associated with a variable and contains a **conditional probability table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents, **P(X | parents)**. Let us dive into the **BayesNode** implementation.
###Code
psource(BayesNode)
###Output
_____no_output_____
###Markdown
The constructor takes in the name of the **variable**, its **parents** and the **cpt**. Here **variable** is the name of the variable, like 'Earthquake'. **parents** should be a list or a space-separated string with the variable names of the parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, giving the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combinations of boolean values that the parents take. The length and order of the values in the keys should be the same as in the supplied **parents** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true). The example below, where we implement the network shown in **Figure 14.3** of the book, will make this clearer. The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalent to the john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can simply pass a number, as shown below.
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
It is possible to use the node for probability lookups using the **p** method. The method takes two arguments, **value** and **event**. The event must be a dict of the type {variable: value, ...} and the value corresponds to the value of the variable we are interested in (False or True). The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of the parents in the event. (The event must assign each parent a value.)
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
###Output
_____no_output_____
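###Markdown
As a small follow-up, the complementary lookup shows that the two values sum to 1.
###Code
john_node.p(True, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=True | Alarm=True)
###Output
_____no_output_____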
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
psource(BayesNet)
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds node to the net. Its parents must already be in the net, and its variable must not. Thus add allows us to grow a **BayesNet** given its parents are already present.**burglary** global is an instance of **BayesNet** corresponding to the above example. T, F = True, False burglary = BayesNet([ ('Burglary', '', 0.001), ('Earthquake', '', 0.002), ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}), ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}), ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01}) ])
###Code
burglary
###Output
_____no_output_____
###Markdown
The **BayesNet** method **variable_node** allows us to reach **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
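###Markdown
The retrieved node supports the usual **BayesNode** methods, so we can, for example, look up a conditional probability through it.
###Code
burglary.variable_node('Alarm').p(True, {'Burglary': True, 'Earthquake': False}) # P(Alarm=True | Burglary=True, Earthquake=False)
###Output
_____no_output_____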
###Markdown
Exact Inference in Bayesian NetworksA Bayes Network is a more compact representation of the full joint distribution and like full joint distributions allows us to do inference i.e. answer questions about probability distributions of random variables given some evidence.Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section. Inference by EnumerationWe apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
psource(enumerate_all)
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of the **Equation 14.4** in the book.$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$ such that **P(X, e, y)** is written in the form of product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network.**enumeration_ask** calls **enumerate_all** on each value of query variable **X** and finally normalizes them.
###Code
psource(enumeration_ask)
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network. **enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
###Output
_____no_output_____
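###Markdown
Since **enumeration_ask** returns a probability distribution, we can also inspect both values at once with **show_approx**.
###Code
ans_dist.show_approx()
###Output
_____no_output_____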
###Markdown
Variable EliminationThe enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we effectively build the joint over all hidden variables, which is exponential in the number of hidden variables. Variable elimination employs interleaved joining and marginalization. Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. In general we call a multidimensional array of the type P(Y1 ... Yn | X1 ... Xm) a factor, where some of the Xs and Ys may be assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and a **cpt**. Helper FunctionsThere are certain helper functions that help create the **cpt** for a Factor given the evidence. Let us explore them one by one.
###Code
psource(make_factor)
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, **bn** the bayes network.Here **variables** for each node refers to a list consisting of the variable itself and the parents minus any variables that are part of the evidence. This is created by finding the **node.parents** and filtering out those that are not part of the evidence.The **cpt** created is the one similar to the original **cpt** of the node with only rows that agree with the evidence.
###Code
psource(all_events)
###Output
_____no_output_____
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** which is part of the node. This works by extending the evidence related to the node, thus all the output from **all_events** only includes events that support the evidence. Since **all_events** is a generator function, one such event is returned on every call. We can try this out using the example on **Page 524** of the book. We will make **f**5(A) = P(m | A)
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability **P(MaryCalls=True | Alarm=False)**. Due to our representation, where we store probabilities only for the cases where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book where the evidence is Alarm = True
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between the two cases. Again the only rows included are those consistent with the evidence. Operations on FactorsWe are interested in two kinds of operations on factors: **Pointwise Product**, which is used to create joint distributions, and **Summing Out**, which is used for marginalization.
###Code
psource(Factor.pointwise_product)
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint by combining two factors. We take the union of the **variables** of both factors and then generate the **cpt** for the new factor using the **all_events** function. Note that we have already eliminated rows that are not consistent with the evidence. Pointwise product assigns new probabilities by multiplying rows, similar to a database join.
###Code
psource(pointwise_product)
###Output
_____no_output_____
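###Markdown
As a quick, hedged demonstration of the method whose source is shown above: we can combine **f5** from before with the corresponding factor for JohnCalls. Under this evidence both factors depend only on Alarm, so their product is again a factor over Alarm. (This sketch assumes the method takes the other factor and the network as arguments, as the source above suggests.)
###Code
f4 = make_factor('JohnCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f4_times_f5 = f4.pointwise_product(f5, burglary)
f4_times_f5.variables, f4_times_f5.cpt
###Output
_____no_output_____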
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
###Code
psource(Factor.sum_out)
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a factor eliminating a variable by summing over its values. Again **all_events** is used to generate combinations for the rest of the variables.
###Code
psource(sum_out)
###Output
_____no_output_____
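###Markdown
A hedged sketch of eliminating a variable with the module-level **sum_out** (assuming the `sum_out(var, factors, bn)` signature shown above): we build the factor for Alarm itself and then sum Alarm out of all three factors from above, which should leave a factor over Burglary and Earthquake, mirroring the book's example.
###Code
f3 = make_factor('Alarm', {'JohnCalls': True, 'MaryCalls': True}, burglary)
remaining_factors = sum_out('Alarm', [f3, f4, f5], burglary)
remaining_factors[0].variables, remaining_factors[0].cpt
###Output
_____no_output_____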
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values. Elimination AskThe algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes 3 arguments: **X** the query variable, **e** the evidence, and **bn** the Bayes network. The algorithm creates factors out of Bayes Nodes in reverse order and eliminates hidden variables using **sum_out**. Finally it takes a pointwise product of all factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
psource(elimination_ask)
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
_____no_output_____
###Markdown
Elimination Ask Optimizations`elimination_ask` has some critical points to consider and some optimizations can be performed:- **Operations on factors**: the `sum_out` and `pointwise_product` functions used in `elimination_ask` are where the space and time complexity of the variable elimination algorithm arise (AIMA3e pg. 526).>The only trick is to notice that any factor that does not depend on the variable to be summed out can be moved outside the summation.- **Variable ordering**: Elimination ordering is important; every choice of ordering yields a valid algorithm, but different orderings cause different intermediate factors to be generated during the calculation (AIMA3e pg. 527). In this case the algorithm applies a reversed order.> In general, the time and space requirements of variable elimination are dominated by the size of the largest factor constructed during the operation of the algorithm. This in turn is determined by the order of elimination of variables and by the structure of the network. It turns out to be intractable to determine the optimal ordering, but several good heuristics are available. One fairly effective method is a greedy one: eliminate whichever variable minimizes the size of the next factor to be constructed. - **Variable relevance**: Some variables can be irrelevant to resolving a query (i.e. they sum to 1). A variable elimination algorithm can therefore remove all these variables before evaluating the query (AIMA3e pg. 528).> An optimization is to remove 'every variable that is not an ancestor of a query variable or evidence variable is irrelevant to the query'. Runtime comparisonLet's see how the runtimes of these two algorithms compare.We expect variable elimination to outperform enumeration by a large margin, as we reduce the number of repetitive calculations significantly.
###Code
%%timeit
enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
%%timeit
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
1.3 ms ± 11 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
In this test case we observe that variable elimination is slower than we expected. This is partly due to interpreter overhead (the number of threads and how Python tries to optimize things) and partly because the network is very small, with just 5 nodes; `elimination_ask` also has the critical points discussed above, where optimizations can be performed. Of course, for more complicated networks, variable elimination will be significantly faster and the runtime will drop not just by a constant factor, but by a polynomial factor proportional to the number of nodes, due to the reduction in repeated calculations. Approximate Inference in Bayesian NetworksExact inference fails to scale for very large and complex Bayesian Networks. This section covers the implementation of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
psource(BayesNode.sample)
###Output
_____no_output_____
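###Markdown
As a quick demonstration of the method whose source is shown above, we can draw a few samples of Alarm from the burglary network given its parents' values; roughly 94% of such samples should come out True.
###Code
alarm_node_bn = burglary.variable_node('Alarm')
[alarm_node_bn.sample({'Burglary': True, 'Earthquake': False}) for _ in range(5)]
###Output
_____no_output_____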
###Markdown
Before we consider the different algorithms in this section let us look at the **BayesNode.sample** method. It samples from the distribution of this variable conditioned on the event's values for the parent variables. That is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it. Prior SamplingThe idea of Prior Sampling is to sample from the Bayesian Network in a topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
###Code
psource(prior_sample)
###Output
_____no_output_____
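###Markdown
The **sprinkler** network used below is a global already defined in `probability.py` and corresponds to **Figure 14.12** of the book. As a hedged sketch, it could be constructed with **BayesNet** roughly like this (the CPT values follow the figure):
###Code
T, F = True, False
sprinkler_net = BayesNet([
    ('Cloudy', '', 0.5),
    ('Sprinkler', 'Cloudy', {T: 0.10, F: 0.50}),
    ('Rain', 'Cloudy', {T: 0.80, F: 0.20}),
    ('WetGrass', 'Sprinkler Rain',
     {(T, T): 0.99, (T, F): 0.90, (F, T): 0.90, (F, F): 0.00})])
sprinkler_net
###Output
_____no_output_____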
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in topological order. The old value of the event is passed as evidence for the parent values. We will use the Bayesian Network in **Figure 14.12** to try out **prior_sample**. Traversing the graph in topological order is important.There are two possible topological orderings for this particular directed acyclic graph.1. `Cloudy -> Sprinkler -> Rain -> Wet Grass`2. `Cloudy -> Rain -> Sprinkler -> Wet Grass`We can follow either of the two orderings to sample from the network.Any ordering other than these two, however, cannot be used.One way to think about this is that `Cloudy` can be seen as a precondition of both `Rain` and `Sprinkler`, and just like we have seen in planning, preconditions need to be satisfied before a certain action can be executed.We store the samples as observations. Let us find **P(Rain=True)** by taking 1000 random samples from the network.
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
0.506
###Markdown
Sampling another time might give different results, as we have no control over the distribution of the random samples
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
answer = len(rain_true) / N
print(answer)
###Output
0.474
###Markdown
To evaluate a conditional distribution, we can use a two-step filtering process. We first separate out the samples that are consistent with the evidence. Then, for each value of the query variable, we can find probabilities. For example, to find **P(Cloudy=True | Rain=True)**: we have already filtered out the samples consistent with our evidence in **rain_true**, so now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
0.8059071729957806
###Markdown
Rejection SamplingRejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. Rejection sampling is advantageous only when we know the query beforehand.While prior sampling generally works for any query, it might fail in some scenarios.Let's say we have a generic Bayesian network and we have evidence `e`, and we want to know how many times a state `A` is true, given evidence `e` is true.Normally, prior sampling can answer this question, but let's assume that the probability of evidence `e` being true in our actual probability distribution is very small.In this situation, it might be possible that sampling never encounters a data-point where `e` is true.If our sampled data has no instance of `e` being true, our estimate of `P(e)` is 0, and therefore `P(A | e) = P(A, e) / P(e) = 0/0`, which is undefined.We cannot find the required value using this sample.We can definitely increase the number of sample points, but we can never guarantee that we will encounter a case where `e` is true (assuming our actual probability distribution has at least one case where `e` is true).To guarantee this, we would have to consider every single data point, which means we lose the speed advantage that approximation provides us and we essentially have to calculate the exact inference model of the Bayesian network.Rejection sampling will be useful in this situation, as we already know the query.While sampling from the network, we will reject any sample which is inconsistent with the evidence variables of the given query (in this example, the only evidence variable is `e`).We will only consider samples that do not violate **any** of the evidence variables.In this way, we will have enough data with the required evidence to infer queries involving a subset of that evidence.The function **rejection_sampling** implements the algorithm described by **Figure 14.14**
###Code
psource(rejection_sampling)
###Output
_____no_output_____
###Markdown
The function keeps counts of each of the possible values of the query variable and increases the count when we see an observation consistent with the evidence. It takes the input parameters **X** - the query variable, **e** - the evidence, **bn** - the Bayes net and **N** - the number of prior samples to generate. **consistent_with** is used to check consistency.
###Code
psource(consistent_with)
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
###Output
_____no_output_____
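###Markdown
**rejection_sampling** returns a probability distribution, so we can also view the normalized estimate for both values at once.
###Code
p.show_approx()
###Output
_____no_output_____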
###Markdown
Likelihood WeightingRejection sampling takes a long time to run when the probability of finding consistent evidence is low; it is also slow for larger networks and for more evidence variables, since it then tends to reject a lot of samples. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent. The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
psource(weighted_sample)
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from the Bayesian Network that's consistent with the evidence **e** and returns the event and its weight, the likelihood that the event accords with the evidence. It takes two parameters: **bn**, the Bayesian Network, and **e**, the evidence. The weight is obtained by multiplying **P(xi | parents(xi))** for each node in the evidence. We set **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
psource(likelihood_weighting)
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling** but instead of adding one for each sample we add the weight obtained from **weighted_sample**.
###Code
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Gibbs SamplingIn likelihood weighting, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood weighting. Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**
###Code
psource(gibbs_ask)
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values. We then repeatedly select a non-evidence variable and sample it from **P(Variable | values of all remaining variables in the current state)**. In practice, we speed this up by using **markov_blanket_sample** instead. This works because terms not involving the variable get canceled in the calculation. The arguments for **gibbs_ask** are similar to those of **likelihood_weighting**
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Runtime analysisLet's take a look at how much time each algorithm takes.
###Code
%%timeit
all_observations = [prior_sample(sprinkler) for x in range(1000)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
len([observation for observation in rain_true if observation['Cloudy'] == True]) / len(rain_true)
%%timeit
rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
%%timeit
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200)
%%timeit
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200)
###Output
45.4 ms ± 484 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
###Markdown
As expected, all algorithms have a very similar runtime.However, rejection sampling would be a lot faster and more accurate when the probability of finding data-points consistent with the required evidence is small.Likelihood weighting is the fastest out of all as it doesn't involve rejecting samples, but it also has quite high variance. HIDDEN MARKOV MODELS Often, we need to carry out probabilistic inference on temporal data or a sequence of observations where the order of observations matters.We require a model similar to a Bayesian Network, but one that grows over time to keep up with the latest evidence.If you are familiar with the `mdp` module or Markov models in general, you can probably guess that a Markov model might come close to representing our problem accurately.A Markov model is basically a chain-structured Bayesian Network in which there is one state for each time step and each node has an identical probability distribution.The first node, however, has a different distribution, called the prior distribution which models the initial state of the process.A state in a Markov model depends only on the previous state and the latest evidence and not on the states before it.A **Hidden Markov Model** or **HMM** is a special case of a Markov model in which the state of the process is described by a single discrete random variable.The possible values of the variable are the possible states of the world.But what if we want to model a process with two or more state variables?In that case, we can still fit the process into the HMM framework by redefining our state variables as a single "megavariable".We do this because there are standard, optimized algorithms for carrying out inference on HMMs.An HMM is very similar to an MDP, but we don't have the option of taking actions as in MDPs; instead, the process carries on as new evidence appears.If an HMM is truncated at a fixed length, it becomes a Bayesian network and general BN inference can be used on it to answer queries.Before we start, it will be helpful to understand the structure of a temporal model. We will use the example of the book with the guard and the umbrella. In this example, the state $\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) at Day $\textbf{t}$. In the sensor or observation model, the observation or evidence $\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\textbf{t}$. Based on that, the transition model is | $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}| X_{t-1})$| | ------------- |------------- | ----------------------------------|| ***${False}$*** | ***${False}$*** | 0.7 || ***${False}$*** | ***${True}$*** | 0.3 || ***${True}$*** | ***${False}$*** | 0.3 || ***${True}$*** | ***${True}$*** | 0.7 |And the sensor model will be,| $X_{t}$ | $U_{t}$ | **P**$(U_{t}|X_{t})$| | :-------------: |:-------------: | :------------------------:|| ***${False}$*** | ***${True}$*** | 0.2 || ***${False}$*** | ***${False}$*** | 0.8 || ***${True}$*** | ***${True}$*** | 0.9 || ***${True}$*** | ***${False}$*** | 0.1 | HMMs are implemented in the **`HiddenMarkovModel`** class.Let's have a look.
###Code
psource(HiddenMarkovModel)
###Output
_____no_output_____
###Markdown
We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model.
###Code
hmm.sensor_dist(ev=True)
###Output
_____no_output_____
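###Markdown
For completeness, the sensor distribution for the case where no umbrella is observed can be retrieved the same way.
###Code
hmm.sensor_dist(ev=False)
###Output
_____no_output_____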
###Markdown
Now that we have defined an HMM object, our task here is to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$ given evidence **U** at each time step **t**.The basic inference tasks that must be solved are:1. **Filtering**: Computing the posterior probability distribution over the most recent state, given all the evidence up to the current time step.2. **Prediction**: Computing the posterior probability distribution over a future state.3. **Smoothing**: Computing the posterior probability distribution over a past state. Smoothing provides a better estimation as it incorporates more evidence.4. **Most likely explanation**: Finding the most likely sequence of states for a given observation5. **Learning**: The transition and sensor models can be learnt, if not yet known, just like in an information gathering agentThere are three primary methods to carry out inference in Hidden Markov Models:1. The Forward-Backward algorithm2. Fixed lag smoothing3. Particle filteringLet's have a look at how we can carry out inference and answer queries based on our umbrella HMM using these algorithms. FORWARD-BACKWARDThis is a general algorithm that works for all Markov models, not just HMMs.In the filtering task (inference) we are given evidence **U** at each time step **t** and we want to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$. We can think of it as a three step process:1. In every step we start with the current belief $P(X_{t}|e_{1:t})$2. We update it for time3. We update it for evidenceThe forward algorithm performs steps 2 and 3 at once. It updates, or rather reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume equal probabilities of rain and no rain. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are by default [0.5, 0.5]. The observation update is calculated with the **`forward()`** function. Basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**.
###Code
psource(forward)
umbrella_prior = [0.5, 0.5]
belief_day_1 = forward(hmm, umbrella_prior, ev=True)
print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))
###Output
The probability of raining on day 1 is 0.82
###Markdown
In **Day 2** our initial belief is the updated belief of **Day 1**.Again using the **`forward()`** function we can compute the probability of raining in **Day 2**
###Code
belief_day_2 = forward(hmm, belief_day_1, ev=True)
print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))
###Output
The probability of raining in day 2 is 0.88
###Markdown
In the smoothing part we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for a time **k**, with $0\leq k<t $. The computation can be divided into two parts: 1. The forward message is computed by filtering forward from 1 to **k**.2. The backward message is computed by a recursive process that runs from **t** back to **k**. Rather than starting at time 1, the algorithm starts at time **t**. In the umbrella example, we can compute the backward message from **Day 2** to **Day 1** by using the `backward` function. The `backward` function takes as parameters the object created by the **`HiddenMarkovModel`** class, the evidence on **Day 2** (in our case **True**), and the initial probabilities of being in a state at time t+1. Since no observation is available there, it will be [1, 1]. The `backward` function will return a list with the conditional probabilities.
###Code
psource(backward)
b = [1, 1]
backward(hmm, b, ev=True)
###Output
_____no_output_____
###Markdown
Some may notice that the result is not the same as in the book. The main reason is that in the book the normalization step is not used. If we want to normalize the result, we can use the **`normalize()`** helper function.In order to find the smoothed estimate for rain on **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed on both days and the prior distribution is [0.5, 0.5]
###Code
pseudocode('Forward-Backward')
umbrella_prior = [0.5, 0.5]
prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)
print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))
###Output
_____no_output_____
###Markdown
Since HMMs are represented as single variable systems, we can represent the transition model and sensor model as matrices.The `forward_backward` algorithm can be easily carried out on this representation (as we have done here) with a time complexity of $O({S}^{2} t)$ where t is the length of the sequence and each step multiplies a vector of size $S$ with a matrix of dimensions $S \times S$.Additionally, the forward pass stores $t$ vectors of size $S$ which makes the auxiliary space requirement equivalent to $O(St)$.Is there any way we can improve the time or space complexity?Fortunately, the matrix representation of HMM properties allows us to do so.If $f$ and $b$ represent the forward and backward messages respectively, we can modify the smoothing algorithm by first running the standard forward pass to compute $f_{1:t}$ (forgetting all the intermediate results) and then running the backward pass for both $b$ and $f$ together, using them to compute the smoothed estimate at each step.This optimization reduces the auxiliary space requirement to constant (irrespective of the length of the sequence) provided the transition matrix is invertible and the sensor model has no zeros (which is sometimes hard to accomplish). Let's look at another algorithm that carries out smoothing in a more optimized way. FIXED LAG SMOOTHINGThe matrix formulation allows us to optimize online smoothing with a fixed lag.Since smoothing can be done in constant space, there should exist an algorithm whose time complexity is independent of the length of the lag.For smoothing a time slice $t - d$ where $d$ is the lag, we need to compute $\alpha f_{1:t-d} \times b_{t-d+1:t}$ incrementally.As we already know, the forward equation is$$f_{1:t+1} = \alpha O_{t+1}{T}^{T}f_{1:t}$$and the backward equation is$$b_{k+1:t} = TO_{k+1}b_{k+2:t}$$where $T$ and $O$ are the transition and sensor models respectively.For smoothing, the forward message is easy to compute but there exists no simple relation between the backward message of this time step and the one at the previous time step, hence we apply the backward equation $d$ times to get$$b_{t-d+1:t} = \left ( \prod_{i=t-d+1}^{t}{TO_i} \right )b_{t+1:t} = B_{t-d+1:t}1$$where $B_{t-d+1:t}$ is the product of the sequence of $T$ and $O$ matrices.Here's how the `probability` module implements `fixed_lag_smoothing`.
###Code
psource(fixed_lag_smoothing)
###Output
_____no_output_____
###Markdown
This algorithm applies `forward` as usual and optimizes the smoothing step by using the equations above.This optimization could be achieved only because HMM properties can be represented as matrices.`vector_to_diagonal`, `matrix_multiplication` and `inverse_matrix` are matrix manipulation functions to simplify the implementation.`normalize` is used to normalize the output before returning it. Here's how we can use `fixed_lag_smoothing` for inference on our umbrella HMM.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
Given evidence T, F, T, F and T, we want to calculate the probability distribution for the fourth day with a fixed lag of 2 days.Let `e_t = False`
###Code
e_t = F
evidence = [T, F, T, F, T]
fixed_lag_smoothing(e_t, hmm, d=2, ev=evidence, t=4)
e_t = T
evidence = [T, T, F, T, T]
fixed_lag_smoothing(e_t, hmm, d=1, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
We cannot calculate probability distributions when $t$ is less than $d$
###Code
fixed_lag_smoothing(e_t, hmm, d=5, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
As expected, the output is `None` PARTICLE FILTERINGThe filtering problem is too expensive to solve using the previous methods for problems with large or continuous state spaces.Particle filtering is a method that can solve the same problem when the state space is a lot larger, where we wouldn't be able to do these computations fast enough as time goes by, and we want to keep track of things as they happen.The downside is that it is a sampling method and hence isn't exact, but the more samples we're willing to take, the more accurate we get.In this method, instead of keeping track of the probability distribution, we will drop particles in a similar proportion at the required regions.The internal representation of this distribution is usually a list of particles with coordinates in the state-space.A particle is just a new name for a sample.Particle filtering can be divided into four steps:1. __Initialization__: If we have some idea about the prior probability distribution, we drop the initial particles accordingly, or else we just drop them uniformly over the state space.2. __Forward pass__: As time goes by and measurements come in, we are going to move the selected particles into the grid squares that make the most sense in terms of representing the distribution that we are trying to track.When time goes by, we just loop through all our particles and try to simulate what could happen to each one of them by sampling its next position from the transition model.This is like prior sampling - samples' frequencies reflect the transition probabilities.If we have enough samples we are pretty close to exact values.We work through the list of particles, one particle at a time; all we do is stochastically simulate what the outcome might be.If we had no dimension of time, and we had no new measurements come in, this would be exactly the same as what we did in prior sampling.3. __Reweight__:As observations come in, don't sample the observations, fix them and downweight the samples based on the evidence just like in likelihood weighting.$$w(x) = P(e\mid x)$$$$B(X) \propto P(e\mid X)B'(X)$$As before, the probabilities don't sum to one, since most have been downweighted.They sum to an approximation of $P(e)$.To normalize the resulting distribution, we can divide by $P(e)$. Likelihood weighting wasn't the best thing for Bayesian networks, because we were not accounting for the incoming evidence so we were getting samples from the prior distribution, in some sense not the right distribution, so we might end up with a lot of particles with low weights. These samples were very uninformative and the way we fixed it then was by using __Gibbs sampling__.Theoretically, Gibbs sampling can be run on an HMM, but whereas in a Bayesian network we could iterate over the process as many times as we liked, we cannot do that here, as we have new incoming evidence and we also need computational cycles to propagate through time.We end up with a lot of samples with very low weight that are not representative of the _actual probability distribution_.So if we keep running likelihood weighting, we keep propagating the samples with smaller weights and carry out computations for them even though these samples have no significant contribution to the actual probability distribution.This is why we require the last step.4. 
__Resample__: Rather than tracking weighted samples, we _resample_.We choose from our weighted sample distribution as many times as the number of particles we initially had, with replacement, so that we have a constant number of particles.This is equivalent to renormalizing the distribution.The samples with low weight are rarely chosen in the new distribution after resampling.This newer set of particles after resampling is in some sense more representative of the actual distribution, so we allocate our computational cycles better.Now the update is complete for this time step; continue with the next one.Let's see how this is implemented in the module.
###Code
psource(particle_filtering)
###Output
_____no_output_____
###Markdown
Here, `scalar_vector_product` and `vector_add` are helper functions for vector math, and `weighted_sample_with_replacement` resamples from a weighted sample with replacement, as is obvious from the name. This implementation considers two state variables with generic names 'A' and 'B'. Here's how we can use `particle_filtering` on our umbrella HMM, though it doesn't make much sense to use particle filtering on a problem with such a small state space. It is just to get familiar with the syntax.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
particle_filtering(T, 10, hmm)
###Output
_____no_output_____
###Markdown
We got 5 samples from state `A` and 5 samples from state `B`
###Code
particle_filtering([F, T, F, F, T], 10, hmm)
###Output
_____no_output_____
###Markdown
This time we got 2 samples from state `A` and 8 samples from state `B` Comparing runtimes for these algorithms will not be useful, as each solves the filtering task efficiently for a different scenario.`forward_backward` calculates the exact probability distribution.`fixed_lag_smoothing` calculates an approximate distribution and its runtime will depend on the value of the lag chosen.`particle_filtering` is an efficient method for approximating distributions for a very large or continuous state space. MONTE CARLO LOCALIZATIONIn the domain of robotics, particle filtering is used for _robot localization_.__Localization__ is the problem of finding out where things are; in this case, we want to find the position of a robot in a continuous state space.__Monte Carlo Localization__ is an algorithm for robots to _localize_ using a _particle filter_.Given a map of the environment, the algorithm estimates the position and orientation of a robot as it moves and senses the environment.Initially, particles are distributed uniformly over the state space, i.e. the robot has no information about where it is and assumes it is equally likely to be at any point in space.When the robot moves, it analyses the incoming evidence to shift and change the probability to better approximate the probability distribution of its position.The particles are then resampled based on their weights.Gradually, as more evidence comes in, the robot gets better at approximating its location and the particles converge towards the actual position of the robot.The pose of a robot is defined by its two Cartesian coordinates with values $x$ and $y$ and its direction with value $\theta$.We use the kinematic equations of motion to model a deterministic state prediction.This is our motion model (or transition model).Next, we need a sensor model.There can be two kinds of sensor models: the first assumes that the sensors detect _stable_, _recognizable_ features of the environment called __landmarks__.The robot senses the location and bearing of each landmark and updates its belief according to that.We can also assume the noise in measurements to be Gaussian, to simplify things.Another kind of sensor model is used for an array of range sensors, each of which has a fixed bearing relative to the robot.These sensors provide a set of range values in each direction.This will also be corrupted by Gaussian noise, but we can assume that the errors for different beam directions are independent and identically distributed.After evidence comes in, the robot updates its belief state and reweights the particle distribution to better approximate the actual distribution.Let's have a look at how this algorithm is implemented in the module
###Code
psource(monte_carlo_localization)
###Output
_____no_output_____
###Markdown
Our implementation of Monte Carlo Localization uses the range scan method.The `ray_cast` helper function casts rays in different directions and stores the range values.`a` stores the `v` and `w` components of the robot's velocity.`z` is a range scan.`P_motion_sample` is the motion or transition model.`P_sensor` is the range sensor noise model.`m` is the 2D map of the environment.`S` is a vector of samples of size N. We'll now define a simple 2D map to run Monte Carlo Localization on.Let's say this is the map we want
###Code
m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])
heatmap(m.m, cmap='binary')
###Output
_____no_output_____
###Markdown
Let's define the motion model as a function `P_motion_sample`.
###Code
def P_motion_sample(kin_state, v, w):
"""Sample from possible kinematic states.
    Returns from a single element distribution (no uncertainty in motion)"""
pos = kin_state[:2]
orient = kin_state[2]
# for simplicity the robot first rotates and then moves
orient = (orient + w)%4
for _ in range(orient):
v = (v[1], -v[0])
pos = vector_add(pos, v)
return pos + (orient,)
###Output
_____no_output_____
###Markdown
Define the sensor model as a function `P_sensor`.
###Code
def P_sensor(x, y):
"""Conditional probability for sensor reading"""
# Need not be exact probability. Can use a scaled value.
if x == y:
return 0.8
elif abs(x - y) <= 2:
return 0.05
else:
return 0
###Output
_____no_output_____
###Markdown
Initializing variables.
###Code
a = {'v': (0, 0), 'w': 0}
z = (2, 4, 1, 6)
###Output
_____no_output_____
###Markdown
Let's run `monte_carlo_localization` with these parameters to find a sample distribution S.
###Code
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)
###Output
_____no_output_____
###Markdown
Let's plot the values in the sample distribution `S`.
###Code
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
_____no_output_____
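###Markdown
As a quick check (this cell is an addition for illustration, not part of the module), we can find the grid cell that collected the most particles:
###Code
# Find the (row, column) cell with the highest particle count in the grid computed above.
best_cell = max(((r, c) for r in range(11) for c in range(17)), key=lambda rc: grid[rc[0]][rc[1]])
print(best_cell, grid[best_cell[0]][best_cell[1]])
###Output
_____no_output_____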
###Markdown
The distribution is highly concentrated at `(5, 3)`, but the robot is not very confident about its position as some other cells also have high probability values. Let's look at another scenario.
###Code
a = {'v': (0, 1), 'w': 0}
z = (2, 3, 5, 7)
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
_____no_output_____
###Markdown
In this case, the robot is 99.9% certain that it is at position `(6, 7)`. DECISION THEORETIC AGENTWe now move into the domain of probabilistic decision making.To make choices between different possible plans in a certain situation in a given environment, an agent must have _preference_ between the possible outcomes of the various plans.__Utility theory__ is used to represent and reason with preferences.The agent prefers states with a higher _utility_.While constructing multi-agent systems, one major element in the design is the mechanism the agents use for making decisions about which actions to adopt in order to achieve their goals.What is usually required is a mechanism which ensures that the actions adopted lead to benefits for both individual agents, and the community of which they are part.The utility of a state is _relative_ to an agent.Preferences, as expressed by utilities, are combined with probabilities in the general theory of rational decisions called __decision theory__.An agent is said to be _rational_ if and only if it chooses the action that yields the highest expected utility, averaged over all the possible outcomes of the action. Here we'll see how a decision-theoretic agent is implemented in the module.
###Code
psource(DTAgentProgram)
###Output
_____no_output_____
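###Markdown
Before looking at the description of `DTAgentProgram`, here is a minimal, self-contained sketch of the maximum-expected-utility rule it relies on (the names and numbers below are made up for illustration; this is not the module's implementation):
###Code
# Sketch: a rational agent picks the action with the highest expected utility.
def expected_utility(action, outcome_probs, utility):
    """Sum of P(outcome | action) * U(outcome) over all outcomes."""
    return sum(p * utility[s] for s, p in outcome_probs[action].items())

def rational_action(actions, outcome_probs, utility):
    return max(actions, key=lambda a: expected_utility(a, outcome_probs, utility))

# Toy example with made-up numbers.
outcome_probs = {'take_umbrella': {'dry': 1.0}, 'leave_it': {'dry': 0.6, 'wet': 0.4}}
utility = {'dry': 10, 'wet': -20}
rational_action(['take_umbrella', 'leave_it'], outcome_probs, utility)
###Output
_____no_output_____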
###Markdown
The `DTAgentProgram` function is pretty self-explanatory.It encapsulates a function `program` that takes in an observation or a `percept`, updates its `belief_state` and returns the action that maximizes the `expected_outcome_utility`. INFORMATION GATHERING AGENTBefore we discuss what an information gathering agent is, we'll need to know what decision networks are.For an agent in an environment, a decision network represents information about the agent's current state, its possible actions, the state that will result from the agent's action, and the utility of that state.Decision networks have three primary kinds of nodes which are:1. __Chance nodes__: These represent random variables, just like in Bayesian networks.2. __Decision nodes__: These represent points where the decision-maker has a choice between different actions and the decision maker tries to find the optimal decision at these nodes with regard to the cost, safety and resulting utility.3. __Utility nodes__: These represent the agent's utility function.A description of the agent's utility as a function is associated with a utility node.To evaluate a decision network, we do the following:1. Initialize the evidence variables according to the current state.2. Calculate posterior probabilities for each possible value of the decision node and calculate the utility resulting from that action.3. Return the action with the highest utility.Let's have a look at the implementation of the `DecisionNetwork` class.
###Code
psource(DecisionNetwork)
###Output
_____no_output_____
###Markdown
The `DecisionNetwork` class inherits from `BayesNet` and has a few extra helper methods.`best_action` returns the best action in the network.`get_utility` is an abstract method which is supposed to return the utility of a particular action and state in the network.`get_expected_utility` computes the expected utility, given an action and evidence. Before we proceed, we need to know a few more terms.Having __perfect information__ refers to a state of being fully aware of the current state, the cost functions and the outcomes of actions.This in turn allows an agent to find the exact utility value of each state.If an agent has perfect information about the environment, maximum expected utility calculations are exact and can be computed with absolute certainty.In decision theory, the __value of perfect information__ (VPI) is the price that an agent would be willing to pay in order to gain access to _perfect information_.VPI calculations are extensively used to calculate expected utilities for nodes in a decision network.For a random variable $E_j$ whose value is currently unknown, the value of discovering $E_j$, given current information $e$ must average over all possible values $e_{jk}$ that we might discover for $E_j$, using our _current_ beliefs about its value.The VPI of $E_j$ is then given by:$$VPI_e(E_j) = \left(\sum_{k}P(E_j=e_{jk}\ |\ e) EU(\alpha_{e_{jk}}\ |\ e, E_j=e_{jk})\right) - EU(\alpha\ |\ e)$$VPI is _non-negative_, _non-additive_ and _order-independent_. An information gathering agent is an agent with certain properties that explores decision networks as and when required with heuristics driven by VPI calculations of nodes.A sensible agent should ask questions in a reasonable order, should avoid asking irrelevant questions, should take into account the importance of each piece of information in relation to its cost and should stop asking questions when that is appropriate._VPI_ is used as the primary heuristic to consider all these points in an information gathering agent as the agent ultimately wants to maximize the utility and needs to find the optimal cost and extent of finding the required information.As an overview, an information gathering agent works by repeatedly selecting the observations with the highest information value, until the cost of the next observation is greater than its expected benefit.The `InformationGatheringAgent` class is an abstract class that inherits from `Agent` and works on the principles discussed above.Let's have a look.
###Code
psource(InformationGatheringAgent)
###Output
_____no_output_____
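###Markdown
As a rough sketch of the loop described above (illustrative only; `vpi`, `cost` and `observe` are hypothetical helpers, not functions from the module):
###Code
# Keep requesting the most valuable observation until it costs more than it is worth.
def gather_information(candidate_vars, vpi, cost, observe):
    evidence = {}
    while candidate_vars:
        best = max(candidate_vars, key=lambda v: vpi(v, evidence) - cost(v))
        if vpi(best, evidence) <= cost(best):
            break  # the next observation is not worth its price
        evidence[best] = observe(best)
        candidate_vars = [v for v in candidate_vars if v != best]
    return evidence
###Output
_____no_output_____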
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** which is part of the node. This works by extending evidence related to the node, thus all the output from **all_events** only includes events that support the evidence. Since **all_events** is a generator function, one such event is returned on every call. We can try this out using the example on **Page 524** of the book. We will make **f5**(A) = P(m | A).
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability for **P(MaryCalls=True | Alarm=False)**. Because our representation only stores probabilities for cases where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book, where the evidence is that Alarm = True.
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between the two cases. Again, the only rows included are those consistent with the evidence. Operations on FactorsWe are interested in two kinds of operations on factors. **Pointwise Product**, which is used to create joint distributions, and **Summing Out**, which is used for marginalization.
###Code
psource(Factor.pointwise_product)
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint via combining two factors. We take the union of **variables** of both the factors and then generate the **cpt** for the new factor using the **all_events** function. Note that, as before, we have eliminated rows that are not consistent with the evidence. The pointwise product assigns new probabilities by multiplying rows, similar to a database join.
###Code
psource(pointwise_product)
###Output
_____no_output_____
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
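To make the idea concrete, here is a toy version of a pointwise product on plain dictionaries, independent of the module's `Factor` class (the numbers are made up):
###Code
# P(A) and P(B | A) as plain dictionaries; multiplying matching rows gives P(A, B).
P_A = {True: 0.3, False: 0.7}
P_B_given_A = {(True, True): 0.9, (True, False): 0.1,
               (False, True): 0.2, (False, False): 0.8}
P_AB = {(a, b): P_A[a] * P_B_given_A[(a, b)]
        for a in (True, False) for b in (True, False)}
P_AB
###Output
_____no_output_____
###Markdown
Now let's look at how a variable is summed out of a factor.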
###Code
psource(Factor.sum_out)
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a factor eliminating a variable by summing over its values. Again, **all_events** is used to generate combinations for the rest of the variables.
###Code
psource(sum_out)
###Output
_____no_output_____
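###Markdown
Continuing the toy dictionaries from above, summing A out of P(A, B) recovers P(B) (again a hand-rolled illustration, not the module's `sum_out`):
###Code
# Marginalize A out of the toy joint P(A, B) built earlier.
P_B = {b: sum(P_AB[(a, b)] for a in (True, False)) for b in (True, False)}
P_B   # {True: 0.41, False: 0.59}
###Output
_____no_output_____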
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values. Elimination AskThe algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes three arguments: **X**, the query variable; **e**, the evidence; and **bn**, the Bayes network. The algorithm creates factors out of Bayes Nodes in reverse order and eliminates hidden variables using **sum_out**. Finally, it takes a pointwise product of all factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
psource(elimination_ask)
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
_____no_output_____
###Markdown
Runtime comparisonLet's see how the runtimes of these two algorithms compare.We expect variable elimination to outperform enumeration by a large margin as we reduce the number of repetitive calculations significantly.
###Code
%%timeit
enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
%%timeit
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
241 µs ± 64.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
We observe that variable elimination was faster than enumeration, as we had expected, but the gain in speed is not a lot; in fact, it is just about 30% faster.This happened because the Bayesian network in question is pretty small, with just 5 nodes, some of which aren't even required in the inference process.For more complicated networks, variable elimination will be significantly faster and runtime will reduce not just by a constant factor, but by a polynomial factor proportional to the number of nodes, due to the reduction in repeated calculations. Approximate Inference in Bayesian NetworksExact inference fails to scale for very large and complex Bayesian Networks. This section covers the implementation of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
psource(BayesNode.sample)
###Output
_____no_output_____
###Markdown
Before we consider the different algorithms in this section, let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on the event's values for parent_variables. That is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it. Prior SamplingThe idea of Prior Sampling is to sample from the Bayesian Network in a topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
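Before moving on, here is a quick empirical check of the **probability** helper mentioned above (this cell is an addition; it assumes the helper is importable from the `utils` module):
###Code
# probability(p) should return True with probability p; estimate it by repetition.
from utils import probability
sum(probability(0.3) for _ in range(10000)) / 10000   # should be roughly 0.3
###Output
_____no_output_____
###Markdown
Now let's look at the implementation of `prior_sample`.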
###Code
psource(prior_sample)
###Output
_____no_output_____
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in the topological order. The old value of the event is passed as evidence for parent values. We will use the Bayesian Network in **Figure 14.12** to try out **prior_sample**.Traversing the graph in topological order is important.There are two possible topological orderings for this particular directed acyclic graph.1. `Cloudy -> Sprinkler -> Rain -> Wet Grass`2. `Cloudy -> Rain -> Sprinkler -> Wet Grass`We can follow either of the two orderings to sample from the network.Any ordering other than these two, however, cannot be used.One way to think about this is that `Cloudy` can be seen as a precondition of both `Rain` and `Sprinkler` and, just like we have seen in planning, preconditions need to be satisfied before a certain action can be executed.We store the samples as a list of observations. Let us find **P(Rain=True)** by taking 1000 random samples from the network.
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
0.496
###Markdown
Running the sampling another time might give different results, as we have no control over the distribution of the random samples.
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
answer = len(rain_true) / N
print(answer)
###Output
0.503
###Markdown
To evaluate a conditional distribution, we can use a two-step filtering process. We first separate out the samples that are consistent with the evidence. Then, for each value of the query variable, we can find probabilities. For example, to find **P(Cloudy=True | Rain=True)**, we have already filtered out the values consistent with our evidence in **rain_true**. Now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
0.8091451292246521
###Markdown
Rejection SamplingRejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. Rejection sampling is advantageous only when we know the query beforehand.While prior sampling generally works for any query, it might fail in some scenarios.Let's say we have a generic Bayesian network and we have evidence `e`, and we want to know how many times a state `A` is true, given evidence `e` is true.Normally, prior sampling can answer this question, but let's assume that the probability of evidence `e` being true in our actual probability distribution is very small.In this situation, it might be possible that sampling never encounters a data-point where `e` is true.If our sampled data has no instance of `e` being true, `P(e) = 0`, and therefore `P(A | e) = P(A, e) / P(e) = 0/0`, which is undefined.We cannot find the required value using this sample.We can definitely increase the number of sample points, but we can never guarantee that we will encounter a case where `e` is true (assuming our actual probability distribution has at least one case where `e` is true).To guarantee this, we would have to consider every single data point, which means we lose the speed advantage that approximation provides us and we essentially have to calculate the exact inference model of the Bayesian network.Rejection sampling will be useful in this situation, as we already know the query.While sampling from the network, we will reject any sample which is inconsistent with the evidence variables of the given query (in this example, the only evidence variable is `e`).We will only consider samples that do not violate **any** of the evidence variables.In this way, we will have enough data with the required evidence to infer queries involving a subset of that evidence.The function **rejection_sampling** implements the algorithm described by **Figure 14.14**
###Code
psource(rejection_sampling)
###Output
_____no_output_____
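###Markdown
To make the idea concrete, here is a compact sketch of rejection sampling built on the module's `prior_sample` (boolean variables assumed, as in the module; this is an illustration, not the module's code):
###Code
# Count query-variable values only over samples that agree with the evidence.
def rejection_sampling_sketch(X, e, bn, N):
    counts = {True: 0, False: 0}
    for _ in range(N):
        sample = prior_sample(bn)
        if all(sample[var] == val for var, val in e.items()):
            counts[sample[X]] += 1
    total = sum(counts.values())
    return {v: c / total for v, c in counts.items()} if total else counts
###Output
_____no_output_____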
###Markdown
The function keeps counts of each of the possible values of the Query variable and increases the count when we see an observation consistent with the evidence. It takes in input parameters **X** - The Query Variable, **e** - evidence, **bn** - Bayes net and **N** - number of prior samples to generate.**consistent_with** is used to check consistency.
###Code
psource(consistent_with)
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
###Output
_____no_output_____
###Markdown
Likelihood WeightingRejection sampling takes a long time to run when the probability of finding consistent evidence is low. It is also slow for larger networks and more evidence variables.Rejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
psource(weighted_sample)
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from the Bayesian network that's consistent with the evidence **e** and returns the event and its weight, the likelihood that the event accords with the evidence. It takes two parameters: **bn**, the Bayesian network, and **e**, the evidence.The weight is obtained by multiplying **P(xi | parents(xi))** for each node in the evidence. We set the values of **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
psource(likelihood_weighting)
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling**, but instead of adding one for each sample we add the weight obtained from **weighted_sample**.
###Code
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Gibbs SamplingIn likelihood weighting, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood weighting.Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**
###Code
psource(gibbs_ask)
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values. We then repeatedly pick a non-evidence variable and sample it from **P(Variable | values in the current state of all remaining variables)**. In practice, we speed this up by using **markov_blanket_sample** instead. This works because terms not involving the variable cancel in the calculation. The arguments for **gibbs_ask** are similar to those of **likelihood_weighting**
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Runtime analysisLet's take a look at how much time each algorithm takes.
###Code
%%timeit
all_observations = [prior_sample(sprinkler) for x in range(1000)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
len([observation for observation in rain_true if observation['Cloudy'] == True]) / len(rain_true)
%%timeit
rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
%%timeit
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200)
%%timeit
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200)
###Output
7.03 ms ± 117 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
As expected, all algorithms have a very similar runtime.However, rejection sampling would be a lot faster and more accurate when the probability of finding data-points consistent with the required evidence is small.Likelihood weighting is the fastest out of all as it doesn't involve rejecting samples, but it also has quite high variance. HIDDEN MARKOV MODELS Often, we need to carry out probabilistic inference on temporal data or a sequence of observations where the order of observations matters.We require a model similar to a Bayesian Network, but one that grows over time to keep up with the latest evidence.If you are familiar with the `mdp` module or Markov models in general, you can probably guess that a Markov model might come close to representing our problem accurately.A Markov model is basically a chain-structured Bayesian Network in which there is one state for each time step and each node has an identical probability distribution.The first node, however, has a different distribution, called the prior distribution which models the initial state of the process.A state in a Markov model depends only on the previous state and the latest evidence and not on the states before it.A **Hidden Markov Model** or **HMM** is a special case of a Markov model in which the state of the process is described by a single discrete random variable.The possible values of the variable are the possible states of the world.But what if we want to model a process with two or more state variables?In that case, we can still fit the process into the HMM framework by redefining our state variables as a single "megavariable".We do this because carrying out inference on HMMs has standard optimized algorithms.An HMM is very similar to an MDP, but we don't have the option of taking actions like in MDPs; instead, the process carries on as new evidence appears.If an HMM is truncated at a fixed length, it becomes a Bayesian network and general BN inference can be used on it to answer queries.Before we start, it will be helpful to understand the structure of a temporal model. We will use the example of the book with the guard and the umbrella. In this example, the state $\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) at Day $\textbf{t}$. In the sensor or observation model, the observation or evidence $\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\textbf{t}$. Based on that, the transition model is | $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}| X_{t-1})$| | ------------- |------------- | ----------------------------------|| ***${False}$*** | ***${False}$*** | 0.7 || ***${False}$*** | ***${True}$*** | 0.3 || ***${True}$*** | ***${False}$*** | 0.3 || ***${True}$*** | ***${True}$*** | 0.7 |And the sensor model will be,| $X_{t}$ | $U_{t}$ | **P**$(U_{t}|X_{t})$| | :-------------: |:-------------: | :------------------------:|| ***${False}$*** | ***${True}$*** | 0.2 || ***${False}$*** | ***${False}$*** | 0.8 || ***${True}$*** | ***${True}$*** | 0.9 || ***${True}$*** | ***${False}$*** | 0.1 | HMMs are implemented in the **`HiddenMarkovModel`** class.Let's have a look.
###Code
psource(HiddenMarkovModel)
###Output
_____no_output_____
###Markdown
We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model.
###Code
hmm.sensor_dist(ev=True)
###Output
_____no_output_____
###Markdown
Now that we have defined an HMM object, our task here is to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$ given evidence **U** at each time step **t**.The basic inference tasks that must be solved are:1. **Filtering**: Computing the posterior probability distribution over the most recent state, given all the evidence up to the current time step.2. **Prediction**: Computing the posterior probability distribution over the future state.3. **Smoothing**: Computing the posterior probability distribution over a past state. Smoothing provides a better estimation as it incorporates more evidence.4. **Most likely explanation**: Finding the most likely sequence of states for a given observation5. **Learning**: The transition and sensor models can be learnt, if not yet known, just like in an information gathering agentThere are three primary methods to carry out inference in Hidden Markov Models:1. The Forward-Backward algorithm2. Fixed lag smoothing3. Particle filteringLet's have a look at how we can carry out inference and answer queries based on our umbrella HMM using these algorithms. FORWARD-BACKWARDThis is a general algorithm that works for all Markov models, not just HMMs.In the filtering task (inference) we are given evidence **U** at each time **t** and we want to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$. We can think of it as a three-step process:1. In every step we start with the current belief $P(X_{t}|e_{1:t})$2. We update it for time3. We update it for evidenceThe forward algorithm performs steps 2 and 3 at once. It updates, or rather reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume that rain and no rain are equally likely. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are by default [0.5, 0.5]. The observation update is calculated with the **`forward()`** function. Basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**.
###Code
psource(forward)
umbrella_prior = [0.5, 0.5]
belief_day_1 = forward(hmm, umbrella_prior, ev=True)
print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))
###Output
The probability of raining on day 1 is 0.82
###Markdown
In **Day 2** our initial belief is the updated belief of **Day 1**.Again using the **`forward()`** function we can compute the probability of raining in **Day 2**
###Code
belief_day_2 = forward(hmm, belief_day_1, ev=True)
print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))
###Output
The probability of raining in day 2 is 0.88
###Markdown
In the smoothing part we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for time **k**, for $0\leq k<t $. The computation can be divided into two parts: 1. The forward message will be computed up to **k** by filtering forward from 1 to **k**.2. The backward message can be computed by a recursive process that runs from **k** to **t**. Rather than starting at time 1, the algorithm starts at time **t**. In the umbrella example, we can compute the backward message from **Day 2** to **Day 1** by using the `backward` function. The `backward` function has as parameters the object created by the **`HiddenMarkovModel`** class, the evidence in **Day 2** (in our case **True**), and the initial probabilities of being in a state at time t+1. Since no observation is available, it will be [1, 1]. The `backward` function will return a list with the conditional probabilities.
###Code
psource(backward)
b = [1, 1]
backward(hmm, b, ev=True)
###Output
_____no_output_____
###Markdown
Some may notice that the result is not the same as in the book. The main reason is that in the book the normalization step is not used. If we want to normalize the result, we can use the **`normalize()`** helper function.In order to find the smoothed estimate for rain on **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed on both days and the prior distribution is [0.5, 0.5]
###Code
pseudocode('Forward-Backward')
umbrella_prior = [0.5, 0.5]
prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)
print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))
###Output
The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88
###Markdown
Since HMMs are represented as single variable systems, we can represent the transition model and sensor model as matrices.The `forward_backward` algorithm can be easily carried out on this representation (as we have done here) with a time complexity of $O({S}^{2} t)$ where t is the length of the sequence and each step multiplies a vector of size $S$ with a matrix of dimensions $S \times S$.Additionally, the forward pass stores $t$ vectors of size $S$ which makes the auxiliary space requirement equivalent to $O(St)$.Is there any way we can improve the time or space complexity?Fortunately, the matrix representation of HMM properties allows us to do so.If $f$ and $b$ represent the forward and backward messages respectively, we can modify the smoothing algorithm by first running the standard forward pass to compute $f_{t:t}$ (forgetting all the intermediate results) and then running the backward pass for both $b$ and $f$ together, using them to compute the smoothed estimate at each step.This optimization reduces the auxiliary space requirement to constant (irrespective of the length of the sequence) provided the transition matrix is invertible and the sensor model has no zeros (which is sometimes hard to accomplish).Let's look at another algorithm that carries out smoothing in a more optimized way. FIXED LAG SMOOTHINGThe matrix formulation allows us to optimize online smoothing with a fixed lag.Since smoothing can be done in constant space, there should exist an algorithm whose time complexity is independent of the length of the lag.For smoothing a time slice $t - d$ where $d$ is the lag, we need to compute $\alpha f_{1:t-d} \times b_{t-d+1:t}$ incrementally.As we already know, the forward equation is$$f_{1:t+1} = \alpha O_{t+1}{T}^{T}f_{1:t}$$and the backward equation is$$b_{k+1:t} = TO_{k+1}b_{k+2:t}$$where $T$ and $O$ are the transition and sensor models respectively.For smoothing, the forward message is easy to compute but there exists no simple relation between the backward message of this time step and the one at the previous time step, hence we apply the backward equation $d$ times to get$$b_{t-d+1:t} = \left ( \prod_{i=t-d+1}^{t}{TO_i} \right )b_{t+1:t} = B_{t-d+1:t}1$$where $B_{t-d+1:t}$ is the product of the sequence of $T$ and $O$ matrices.Here's how the `probability` module implements `fixed_lag_smoothing`.
###Code
psource(fixed_lag_smoothing)
###Output
_____no_output_____
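###Markdown
Before using it, here is a small standalone check (using numpy; this cell is an addition, not part of the module) that the matrix-form forward equation above reproduces the `forward()` result from earlier:
###Code
import numpy as np
T = np.array([[0.7, 0.3], [0.3, 0.7]])   # transition matrix, states ordered [Rain, not Rain]
O_true = np.diag([0.9, 0.2])             # observation matrix for Umbrella = True
f = np.array([0.5, 0.5])                 # prior belief for Day 0
f = O_true @ T.T @ f                     # one forward update
f = f / f.sum()                          # normalize
print(f)                                 # approximately [0.818, 0.182], matching forward() above
###Output
_____no_output_____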
###Markdown
This algorithm applies `forward` as usual and optimizes the smoothing step by using the equations above.This optimization could be achieved only because HMM properties can be represented as matrices.`vector_to_diagonal`, `matrix_multiplication` and `inverse_matrix` are matrix manipulation functions to simplify the implementation.`normalize` is used to normalize the output before returning it. Here's how we can use `fixed_lag_smoothing` for inference on our umbrella HMM.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
Given evidence T, F, T, F and T, we want to calculate the probability distribution for the fourth day with a fixed lag of 2 days.Let `e_t = False`
###Code
e_t = F
evidence = [T, F, T, F, T]
fixed_lag_smoothing(e_t, hmm, d=2, ev=evidence, t=4)
e_t = T
evidence = [T, T, F, T, T]
fixed_lag_smoothing(e_t, hmm, d=1, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
We cannot calculate probability distributions when $t$ is less than $d$
###Code
fixed_lag_smoothing(e_t, hmm, d=5, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
As expected, the output is `None` PARTICLE FILTERINGThe filtering problem is too expensive to solve using the previous methods for problems with large or continuous state spaces.Particle filtering is a method that can solve the same problem when the state space is a lot larger, where we wouldn't be able to do these computations in a reasonable amount of time as time goes by, and we want to keep track of things as they happen.The downside is that it is a sampling method and hence isn't exact, but the more samples we're willing to take, the more accurate we'd get.In this method, instead of keeping track of the probability distribution, we will drop particles in a similar proportion at the required regions.The internal representation of this distribution is usually a list of particles with coordinates in the state-space.A particle is just a new name for a sample.Particle filtering can be divided into four steps:1. __Initialization__: If we have some idea about the prior probability distribution, we drop the initial particles accordingly, or else we just drop them uniformly over the state space.2. __Forward pass__: As time goes by and measurements come in, we are going to move the selected particles into the grid squares that make the most sense in terms of representing the distribution that we are trying to track.When time goes by, we just loop through all our particles and try to simulate what could happen to each one of them by sampling its next position from the transition model.This is like prior sampling - samples' frequencies reflect the transition probabilities.If we have enough samples we are pretty close to exact values.We work through the list of particles, one particle at a time; all we do is stochastically simulate what the outcome might be.If we had no dimension of time, and we had no new measurements come in, this would be exactly the same as what we did in prior sampling.3. __Reweight__: As observations come in, we don't sample the observations; we fix them and downweight the samples based on the evidence, just like in likelihood weighting.$$w(x) = P(e|x)$$$$B(X) \propto P(e|X)B'(X)$$As before, the probabilities don't sum to one, since most have been downweighted.They sum to an approximation of $P(e)$.To normalize the resulting distribution, we can divide by $P(e)$.Likelihood weighting wasn't the best thing for Bayesian networks, because we were not accounting for the incoming evidence, so we were getting samples from the prior distribution, in some sense not the right distribution, so we might end up with a lot of particles with low weights. These samples were very uninformative and the way we fixed it then was by using __Gibbs sampling__.Theoretically, Gibbs sampling can be run on an HMM, but as we iterated over the process infinitely many times in a Bayesian network, we cannot do that here as we have new incoming evidence and we also need computational cycles to propagate through time.We end up with a lot of samples with very low weight that are not representative of the _actual probability distribution_.So if we keep running likelihood weighting, we keep propagating the samples with smaller weights and carry out computations for them even though these samples have no significant contribution to the actual probability distribution.This is why we require the last step.4.
__Resample__:Rather than tracking weighted samples, we _resample_.We choose from our weighted sample distribution as many times as the number of particles we initially had and we replace these particles too, so that we have a constant number of particles.This is equivalent to renormalizing the distribution.The samples with low weight are rarely chosen in the new distribution after resampling.This newer set of particles after resampling is in some sense more representative of the actual distribution and so we are better allocating our computational cycles.Now the update is complete for this time step, continue with the next one.Let's see how this is implemented in the module.
###Code
psource(particle_filtering)
###Output
_____no_output_____
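###Markdown
To see just the reweight-and-resample step in isolation, here is a toy example (the particle labels and weights below are made up; `random.choices` is from the standard library, not the module):
###Code
import random
particles = ['A', 'A', 'B', 'B', 'B']
weights = [0.1, 0.2, 0.6, 0.5, 0.4]   # made-up likelihoods P(e | x) for each particle
# Resample with replacement in proportion to the weights; low-weight particles rarely survive.
random.choices(particles, weights=weights, k=len(particles))
###Output
_____no_output_____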
###Markdown
Here, `scalar_vector_product` and `vector_add` are helper functions for vector math, and `weighted_sample_with_replacement` resamples from a weighted sample with replacement, as the name suggests.This implementation considers two state variables with generic names 'A' and 'B'. Here's how we can use `particle_filtering` on our umbrella HMM, though it doesn't make much sense to use particle filtering on a problem with such a small state space.It is just to get familiar with the syntax.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
particle_filtering(T, 10, hmm)
###Output
_____no_output_____
###Markdown
We got 5 samples from state `A` and 5 samples from state `B`
###Code
particle_filtering([F, T, F, F, T], 10, hmm)
###Output
_____no_output_____
###Markdown
This time we got 2 samples from state `A` and 8 samples from state `B`. Comparing runtimes for these algorithms will not be useful, as each solves the filtering task efficiently for a different scenario.`forward_backward` calculates the exact probability distribution.`fixed_lag_smoothing` calculates an approximate distribution and its runtime will depend on the value of the lag chosen.`particle_filtering` is an efficient method for approximating distributions for a very large or continuous state space. MONTE CARLO LOCALIZATIONIn the domain of robotics, particle filtering is used for _robot localization_.__Localization__ is the problem of finding out where things are; in this case, we want to find the position of a robot in a continuous state space.__Monte Carlo Localization__ is an algorithm for robots to _localize_ using a _particle filter_.Given a map of the environment, the algorithm estimates the position and orientation of a robot as it moves and senses the environment.Initially, particles are distributed uniformly over the state space, i.e. the robot has no information about where it is and assumes it is equally likely to be at any point in space.When the robot moves, it analyses the incoming evidence to shift and change the probability to better approximate the probability distribution of its position.The particles are then resampled based on their weights.Gradually, as more evidence comes in, the robot gets better at approximating its location and the particles converge towards the actual position of the robot.The pose of a robot is defined by its two Cartesian coordinates with values $x$ and $y$ and its direction with value $\theta$.We use the kinematic equations of motion to model a deterministic state prediction.This is our motion model (or transition model).Next, we need a sensor model.There can be two kinds of sensor models: the first assumes that the sensors detect _stable_, _recognizable_ features of the environment called __landmarks__.The robot senses the location and bearing of each landmark and updates its belief according to that.We can also assume the noise in measurements to be Gaussian, to simplify things.Another kind of sensor model is used for an array of range sensors, each of which has a fixed bearing relative to the robot.These sensors provide a set of range values in each direction.This will also be corrupted by Gaussian noise, but we can assume that the errors for different beam directions are independent and identically distributed.After evidence comes in, the robot updates its belief state and reweights the particle distribution to better approximate the actual distribution.Let's have a look at how this algorithm is implemented in the module.
###Code
psource(monte_carlo_localization)
###Output
_____no_output_____
###Markdown
Our implementation of Monte Carlo Localization uses the range scan method.The `ray_cast` helper function casts rays in different directions and stores the range values.`a` stores the `v` and `w` components of the robot's velocity.`z` is a range scan.`P_motion_sample` is the motion or transition model.`P_sensor` is the range sensor noise model.`m` is the 2D map of the environment.`S` is a vector of samples of size N. We'll now define a simple 2D map to run Monte Carlo Localization on.Let's say this is the map we want.
###Code
m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])
heatmap(m.m, cmap='binary')
###Output
_____no_output_____
###Markdown
Let's define the motion model as a function `P_motion_sample`.
###Code
def P_motion_sample(kin_state, v, w):
"""Sample from possible kinematic states.
    Returns from a single element distribution (no uncertainty in motion)"""
pos = kin_state[:2]
orient = kin_state[2]
# for simplicity the robot first rotates and then moves
orient = (orient + w)%4
for _ in range(orient):
v = (v[1], -v[0])
pos = vector_add(pos, v)
return pos + (orient,)
###Output
_____no_output_____
###Markdown
Define the sensor model as a function `P_sensor`.
###Code
def P_sensor(x, y):
"""Conditional probability for sensor reading"""
# Need not be exact probability. Can use a scaled value.
if x == y:
return 0.8
elif abs(x - y) <= 2:
return 0.05
else:
return 0
###Output
_____no_output_____
###Markdown
Initializing variables.
###Code
a = {'v': (0, 0), 'w': 0}
z = (2, 4, 1, 6)
###Output
_____no_output_____
###Markdown
Let's run `monte_carlo_localization` with these parameters to find a sample distribution S.
###Code
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)
###Output
_____no_output_____
###Markdown
Let's plot the values in the sample distribution `S`.
###Code
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
GRID:
0 0 9 41 123 12 1 0 0 0 0 0 0 0 0 0 0
0 0 0 0 2 107 56 4 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 5 4 9 2 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 10 260 135 5 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 5 34 50 0 0 0 0 0 0 0 0 0 0
79 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
26 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
0 0 0 3 2 10 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
###Markdown
The distribution is highly concentrated at `(5, 3)`, but the robot is not very confident about its position as some other cells also have high probability values. Let's look at another scenario.
###Code
a = {'v': (0, 1), 'w': 0}
z = (2, 3, 5, 7)
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
GRID:
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 999 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
###Markdown
In this case, the robot is 99.9% certain that it is at position `(6, 7)`. DECISION THEORETIC AGENTWe now move into the domain of probabilistic decision making.To make choices between different possible plans in a certain situation in a given environment, an agent must have _preference_ between the possible outcomes of the various plans.__Utility theory__ is used to represent and reason with preferences.The agent prefers states with a higher _utility_.While constructing multi-agent systems, one major element in the design is the mechanism the agents use for making decisions about which actions to adopt in order to achieve their goals.What is usually required is a mechanism which ensures that the actions adopted lead to benefits for both individual agents, and the community of which they are part.The utility of a state is _relative_ to an agent.Preferences, as expressed by utilities, are combined with probabilities in the general theory of rational decisions called __decision theory__.An agent is said to be _rational_ if and only if it chooses the action that yields the highest expected utility, averaged over all the possible outcomes of the action. Here we'll see how a decision-theoretic agent is implemented in the module.
###Code
psource(DTAgentProgram)
###Output
_____no_output_____
###Markdown
The `DTAgentProgram` function is pretty self-explanatory.It encapsulates a function `program` that takes in an observation or a `percept`, updates its `belief_state` and returns the action that maximizes the `expected_outcome_utility`. INFORMATION GATHERING AGENTBefore we discuss what an information gathering agent is, we'll need to know what decision networks are.For an agent in an environment, a decision network represents information about the agent's current state, its possible actions, the state that will result from the agent's action, and the utility of that state.Decision networks have three primary kinds of nodes which are:1. __Chance nodes__: These represent random variables, just like in Bayesian networks.2. __Decision nodes__: These represent points where the decision-maker has a choice between different actions and the decision maker tries to find the optimal decision at these nodes with regard to the cost, safety and resulting utility.3. __Utility nodes__: These represent the agent's utility function.A description of the agent's utility as a function is associated with a utility node.To evaluate a decision network, we do the following:1. Initialize the evidence variables according to the current state.2. Calculate posterior probabilities for each possible value of the decision node and calculate the utility resulting from that action.3. Return the action with the highest utility.Let's have a look at the implementation of the `DecisionNetwork` class.
###Code
psource(DecisionNetwork)
###Output
_____no_output_____
###Markdown
The `DecisionNetwork` class inherits from `BayesNet` and has a few extra helper methods.`best_action` returns the best action in the network.`get_utility` is an abstract method which is supposed to return the utility of a particular action and state in the network.`get_expected_utility` computes the expected utility, given an action and evidence. Before we proceed, we need to know a few more terms.Having __perfect information__ refers to a state of being fully aware of the current state, the cost functions and the outcomes of actions.This in turn allows an agent to find the exact utility value of each state.If an agent has perfect information about the environment, maximum expected utility calculations are exact and can be computed with absolute certainty.In decision theory, the __value of perfect information__ (VPI) is the price that an agent would be willing to pay in order to gain access to _perfect information_.VPI calculations are extensively used to calculate expected utilities for nodes in a decision network.For a random variable $E_j$ whose value is currently unknown, the value of discovering $E_j$, given current information $e$ must average over all possible values $e_{jk}$ that we might discover for $E_j$, using our _current_ beliefs about its value.The VPI of $E_j$ is then given by:$$VPI_e(E_j) = \left(\sum_{k}P(E_j=e_{jk}\ |\ e) EU(\alpha_{e_{jk}}\ |\ e, E_j=e_{jk})\right) - EU(\alpha\ |\ e)$$VPI is _non-negative_, _non-additive_ and _order-independent_. An information gathering agent is an agent with certain properties that explores decision networks as and when required with heuristics driven by VPI calculations of nodes.A sensible agent should ask questions in a reasonable order, should avoid asking irrelevant questions, should take into account the importance of each piece of information in relation to its cost and should stop asking questions when that is appropriate._VPI_ is used as the primary heuristic to consider all these points in an information gathering agent as the agent ultimately wants to maximize the utility and needs to find the optimal cost and extent of finding the required information.As an overview, an information gathering agent works by repeatedly selecting the observations with the highest information value, until the cost of the next observation is greater than its expected benefit.The `InformationGatheringAgent` class is an abstract class that inherits from `Agent` and works on the principles discussed above.Let's have a look.
###Code
psource(InformationGatheringAgent)
###Output
_____no_output_____
###Markdown
Probability This IPy notebook acts as supporting material for **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning** and **Chapter 15 Probabilistic Reasoning over Time** of the book* Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
from notebook import *
###Output
_____no_output_____
###Markdown
Probability DistributionLet us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities to the values works much like using a dictionary: the key is the value of the random variable and we assign to it its probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
%psource ProbDist
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
###Markdown
The first parameter of the constructor **varname** has a default value of '?'. So if the name is not passed, it defaults to '?'. The keyword argument **freqs** can be a dictionary mapping each value of the random variable to its frequency. These are then normalized, using the **normalize** method, such that the probability values sum up to 1.
###Code
p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})
p.varname
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides **prob** and **varname**, the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability, it is appended to this list. This is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
###Markdown
It is also possible to display the approximate values, rounded to a given number of decimal places, using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability DistributionThe helper function **event_values** returns a tuple of the values of variables in event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the values of the variables. Variables are specified with a list. The ordering of the returned tuple is the same as that of the variables.Alternatively, if the event is specified by a list or tuple of the same length as the variables, then the event tuple is returned as it is.
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements this as the class **JointProbDist**, which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables.
###Code
%psource JointProbDist
###Output
_____no_output_____
###Markdown
A value for a Joint Distribution is an ordered tuple in which each item corresponds to the value associated with a particular variable. For a Joint Distribution of X, Y where X, Y take integer values, this can be something like (18, 19).To specify a Joint Distribution, we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class, **JointProbDist** also employs magic methods to assign probability to different values.The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint DistributionsIn this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values.This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in evidence or query variable) i.e. all possible combinations of **y**We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
###Markdown
Let us now look at the **enumerate_joint** function, which returns the sum of those entries in P consistent with e, provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function is implemented recursively: the first parameter **variables** refers to the remaining variables, and each recursive call keeps one variable constant while varying the others.
###Code
psource(enumerate_joint)
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
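# Cross-check by hand: the four entries of the full joint with Toothache=True
# sum to 0.108 + 0.012 + 0.072 + 0.008 = 0.2
0.108 + 0.012 + 0.072 + 0.008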
###Output
_____no_output_____
###Markdown
You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find the sum of probabilities consistent with given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)**, as we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and the denominator.
###Code
ans2/ans1
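# This is P(Cavity=True, Toothache=True) / P(Toothache=True) = 0.12 / 0.2 = 0.6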
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
psource(enumerate_joint_ask)
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
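# enumerate_joint_ask normalizes the distribution, so the two entries sum to 1
ans[True] + ans[False]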
###Output
_____no_output_____
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation. Bayesian NetworksA Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements.A Bayes Network is implemented as the class **BayesNet**. It consists of a collection of nodes implemented by the class **BayesNode**. The implementation in the above-mentioned classes focuses only on boolean variables. Each node is associated with a variable and contains a **conditional probability table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents, **P(X | parents)**.Let us dive into the **BayesNode** implementation.
###Code
psource(BayesNode)
###Output
_____no_output_____
###Markdown
The constructor takes in the name of the **variable**, its **parents** and a **cpt**. Here **variable** is the name of the variable, like 'Earthquake'. **parents** should be a list or a space-separated string with the variable names of the parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, giving the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combinations of boolean values that the parents take. The length and order of the values in each key should be the same as in the supplied **parents** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true).The example below, where we implement the network shown in **Figure 14.3** of the book, will make this more clear.The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalent to the john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can also use the simpler form below.
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
It is possible to use a node as a lookup function via the **p** method. The method takes two arguments, **value** and **event**. The event must be a dict of the form {variable: value, ...}, and the value corresponds to the value of the variable we are interested in (False or True).The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of the parents in the event (the event must assign each parent a value).
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
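# The True case reads directly from the cpt; the False case above is its complement (1 - 0.90)
john_node.p(True, {'Alarm': True})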
###Output
_____no_output_____
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
psource(BayesNet)
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds the node to the net: its parents must already be in the net, and its variable must not. Thus **add** allows us to grow a **BayesNet** as long as a node's parents are already present.The **burglary** global is an instance of **BayesNet** corresponding to the above example.

    T, F = True, False

    burglary = BayesNet([
        ('Burglary', '', 0.001),
        ('Earthquake', '', 0.002),
        ('Alarm', 'Burglary Earthquake',
         {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
        ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
        ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
    ])
###Code
burglary
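# The net keeps its variables in the order the nodes were added (parents before children)
burglary.variables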
###Output
_____no_output_____
###Markdown
The **BayesNet** method **variable_node** allows us to reach the **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of a node directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
###Markdown
Exact Inference in Bayesian NetworksA Bayes Network is a more compact representation of the full joint distribution and like full joint distributions allows us to do inference i.e. answer questions about probability distributions of random variables given some evidence.Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section. Inference by EnumerationWe apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
psource(enumerate_all)
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of **Equation 14.4** in the book.$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$ such that **P(X, e, y)** is written as a product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network.**enumeration_ask** calls **enumerate_all** on each value of the query variable **X** and finally normalizes the results.
###Code
psource(enumeration_ask)
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network.**enumeration_ask** takes three arguments: **X**, the variable name; **e**, the evidence (in the form of a dict, as previously explained); and **bn**, the Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
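# The full posterior over Burglary, rounded via show_approx
ans_dist.show_approx()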
###Output
_____no_output_____
###Markdown
Variable EliminationThe enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we effectively build the joint over all hidden variables, which is exponential in the number of hidden variables. Variable elimination instead interleaves joining and marginalization.Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. In general we call a multidimensional array of type P(Y1 ... Yn | X1 ... Xm) a factor, where some of the Xs and Ys may be assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and **cpt**. Helper FunctionsThere are certain helper functions that help create the **cpt** for a Factor given the evidence. Let us explore them one by one.
###Code
psource(make_factor)
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes the arguments **var**, the particular variable; **e**, the evidence we want to do inference on; and **bn**, the Bayes network.Here **variables** for each node refers to a list consisting of the variable itself and its parents, minus any variables that are part of the evidence. This is created by taking **node.parents** and filtering out those that are part of the evidence.The **cpt** created is similar to the original **cpt** of the node, but keeps only the rows that agree with the evidence.
###Code
psource(all_events)
###Output
_____no_output_____
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** that is part of the node. It works by extending the evidence related to the node, so all the output from **all_events** only includes events that support the evidence. Since **all_events** is a generator function, one such event is returned on every call. We can try this out using the example on **Page 524** of the book. We will make **f**5(A) = P(m | A)
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability **P(MaryCalls=True | Alarm=False)**. Because our representation stores probabilities only for the cases where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book where the evidence is that Alarm = True
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
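# Since Alarm is fixed by the evidence, the remaining entries form a
# distribution over MaryCalls and sum to 1
sum(new_factor.cpt.values())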
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between the two cases. Again, the only rows included are those consistent with the evidence. Operations on FactorsWe are interested in two kinds of operations on factors: **Pointwise Product**, which is used to create joint distributions, and **Summing Out**, which is used for marginalization.
###Code
psource(Factor.pointwise_product)
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint by combining two factors. We take the union of the **variables** of both factors and then generate the **cpt** for the new factor using the **all_events** function. Note that we have already eliminated rows that are not consistent with the evidence. Pointwise product assigns new probabilities by multiplying rows, similar to a database join.
###Code
psource(pointwise_product)
###Output
_____no_output_____
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
###Code
psource(Factor.sum_out)
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a new factor that eliminates a variable by summing over its values. Again **all_events** is used to generate combinations for the rest of the variables.
###Code
psource(sum_out)
###Output
_____no_output_____
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to eliminate a particular variable from all factors by summing over its values. Elimination AskThe algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes 3 arguments: **X**, the query variable; **e**, the evidence; and **bn**, the Bayes network. The algorithm creates factors out of Bayes Nodes in reverse order and eliminates hidden variables using **sum_out**. Finally it takes a pointwise product of all the factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
psource(elimination_ask)
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
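# Cross-check: enumeration gives the same posterior as variable elimination
enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()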
###Output
_____no_output_____
###Markdown
Approximate Inference in Bayesian NetworksExact inference fails to scale for very large and complex Bayesian Networks. This section covers implementation of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
psource(BayesNode.sample)
###Output
_____no_output_____
###Markdown
Before we consider the different algorithms in this section let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on the event's values for the parent variables. That is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it. Prior SamplingThe idea of Prior Sampling is to sample from the Bayesian Network in topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
###Code
psource(prior_sample)
###Output
_____no_output_____
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in topological order, and the event built so far is passed as evidence for the parent values. We will use the Bayesian Network in **Figure 14.12** to try out **prior_sample**. We store the samples as a list of observations. Let us find **P(Rain=True)**
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
0.508
###Markdown
To evaluate a conditional distribution, we can use a two-step filtering process. We first separate out the samples that are consistent with the evidence; then, for each value of the query variable, we can find probabilities. For example, to find **P(Cloudy=True | Rain=True)**: we have already filtered out the samples consistent with our evidence in **rain_true**, and we now apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
0.7755905511811023
###Markdown
Rejection SamplingRejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. The function **rejection_sampling** implements the algorithm described by **Figure 14.14**
###Code
psource(rejection_sampling)
###Output
_____no_output_____
###Markdown
The function keeps a count for each of the possible values of the query variable and increments it when we see an observation consistent with the evidence. It takes as input parameters **X**, the query variable; **e**, the evidence; **bn**, the Bayes net; and **N**, the number of prior samples to generate.**consistent_with** is used to check consistency.
###Code
psource(consistent_with)
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
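# The full estimated posterior over Cloudy
p.show_approx()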
###Output
_____no_output_____
###Markdown
Likelihood WeightingRejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
psource(weighted_sample)
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from the Bayesian Network that's consistent with the evidence **e** and returns the event along with its weight, the likelihood that the event accords with the evidence. It takes two parameters: **bn**, the Bayesian Network, and **e**, the evidence.The weight is obtained by multiplying **P(xi | parents(xi))** for each node in the evidence. We set **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
psource(likelihood_weighting)
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling**, but instead of adding one for each sample we add the weight obtained from **weighted_sample**.
###Code
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Gibbs SamplingIn likelihood weighting, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood weighting.Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**
###Code
psource(gibbs_ask)
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values. We then repeatedly pick a non-evidence variable and sample it from **P(Variable | current values of all the remaining variables)**. In practice, we speed this up by using **markov_blanket_sample** instead; this works because terms not involving the variable cancel in the calculation. The arguments of **gibbs_ask** are similar to those of **likelihood_weighting**
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Inference in Temporal Models
Before we start, it will be helpful to understand the structure of a temporal model. We will use the example from the book with the guard and the umbrella. In this example, the state $\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) on Day $\textbf{t}$. In the sensor or observation model, the observation or evidence $\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\textbf{t}$. Based on that, the transition model is

| $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}| X_{t-1})$ |
| ------------- | ------------- | ---------------------------------- |
| ***${False}$*** | ***${False}$*** | 0.7 |
| ***${False}$*** | ***${True}$*** | 0.3 |
| ***${True}$*** | ***${False}$*** | 0.3 |
| ***${True}$*** | ***${True}$*** | 0.7 |

And the sensor model will be

| $X_{t}$ | $U_{t}$ | **P**$(U_{t}|X_{t})$ |
| :-------------: | :-------------: | :------------------------: |
| ***${False}$*** | ***${True}$*** | 0.2 |
| ***${False}$*** | ***${False}$*** | 0.8 |
| ***${True}$*** | ***${True}$*** | 0.9 |
| ***${True}$*** | ***${False}$*** | 0.1 |

In the filtering task we are given evidence **U** at each time **t** and we want to compute the belief $B_{t}(x) = P(X_{t}|U_{1:t})$. We can think of it as a three-step process:
1. In every step we start with the current belief $P(X_{t}|e_{1:t})$
2. We update it for time
3. We update it for evidence

The forward algorithm performs steps 2 and 3 at once. It updates, or rather reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume equal probabilities of rain and no rain. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are by default [0.5, 0.5].
###Code
%psource HiddenMarkovModel
###Output
_____no_output_____
###Markdown
We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model.
###Code
hmm.sensor_dist(ev=True)
###Output
_____no_output_____
###Markdown
The observation update is calculated with the **`forward()`** function. Basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**.
###Code
psource(forward)
umbrella_prior = [0.5, 0.5]
belief_day_1 = forward(hmm, umbrella_prior, ev=True)
print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))
###Output
The probability of raining on day 1 is 0.82
###Markdown
In **Day 2** our initial belief is the updated belief of **Day 1**. Again using the **`forward()`** function we can compute the probability of raining in **Day 2**
###Code
belief_day_2 = forward(hmm, belief_day_1, ev=True)
print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))
###Output
The probability of raining in day 2 is 0.88
###Markdown
In the smoothing task we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for a time **k**, with $0 \leq k < t$; the computation can be divided into two parts: 1. The forward message up to **k** is computed by filtering forward from 1 to **k**. 2. The backward message is computed by a recursive process that runs backward from **t** to **k**; rather than starting at time 1, this recursion starts at time **t**. In the umbrella example, we can compute the backward message from **Day 2** to **Day 1** by using the `backward` function. The `backward` function takes as parameters the object created by the **`HiddenMarkovModel`** class, the evidence on **Day 2** (in our case **True**), and the initial message for time t+1; since no observation beyond **t** is available, this is [1, 1]. The `backward` function returns a list with the conditional probabilities.
###Code
psource(backward)
b = [1, 1]
backward(hmm, b, ev=True)
###Output
_____no_output_____
###Markdown
Some may notice that the result is not the same as in the book. The main reason is that in the book the normalization step is not used. If we want to normalize a result, we can use the **`normalize()`** helper function.In order to find the smoothed estimate for rain on **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed on both days and the prior distribution is [0.5, 0.5]
###Code
pseudocode('Forward-Backward')
umbrella_prior = [0.5, 0.5]
prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)
print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))
###Output
The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88
###Markdown
Probability This IPy notebook acts as supporting material for topics covered in **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning**, **Chapter 15 Probabilistic Reasoning over Time**, **Chapter 16 Making Simple Decisions** and parts of **Chapter 25 Robotics** of the book *Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
from utils import print_table
from notebook import psource, pseudocode, heatmap
###Output
_____no_output_____
###Markdown
CONTENTS
- Probability Distribution
  - Joint probability distribution
  - Inference using full joint distributions
- Bayesian Networks
  - BayesNode
  - BayesNet
  - Exact Inference in Bayesian Networks
    - Enumeration
    - Variable elimination
  - Approximate Inference in Bayesian Networks
    - Prior sample
    - Rejection sampling
    - Likelihood weighting
    - Gibbs sampling
- Hidden Markov Models
  - Inference in Hidden Markov Models
    - Forward-backward
    - Fixed lag smoothing
    - Particle filtering
- Monte Carlo Localization
- Decision Theoretic Agent
- Information Gathering Agent

PROBABILITY DISTRIBUTION
Let us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities to the values works similarly to using a dictionary: the keys are the values of the random variable and we assign to each key the corresponding probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
psource(ProbDist)
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
###Markdown
The first parameter of the constructor, **varname**, has a default value of '?'. So if the name is not passed it defaults to ?. The keyword argument **freqs** can be a dictionary mapping values of the random variable to probabilities. These are then normalized, so that the probability values sum up to 1, using the **normalize** method.
###Code
p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})
p.varname
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides **prob** and **varname**, the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability it is appended to this list; this is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
###Markdown
It is also possible to display the values rounded to a chosen number of decimal places using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability DistributionThe helper function **event_values** returns a tuple of the values of the variables in an event. An event is specified by a dict whose keys are variable names and whose values are the corresponding variable values; the variables of interest are specified with a list. The ordering of the returned tuple is the same as that of the variables. Alternatively, if the event is specified by a list or tuple whose length equals the number of variables, the event is returned as it is.
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables.
###Code
psource(JointProbDist)
###Output
_____no_output_____
###Markdown
Values for a Joint Distribution form an ordered tuple in which each item corresponds to the value associated with a particular variable. For a Joint Distribution of X, Y where X and Y take integer values this can be something like (18, 19).To specify a Joint Distribution we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class **JointProbDist** also employes magic methods to assign probability to different values.The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint DistributionsIn this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values.This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.$$\textbf{P}(X | \textbf{e}) = \alpha \textbf{P}(X, \textbf{e}) = \alpha \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in evidence or query variable) i.e. all possible combinations of **y**We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
###Markdown
Let us now look at the **enumerate_joint** function returns the sum of those entries in P consistent with e,provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation. The first parameter **variables** refers to remaining variables. The function in each recursive call keeps on variable constant while varying others.
###Code
psource(enumerate_joint)
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
###Output
_____no_output_____
###Markdown
You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)** as we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and denominator.
###Code
ans2/ans1
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
psource(enumerate_joint_ask)
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
###Output
_____no_output_____
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation. BAYESIAN NETWORKSA Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements.A Bayes Network is implemented as the class **BayesNet**. It consisits of a collection of nodes implemented by the class **BayesNode**. The implementation in the above mentioned classes focuses only on boolean variables. Each node is associated with a variable and it contains a **conditional probabilty table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents **P(X | parents)**.Let us dive into the **BayesNode** implementation.
###Code
psource(BayesNode)
###Output
_____no_output_____
###Markdown
The constructor takes in the name of **variable**, **parents** and **cpt**. Here **variable** is a the name of the variable like 'Earthquake'. **parents** should a list or space separate string with variable names of parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combination of boolean values that the parents take. The length and order of the values in keys should be same as the supplied **parent** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true).The example below where we implement the network shown in **Figure 14.3** of the book will make this more clear.The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalant to john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can also use.
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
It is possible to use the node for lookup function using the **p** method. The method takes in two arguments **value** and **event**. Event must be a dict of the type {variable:values, ..} The value corresponds to the value of the variable we are interested in (False or True).The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of parents in event. (event must assign each parent a value.)
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
###Output
_____no_output_____
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
psource(BayesNet)
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds node to the net. Its parents must already be in the net, and its variable must not. Thus add allows us to grow a **BayesNet** given its parents are already present.**burglary** global is an instance of **BayesNet** corresponding to the above example. T, F = True, False burglary = BayesNet([ ('Burglary', '', 0.001), ('Earthquake', '', 0.002), ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}), ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}), ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01}) ])
###Code
burglary
###Output
_____no_output_____
###Markdown
**BayesNet** method **variable_node** allows to reach **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
###Markdown
Exact Inference in Bayesian NetworksA Bayes Network is a more compact representation of the full joint distribution and like full joint distributions allows us to do inference i.e. answer questions about probability distributions of random variables given some evidence.Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section. Inference by EnumerationWe apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
psource(enumerate_all)
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of the **Equation 14.4** in the book.$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$ such that **P(X, e, y)** is written in the form of product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network.**enumeration_ask** calls **enumerate_all** on each value of query variable **X** and finally normalizes them.
###Code
psource(enumeration_ask)
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network. **enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
###Output
_____no_output_____
###Markdown
Variable EliminationThe enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we join the joint of all hidden variables. This is of exponential size for the number of hidden variables. Variable elimination employes interleaving join and marginalization.Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. In general we call a multidimensional array of type P(Y1 ... Yn | X1 ... Xm) a factor where some of Xs and Ys maybe assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and **cpt**. Helper FunctionsThere are certain helper functions that help creating the **cpt** for the Factor given the evidence. Let us explore them one by one.
###Code
psource(make_factor)
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, **bn** the bayes network.Here **variables** for each node refers to a list consisting of the variable itself and the parents minus any variables that are part of the evidence. This is created by finding the **node.parents** and filtering out those that are not part of the evidence.The **cpt** created is the one similar to the original **cpt** of the node with only rows that agree with the evidence.
###Code
psource(all_events)
###Output
_____no_output_____
###Markdown
Coin Flips and Die RollsUse NumPy to create simulations and compute proportions for the following outcomes. The first one is done for you.**Please note again that we are using 0 to represent heads, and 1 to represent tails.**
###Code
# import numpy
import numpy as np
###Output
_____no_output_____
###Markdown
1. Two fair coin flips produce exactly two heads
###Code
# simulate 1 million tests of two fair coin flips
tests = np.random.randint(2, size=(int(1e6), 2))
# sums of all tests
test_sums = tests.sum(axis=1)
# proportion of tests that produced exactly two heads
(test_sums == 0).mean()
###Output
_____no_output_____
###Markdown
2. Three fair coin flips produce exactly one head
###Code
# simulate 1 million tests of three fair coin flips
tests = np.random.randint(2, size=(int(1e6), 3))
# sums of all tests
test_sums = tests.sum(axis=1)
# proportion of tests that produced exactly one head
(test_sums == 2).mean()
###Output
_____no_output_____
###Markdown
3. Three biased coin flips with P(H) = 0.6 produce exactly one head
###Code
# simulate 1 million tests of three biased coin flips
# hint: use np.random.choice()
tests = np.random.choice([0,1],size=(int(1e6), 3),p=[0.6,0.4])
# sums of all tests
test_sums = tests.sum(axis=1)
# proportion of tests that produced exactly one head
(test_sums == 2).mean()
###Output
_____no_output_____
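###Markdown
As a quick check (added here for illustration, not part of the original exercise), the exact binomial probability of exactly one head in three flips with P(H) = 0.6 is 3 * 0.6 * 0.4**2 = 0.288, so the simulated proportion above should land close to this value.
###Code
# exact probability of exactly one head (and therefore two tails) in three biased flips
exact = 3 * 0.6 * 0.4**2
exact
###Output
_____no_output_____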
###Markdown
4. A die rolls an even number
###Code
# simulate 1 million tests of one die roll
tests = np.random.randint(1,7,size=int(1e6))
# proportion of tests that produced an even number
print((tests % 2 == 0).mean())
###Output
0.500498
###Markdown
5. Two dice roll a double
###Code
# simulate the first million die rolls
first = np.random.randint(1,7,size=int(1e6))
# simulate the second million die rolls
second = np.random.randint(1,7,size=int(1e6))
# proportion of tests where the 1st and 2nd die rolled the same number
print((first == second).mean())
###Output
0.166764
###Markdown
Probability This IPy notebook acts as supporting material for topics covered in **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning**, **Chapter 15 Probabilistic Reasoning over Time**, **Chapter 16 Making Simple Decisions** and parts of **Chapter 25 Robotics** of the book *Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in the probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
from utils import print_table
from notebook import psource, pseudocode, heatmap
###Output
_____no_output_____
###Markdown
CONTENTS- Probability Distribution - Joint probability distribution - Inference using full joint distributions- Bayesian Networks - BayesNode - BayesNet - Exact Inference in Bayesian Networks - Enumeration - Variable elimination - Approximate Inference in Bayesian Networks - Prior sample - Rejection sampling - Likelihood weighting - Gibbs sampling- Hidden Markov Models - Inference in Hidden Markov Models - Forward-backward - Fixed lag smoothing - Particle filtering- Monte Carlo Localization- Decision Theoretic Agent- Information Gathering Agent PROBABILITY DISTRIBUTION Let us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities works like assigning entries in a dictionary: the key is the value of the random variable and the number we assign to it is its probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
psource(ProbDist)
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
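###Markdown
To make the magic-method machinery concrete, here is a minimal sketch of a ProbDist-like class (an illustration under simplifying assumptions, not the module's actual implementation, which psource shows above): **_ _setitem_ _** records the value and its probability, **_ _getitem_ _** looks it up, and **normalize** rescales the stored probabilities.
###Code
class MiniProbDist:
    """A stripped-down sketch of a discrete distribution keyed by value (illustrative only)."""

    def __init__(self, varname='?'):
        self.varname = varname
        self.prob = {}
        self.values = []

    def __setitem__(self, value, p):
        # remember the value the first time it is given a probability
        if value not in self.values:
            self.values.append(value)
        self.prob[value] = p

    def __getitem__(self, value):
        # unseen values get probability 0 in this sketch
        return self.prob.get(value, 0)

    def normalize(self):
        total = sum(self.prob.values())
        for value in self.prob:
            self.prob[value] /= total
        return self

d = MiniProbDist('Flip')
d['H'], d['T'] = 0.25, 0.75
d['T']
###Output
_____no_output_____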
###Markdown
The first parameter of the constructor, **varname**, has a default value of '?', so if a name is not passed it defaults to '?'. The keyword argument **freqs** can be a dictionary mapping values of the random variable to frequencies. These are then normalized, using the **normalize** method, so that the probability values sum up to 1.
###Code
p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})
p.varname
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides **prob** and **varname**, the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability it is appended to this list; this is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
###Markdown
It is also possible to display the values rounded to a few decimal places using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability Distribution The helper function **event_values** returns a tuple of the values of variables in event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the values of those variables. Variables are specified with a list. The ordering of the returned tuple is the same as that of the variables. Alternatively, if the event is specified by a list or tuple of the same length as the variables, the event tuple is returned as it is.
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
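###Markdown
The behaviour described above is simple enough to sketch. The function below is only a hedged approximation of what **event_values** does, written to make the two accepted input formats explicit; the real source can be inspected with psource(event_values).
###Code
def event_values_sketch(event, variables):
    """Return a tuple of the values of `variables` in `event` (sketch, not the module's code)."""
    # an ordered sequence of the right length is passed through unchanged
    if isinstance(event, (list, tuple)) and len(event) == len(variables):
        return tuple(event)
    # otherwise look each variable up in the event dictionary
    return tuple(event[var] for var in variables)

event_values_sketch({'A': 10, 'B': 9, 'C': 8}, ['C', 'A'])
###Output
_____no_output_____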
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables.
###Code
psource(JointProbDist)
###Output
_____no_output_____
###Markdown
Values for a Joint Distribution are an ordered tuple in which each item corresponds to the value associated with a particular variable. For a Joint Distribution of X, Y where X, Y take integer values this can be something like (18, 19). To specify a Joint distribution we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class, **JointProbDist** also employs magic methods to assign probability to different values. The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint Distributions In this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values. This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.$$\textbf{P}(X | \textbf{e}) = \alpha \textbf{P}(X, \textbf{e}) = \alpha \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate over the remaining variables **y** (those in neither the evidence nor the query), i.e. all possible combinations of **y**. We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
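###Markdown
Before querying the distribution, it is worth checking that the eight entries we just assigned sum to 1. The check below only uses the indexing behaviour shown above.
###Code
# sanity check: the eight probabilities of the full joint should sum to 1
total = sum(full_joint[dict(Cavity=c, Toothache=t, Catch=k)]
            for c in (True, False) for t in (True, False) for k in (True, False))
total
###Output
_____no_output_____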
###Markdown
Let us now look at the **enumerate_joint** function. It returns the sum of those entries in P consistent with e, provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation. The first parameter **variables** refers to the remaining variables. In each recursive call the function keeps one variable constant while varying the others.
###Code
psource(enumerate_joint)
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
###Output
_____no_output_____
###Markdown
You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find the sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)**, since we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and the denominator.
###Code
ans2/ans1
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
psource(enumerate_joint_ask)
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
###Output
_____no_output_____
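###Markdown
As a consistency check, the normalized answer returned by **enumerate_joint_ask** should match the manual calculation ans2 / ans1 from the previous section (up to floating-point error).
###Code
# the normalized P(Cavity=True | Toothache=True) should equal ans2 / ans1
abs(ans[True] - ans2 / ans1) < 1e-9
###Output
_____no_output_____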
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation. BAYESIAN NETWORKS A Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements. A Bayes Network is implemented as the class **BayesNet**. It consists of a collection of nodes implemented by the class **BayesNode**. The implementation in the above mentioned classes focuses only on boolean variables. Each node is associated with a variable and it contains a **conditional probability table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents **P(X | parents)**. Let us dive into the **BayesNode** implementation.
###Code
psource(BayesNode)
###Output
_____no_output_____
###Markdown
The constructor takes in the name of **variable**, **parents** and **cpt**. Here **variable** is the name of the variable, like 'Earthquake'. **parents** should be a list or space-separated string with the variable names of the parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combinations of boolean values that the parents take. The length and order of the values in the keys should be the same as in the supplied **parent** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true). The example below, where we implement the network shown in **Figure 14.3** of the book, will make this clearer. The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalent to john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can simply pass a single probability as the **cpt**:
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
We can use a node to look up conditional probabilities with the **p** method. The method takes in two arguments, **value** and **event**. Event must be a dict of the type {variable: value, ...}. The value corresponds to the value of the variable we are interested in (False or True). The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of the parents in the event. (The event must assign each parent a value.)
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
###Output
_____no_output_____
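###Markdown
Since the probability of the node variable being false is implicit, the two lookups for True and False given the same parent values should sum to one. This is just a usage check of the **p** method described above.
###Code
# P(JohnCalls=True | Alarm=True) + P(JohnCalls=False | Alarm=True) should be 1.0
john_node.p(True, {'Alarm': True}) + john_node.p(False, {'Alarm': True})
###Output
_____no_output_____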
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
psource(BayesNet)
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds the node to the net. Its parents must already be in the net, and its variable must not. Thus **add** allows us to grow a **BayesNet** as long as each node's parents are already present. The **burglary** global is an instance of **BayesNet** corresponding to the above example. T, F = True, False burglary = BayesNet([ ('Burglary', '', 0.001), ('Earthquake', '', 0.002), ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}), ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}), ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01}) ])
###Code
burglary
###Output
_____no_output_____
###Markdown
The **BayesNet** method **variable_node** allows us to access **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
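###Markdown
Because the network encodes the full joint distribution, the probability of a complete assignment is the product of each node's conditional probability given its parents (the chain rule for Bayesian networks). The sketch below assumes, as suggested by the sources shown above, that the net exposes a **nodes** list and that each **BayesNode** has a **variable** attribute; for the event where John and Mary call, the alarm rings, and there is no burglary or earthquake, the product should come out to roughly 0.000628 (0.999 * 0.998 * 0.001 * 0.9 * 0.7).
###Code
# sketch: joint probability of a complete assignment via the chain rule
event = dict(Burglary=False, Earthquake=False, Alarm=True, JohnCalls=True, MaryCalls=True)
joint = 1.0
for node in burglary.nodes:
    # multiply in P(node variable = its value in event | parents as given in event)
    joint *= node.p(event[node.variable], event)
joint
###Output
_____no_output_____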
###Markdown
Exact Inference in Bayesian NetworksA Bayes Network is a more compact representation of the full joint distribution and like full joint distributions allows us to do inference i.e. answer questions about probability distributions of random variables given some evidence.Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section. Inference by EnumerationWe apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
psource(enumerate_all)
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of **Equation 14.4** in the book.$$\textbf{P}(X | \textbf{e}) = \alpha \textbf{P}(X, \textbf{e}) = \alpha \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$ such that **P(X, e, y)** is written as a product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network. **enumeration_ask** calls **enumerate_all** on each value of the query variable **X** and finally normalizes them.
###Code
psource(enumeration_ask)
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network. **enumeration_ask** takes three arguments: **X** = variable name, **e** = evidence (in the form of a dict, as previously explained), **bn** = the Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
###Output
_____no_output_____
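###Markdown
We can also display the whole normalized distribution with **show_approx**; the book reports roughly 0.284 for Burglary=True given both calls.
###Code
ans_dist.show_approx()
###Output
_____no_output_____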
###Markdown
Variable Elimination The enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we effectively build the joint over all the hidden variables, which is of exponential size in the number of hidden variables. Variable elimination avoids this by interleaving joining and marginalization. Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. In general we call a multidimensional array of type P(Y1 ... Yn | X1 ... Xm) a factor, where some of the Xs and Ys may be assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and **cpt**. Helper Functions There are certain helper functions that help create the **cpt** for the Factor given the evidence. Let us explore them one by one.
###Code
psource(make_factor)
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, and **bn** the Bayes network. Here **variables** for each node refers to a list consisting of the variable itself and its parents, minus any variables that are part of the evidence. This is created by taking **node.parents** and filtering out those that are part of the evidence. The **cpt** created is similar to the original **cpt** of the node, with only the rows that agree with the evidence.
###Code
psource(all_events)
###Output
_____no_output_____
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** which is part of the node. This works by extending the evidence related to the node; thus all the output from **all_events** only includes events that support the evidence. Since **all_events** is a generator function, one such event is returned on every call. We can try this out using the example on **Page 524** of the book. We will make **f5**(A) = P(m | A)
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability **P(MaryCalls=True | Alarm=False)**. Because our representation stores probabilities only for the cases where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book where the evidence is that Alarm = True
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between the two cases. Again the only rows included are those consistent with the evidence. Operations on Factors We are interested in two kinds of operations on factors: **Pointwise Product**, which is used to create joint distributions, and **Summing Out**, which is used for marginalization.
###Code
psource(Factor.pointwise_product)
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint via combining two factors. We take the union of the **variables** of both factors and then generate the **cpt** for the new factor using the **all_events** function. Note that we have already eliminated rows that are not consistent with the evidence. Pointwise product assigns new probabilities by multiplying rows, similar to a database join.
###Code
psource(pointwise_product)
###Output
_____no_output_____
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
###Code
psource(Factor.sum_out)
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a factor eliminating a variable by summing over its values. Again **all_events** is used to generate combinations for the rest of the variables.
###Code
psource(sum_out)
###Output
_____no_output_____
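###Markdown
To see these operations in action, here is a sketch of one interleaved join-and-marginalize step done by hand, assuming (as the sources above suggest) that **Factor.pointwise_product** and **Factor.sum_out** both take the Bayes net as their last argument: build the factors for Alarm and MaryCalls under the evidence, join them, then sum out the hidden variable Alarm.
###Code
# sketch: one variable-elimination step performed manually
evidence = {'JohnCalls': True, 'MaryCalls': True}
f_alarm = make_factor('Alarm', evidence, burglary)
f_mary = make_factor('MaryCalls', evidence, burglary)
joined = f_alarm.pointwise_product(f_mary, burglary)   # join on the shared variable Alarm
joined.sum_out('Alarm', burglary).cpt                   # marginalize Alarm away
###Output
_____no_output_____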
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values. Elimination Ask The algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes in 3 arguments: **X** the query variable, **e** the evidence, and **bn** the Bayes network. The algorithm creates factors out of Bayes Nodes in reverse order and eliminates hidden variables using **sum_out**. Finally it takes a pointwise product of all factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
psource(elimination_ask)
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
_____no_output_____
###Markdown
Runtime comparison Let's see how the runtimes of these two algorithms compare. We expect variable elimination to outperform enumeration by a large margin as we reduce the number of repetitive calculations significantly.
###Code
%%timeit
enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
%%timeit
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
241 µs ± 64.6 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
We observe that variable elimination was faster than enumeration as we had expected, but the gain in speed is not a lot; in fact it is just about 30% faster. This happened because the Bayesian network in question is pretty small, with just 5 nodes, some of which aren't even required in the inference process. For more complicated networks, variable elimination will be significantly faster and the runtime will reduce not just by a constant factor, but by a polynomial factor proportional to the number of nodes, due to the reduction in repeated calculations. Approximate Inference in Bayesian Networks Exact inference fails to scale for very large and complex Bayesian Networks. This section covers implementations of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
psource(BayesNode.sample)
###Output
_____no_output_____
###Markdown
Before we consider the different algorithms in this section let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on the event's values for the parent variables. That is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it. Prior Sampling The idea of Prior Sampling is to sample from the Bayesian Network in a topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
###Code
psource(prior_sample)
###Output
_____no_output_____
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in topological order, and the values sampled so far are passed as evidence for the parent values. We will use the Bayesian Network in **Figure 14.12** to try out **prior_sample**. Traversing the graph in topological order is important. There are two possible topological orderings for this particular directed acyclic graph.1. `Cloudy -> Sprinkler -> Rain -> Wet Grass`2. `Cloudy -> Rain -> Sprinkler -> Wet Grass`We can follow either of the two orderings to sample from the network. Any ordering other than these two, however, cannot be used. One way to think about this is that `Cloudy` can be seen as a precondition of both `Rain` and `Sprinkler` and, just like we have seen in planning, preconditions need to be satisfied before a certain action can be executed. We store the samples as a list of observations. Let us find **P(Rain=True)** by taking 1000 random samples from the network.
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
0.496
###Markdown
Sampling this another time might give different results as we have no control over the distribution of the random samples
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
answer = len(rain_true) / N
print(answer)
###Output
0.503
###Markdown
To evaluate a conditional distribution, we can use a two-step filtering process. We first separate out the observations that are consistent with the evidence. Then for each value of the query variable, we can find probabilities. For example, to find **P(Cloudy=True | Rain=True)**: we have already filtered out the observations consistent with our evidence in **rain_true**. Now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
0.8091451292246521
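###Markdown
The two-step filtering above can be wrapped into a small helper. The function below is only a sketch of the idea; the library's **rejection_sampling**, introduced next, does the same thing more generally.
###Code
def estimate_conditional(observations, query_var, query_val, evidence):
    """Estimate P(query_var=query_val | evidence) from prior samples (sketch)."""
    consistent = [obs for obs in observations
                  if all(obs[var] == val for var, val in evidence.items())]
    matching = [obs for obs in consistent if obs[query_var] == query_val]
    return len(matching) / len(consistent)

estimate_conditional(all_observations, 'Cloudy', True, dict(Rain=True))
###Output
_____no_output_____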
###Markdown
Rejection Sampling Rejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. Rejection sampling is advantageous only when we know the query beforehand. While prior sampling generally works for any query, it might fail in some scenarios. Let's say we have a generic Bayesian network and we have evidence `e`, and we want to know how many times a state `A` is true, given evidence `e` is true. Normally, prior sampling can answer this question, but let's assume that the probability of evidence `e` being true in our actual probability distribution is very small. In this situation, it might be possible that sampling never encounters a data-point where `e` is true. If our sampled data has no instance of `e` being true, `P(e) = 0`, and therefore `P(A | e) = P(A, e) / P(e) = 0/0`, which is undefined. We cannot find the required value using this sample. We can definitely increase the number of sample points, but we can never guarantee that we will encounter a case where `e` is true (assuming our actual probability distribution has at least one case where `e` is true). To guarantee this, we would have to consider every single data point, which means we lose the speed advantage that approximation provides us and we essentially have to calculate the exact inference model of the Bayesian network. Rejection sampling will be useful in this situation, as we already know the query. While sampling from the network, we will reject any sample which is inconsistent with the evidence variables of the given query (in this example, the only evidence variable is `e`). We will only consider samples that do not violate **any** of the evidence variables. In this way, we will have enough data with the required evidence to infer queries involving a subset of that evidence. The function **rejection_sampling** implements the algorithm described by **Figure 14.14**
###Code
psource(rejection_sampling)
###Output
_____no_output_____
###Markdown
The function keeps counts of each of the possible values of the query variable and increases the count when we see an observation consistent with the evidence. It takes in the input parameters **X** - the query variable, **e** - the evidence, **bn** - the Bayes net and **N** - the number of prior samples to generate. **consistent_with** is used to check consistency.
###Code
psource(consistent_with)
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
###Output
_____no_output_____
###Markdown
Likelihood Weighting Rejection sampling takes a long time to run when the probability of finding consistent evidence is low. It is also slow for larger networks and more evidence variables. Rejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent. The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
psource(weighted_sample)
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from the Bayesian Network that is consistent with the evidence **e** and returns the event and its weight, the likelihood that the event accords with the evidence. It takes in two parameters: **bn** the Bayesian Network and **e** the evidence. The weight is obtained by multiplying **P(xi | parents(xi))** for each node in the evidence. We set the values of **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
psource(likelihood_weighting)
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling** but instead of adding one for each sample we add the weight obtained from **weighted_sample**.
###Code
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Gibbs Sampling In likelihood weighting, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood weighting. Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**
###Code
psource(gibbs_ask)
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values. We then repeatedly pick a non-evidence variable and sample it from **P(variable | current values of all other variables)**. In practice, we speed this up by using **markov_blanket_sample** instead, which conditions only on the variable's Markov blanket; this works because terms not involving the variable cancel out of the calculation. The arguments for **gibbs_ask** are similar to **likelihood_weighting**
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
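###Markdown
Before timing the algorithms, it is instructive to compare their estimates of P(Cloudy=True | Rain=True) against the exact answer from **elimination_ask**; with enough samples all three should land close to it. The sample sizes below are arbitrary choices for illustration.
###Code
exact = elimination_ask('Cloudy', dict(Rain=True), sprinkler)[True]
print('exact                ', round(exact, 3))
print('rejection_sampling   ', rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 10000)[True])
print('likelihood_weighting ', likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 10000)[True])
print('gibbs_ask            ', gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 1000)[True])
###Output
_____no_output_____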
###Markdown
Runtime analysisLet's take a look at how much time each algorithm takes.
###Code
%%timeit
all_observations = [prior_sample(sprinkler) for x in range(1000)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
len([observation for observation in rain_true if observation['Cloudy'] == True]) / len(rain_true)
%%timeit
rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
%%timeit
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200)
%%timeit
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200)
###Output
7.03 ms ± 117 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
As expected, all algorithms have a very similar runtime.However, rejection sampling would be a lot faster and more accurate when the probabiliy of finding data-points consistent with the required evidence is small.Likelihood weighting is the fastest out of all as it doesn't involve rejecting samples, but also has a quite high variance. HIDDEN MARKOV MODELS Often, we need to carry out probabilistic inference on temporal data or a sequence of observations where the order of observations matter.We require a model similar to a Bayesian Network, but one that grows over time to keep up with the latest evidences.If you are familiar with the `mdp` module or Markov models in general, you can probably guess that a Markov model might come close to representing our problem accurately.A Markov model is basically a chain-structured Bayesian Network in which there is one state for each time step and each node has an identical probability distribution.The first node, however, has a different distribution, called the prior distribution which models the initial state of the process.A state in a Markov model depends only on the previous state and the latest evidence and not on the states before it.A **Hidden Markov Model** or **HMM** is a special case of a Markov model in which the state of the process is described by a single discrete random variable.The possible values of the variable are the possible states of the world.But what if we want to model a process with two or more state variables?In that case, we can still fit the process into the HMM framework by redefining our state variables as a single "megavariable".We do this because carrying out inference on HMMs have standard optimized algorithms.A HMM is very similar to an MDP, but we don't have the option of taking actions like in MDPs, instead, the process carries on as new evidence appears.If a HMM is truncated at a fixed length, it becomes a Bayesian network and general BN inference can be used on it to answer queries.Before we start, it will be helpful to understand the structure of a temporal model. We will use the example of the book with the guard and the umbrella. In this example, the state $\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) at Day $\textbf{t}$. In the sensor or observation model, the observation or evidence $\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\textbf{t}$. Based on that, the transition model is | $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}| X_{t-1})$| | ------------- |------------- | ----------------------------------|| ***${False}$*** | ***${False}$*** | 0.7 || ***${False}$*** | ***${True}$*** | 0.3 || ***${True}$*** | ***${False}$*** | 0.3 || ***${True}$*** | ***${True}$*** | 0.7 |And the the sensor model will be,| $X_{t}$ | $U_{t}$ | **P**$(U_{t}|X_{t})$| | :-------------: |:-------------: | :------------------------:|| ***${False}$*** | ***${True}$*** | 0.2 || ***${False}$*** | ***${False}$*** | 0.8 || ***${True}$*** | ***${True}$*** | 0.9 || ***${True}$*** | ***${False}$*** | 0.1 | HMMs are implemented in the **`HiddenMarkovModel`** class.Let's have a look.
###Code
psource(HiddenMarkovModel)
###Output
_____no_output_____
###Markdown
We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model.
###Code
hmm.sensor_dist(ev=True)
###Output
_____no_output_____
###Markdown
Now that we have defined an HMM object, our task here is to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$ given evidence **U** at each time step **t**.The basic inference tasks that must be solved are:1. **Filtering**: Computing the posterior probability distribution over the most recent state, given all the evidence up to the current time step.2. **Prediction**: Computing the posterior probability distribution over the future state.3. **Smoothing**: Computing the posterior probability distribution over a past state. Smoothing provides a better estimation as it incorporates more evidence.4. **Most likely explanation**: Finding the most likely sequence of states for a given observation5. **Learning**: The transition and sensor models can be learnt, if not yet known, just like in an information gathering agentThere are three primary methods to carry out inference in Hidden Markov Models:1. The Forward-Backward algorithm2. Fixed lag smoothing3. Particle filteringLet's have a look at how we can carry out inference and answer queries based on our umbrella HMM using these algorithms. FORWARD-BACKWARDThis is a general algorithm that works for all Markov models, not just HMMs.In the filtering task (inference) we are given evidence **U** in each time **t** and we want to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$. We can think of it as a three step process:1. In every step we start with the current belief $P(X_{t}|e_{1:t})$2. We update it for time3. We update it for evidenceThe forward algorithm performs the step 2 and 3 at once. It updates, or better say reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume that we have equal possibilities to rain or not. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are by default [0.5, 0.5]. The observation update is calculated with the **`forward()`** function. Basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**.
###Code
psource(forward)
umbrella_prior = [0.5, 0.5]
belief_day_1 = forward(hmm, umbrella_prior, ev=True)
print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))
###Output
The probability of raining on day 1 is 0.82
###Markdown
In **Day 2** our initial belief is the updated belief of **Day 1**.Again using the **`forward()`** function we can compute the probability of raining in **Day 2**
###Code
belief_day_2 = forward(hmm, belief_day_1, ev=True)
print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))
###Output
The probability of raining in day 2 is 0.88
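###Markdown
As a small illustration (the observation sequence below is made up, not from the book), we can keep filtering over a longer run of evidence by feeding each updated belief back into **`forward()`**.
###Code
# filter over a hypothetical five-day umbrella sequence
belief = [0.5, 0.5]
for ev in [True, True, False, True, True]:
    belief = forward(hmm, belief, ev=ev)
    print('P(rain) = {:.3f}'.format(belief[0]))
###Output
_____no_output_____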
###Markdown
In the smoothing part we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for a time **k**, for $0\leq k<t $. The computation can be divided in two parts: 1. The forward message is computed by filtering forward from 1 to **k**.2. The backward message is computed by a recursive process that runs from **k** to **t**. Rather than starting at time 1, the algorithm starts at time **t**. In the umbrella example, we can compute the backward message from **Day 2** to **Day 1** by using the `backward` function. The `backward` function has as parameters the object created by the **`HiddenMarkovModel`** class, the evidence for **Day 2** (in our case **True**), and the initial probabilities of being in each state at time t+1. Since no observation is available then it will be [1, 1]. The `backward` function will return a list with the conditional probabilities.
###Code
psource(backward)
b = [1, 1]
backward(hmm, b, ev=True)
###Output
_____no_output_____
###Markdown
Some may notice that the result is not the same as in the book. The main reason is that in the book the normalization step is not used. If we want to normalize the result, one can use the **`normalize()`** helper function.In order to find the smoothed estimate for raining in **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed in both days and the prior distribution is [0.5, 0.5]
###Code
pseudocode('Forward-Backward')
umbrella_prior = [0.5, 0.5]
prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)
print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))
###Output
The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88
###Markdown
Since HMMs are represented as single variable systems, we can represent the transition model and sensor model as matrices. The `forward_backward` algorithm can be easily carried out on this representation (as we have done here) with a time complexity of $O({S}^{2} t)$ where t is the length of the sequence and each step multiplies a vector of size $S$ with a matrix of dimensions $SxS$. Additionally, the forward pass stores $t$ vectors of size $S$ which makes the auxiliary space requirement equivalent to $O(St)$. Is there any way we can improve the time or space complexity? Fortunately, the matrix representation of HMM properties allows us to do so. If $f$ and $b$ represent the forward and backward messages respectively, we can modify the smoothing algorithm by first running the standard forward pass to compute $f_{t:t}$ (forgetting all the intermediate results) and then running the backward pass for both $b$ and $f$ together, using them to compute the smoothed estimate at each step. This optimization reduces the auxiliary space requirement to constant (irrespective of the length of the sequence) provided the transition matrix is invertible and the sensor model has no zeros (which is sometimes hard to accomplish). Let's look at another algorithm, that carries out smoothing in a more optimized way. FIXED LAG SMOOTHING The matrix formulation allows us to optimize online smoothing with a fixed lag. Since smoothing can be done in constant space, there should exist an algorithm whose time complexity is independent of the length of the lag. For smoothing a time slice $t - d$ where $d$ is the lag, we need to compute $\alpha f_{1:t-d}$ x $b_{t-d+1:t}$ incrementally. As we already know, the forward equation is$$f_{1:t+1} = \alpha O_{t+1}{T}^{T}f_{1:t}$$and the backward equation is$$b_{k+1:t} = TO_{k+1}b_{k+2:t}$$where $T$ and $O$ are the transition and sensor models respectively. For smoothing, the forward message is easy to compute but there exists no simple relation between the backward message of this time step and the one at the previous time step, hence we apply the backward equation $d$ times to get$$b_{t-d+1:t} = \left ( \prod_{i=t-d+1}^{t}{TO_i} \right )b_{t+1:t} = B_{t-d+1:t}1$$where $B_{t-d+1:t}$ is the product of the sequence of $T$ and $O$ matrices. Here's how the `probability` module implements `fixed_lag_smoothing`.
###Code
psource(fixed_lag_smoothing)
###Output
_____no_output_____
###Markdown
This algorithm applies `forward` as usual and optimizes the smoothing step by using the equations above. This optimization could be achieved only because HMM properties can be represented as matrices. `vector_to_diagonal`, `matrix_multiplication` and `inverse_matrix` are matrix manipulation functions to simplify the implementation. `normalize` is used to normalize the output before returning it. Here's how we can use `fixed_lag_smoothing` for inference on our umbrella HMM.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
Given evidence T, F, T, F and T, we want to calculate the probability distribution for the fourth day with a fixed lag of 2 days. Let `e_t = False`.
###Code
e_t = F
evidence = [T, F, T, F, T]
fixed_lag_smoothing(e_t, hmm, d=2, ev=evidence, t=4)
e_t = T
evidence = [T, T, F, T, T]
fixed_lag_smoothing(e_t, hmm, d=1, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
We cannot calculate probability distributions when $t$ is less than $d$
###Code
fixed_lag_smoothing(e_t, hmm, d=5, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
As expected, the output is `None`.
PARTICLE FILTERING
The filtering problem is too expensive to solve using the previous methods for problems with large or continuous state spaces. Particle filtering is a method that can solve the same problem when the state space is much larger and we wouldn't be able to do these computations in a reasonable amount of time as evidence arrives, yet we still want to keep track of things as they happen. The downside is that it is a sampling method and hence isn't exact, but the more samples we're willing to take, the more accurate the estimate gets. In this method, instead of keeping track of the probability distribution, we will drop particles in a similar proportion at the required regions. The internal representation of this distribution is usually a list of particles with coordinates in the state space. A particle is just a new name for a sample. Particle filtering can be divided into four steps:
1. __Initialization__: If we have some idea about the prior probability distribution, we drop the initial particles accordingly, or else we just drop them uniformly over the state space.
2. __Forward pass__: As time goes by and measurements come in, we are going to move the selected particles into the grid squares that make the most sense in terms of representing the distribution that we are trying to track. When time goes by, we just loop through all our particles and try to simulate what could happen to each one of them by sampling its next position from the transition model. This is like prior sampling - samples' frequencies reflect the transition probabilities. If we have enough samples we are pretty close to exact values. We work through the list of particles, one particle at a time; all we do is stochastically simulate what the outcome might be. If we had no dimension of time, and we had no new measurements coming in, this would be exactly the same as what we did in prior sampling.
3. __Reweight__: As observations come in, we don't sample the observations; we fix them and downweight the samples based on the evidence, just like in likelihood weighting.
$$w(x) = P(e\mid x)$$
$$B(X) \propto P(e\mid X)B'(X)$$
As before, the probabilities don't sum to one, since most have been downweighted. They sum to an approximation of $P(e)$. To normalize the resulting distribution, we can divide by $P(e)$. Likelihood weighting wasn't the best thing for Bayesian networks, because we were not accounting for the incoming evidence, so we were getting samples from the prior distribution, in some sense not the right distribution, and we might end up with a lot of particles with low weights. These samples were very uninformative and the way we fixed it then was by using __Gibbs sampling__. Theoretically, Gibbs sampling can be run on a HMM, but as we iterated over the process infinitely many times in a Bayesian network, we cannot do that here, as we have new incoming evidence and we also need computational cycles to propagate through time. A lot of samples end up with very low weight and are not representative of the _actual probability distribution_. So if we keep running likelihood weighting, we keep propagating the samples with smaller weights and carry out computations for them even though these samples have no significant contribution to the actual probability distribution. This is why we require the last step.
4. __Resample__: Rather than tracking weighted samples, we _resample_. We choose from our weighted sample distribution as many times as the number of particles we initially had, with replacement, so that we have a constant number of particles. This is equivalent to renormalizing the distribution. The samples with low weight are rarely chosen in the new distribution after resampling. This newer set of particles after resampling is in some sense more representative of the actual distribution, so we are better allocating our computational cycles. Now the update is complete for this time step; continue with the next one.
Let's see how this is implemented in the module.
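Before looking at the module's implementation, here is a toy sketch of one such update (propagate, reweight, resample) for the umbrella HMM, written from scratch with the standard library. The state names 'A' (rain) and 'B' (no rain) mirror the generic names used by the module, but nothing else here is the module's code.
###Code
import random

transition = {'A': {'A': 0.7, 'B': 0.3}, 'B': {'A': 0.3, 'B': 0.7}}
P_umbrella = {'A': 0.9, 'B': 0.2}              # P(umbrella observed | state)

def particle_step(particles, umbrella_seen):
    # 1. propagate: sample each particle's next state from the transition model
    particles = [random.choices(['A', 'B'],
                                weights=[transition[s]['A'], transition[s]['B']])[0]
                 for s in particles]
    # 2. reweight: weight each particle by the likelihood of the evidence
    weights = [P_umbrella[s] if umbrella_seen else 1 - P_umbrella[s] for s in particles]
    # 3. resample: draw the same number of particles, with replacement, by weight
    return random.choices(particles, weights=weights, k=len(particles))

particles = random.choices(['A', 'B'], k=100)  # uniform initialization
particles = particle_step(particles, umbrella_seen=True)
print(particles.count('A'), particles.count('B'))   # roughly 80/20 in favour of rain
###Output
_____no_output_____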
###Code
psource(particle_filtering)
###Output
_____no_output_____
###Markdown
Here, `scalar_vector_product` and `vector_add` are helper functions to help with vector math, and `weighted_sample_with_replacement` resamples from a weighted sample with replacement, as is obvious from the name. This implementation considers two state variables with generic names 'A' and 'B'. Here's how we can use `particle_filtering` on our umbrella HMM, though it doesn't make much sense to use particle filtering on a problem with such a small state space. It is just to get familiar with the syntax.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
particle_filtering(T, 10, hmm)
###Output
_____no_output_____
###Markdown
We got 5 samples from state `A` and 5 samples from state `B`
###Code
particle_filtering([F, T, F, F, T], 10, hmm)
###Output
_____no_output_____
###Markdown
This time we got 2 samples from state `A` and 8 samples from state `B`. Comparing runtimes for these algorithms will not be useful, as each solves the filtering task efficiently for a different scenario. `forward_backward` calculates the exact probability distribution. `fixed_lag_smoothing` calculates an approximate distribution and its runtime will depend on the value of the lag chosen. `particle_filtering` is an efficient method for approximating distributions for a very large or continuous state space.
MONTE CARLO LOCALIZATION
In the domain of robotics, particle filtering is used for _robot localization_. __Localization__ is the problem of finding out where things are; in this case, we want to find the position of a robot in a continuous state space. __Monte Carlo Localization__ is an algorithm for robots to _localize_ using a _particle filter_. Given a map of the environment, the algorithm estimates the position and orientation of a robot as it moves and senses the environment. Initially, particles are distributed uniformly over the state space, i.e. the robot has no information of where it is and assumes it is equally likely to be at any point in space. When the robot moves, it analyses the incoming evidence to shift and change the probability to better approximate the probability distribution of its position. The particles are then resampled based on their weights. Gradually, as more evidence comes in, the robot gets better at approximating its location and the particles converge towards the actual position of the robot. The pose of a robot is defined by its two Cartesian coordinates with values $x$ and $y$ and its direction with value $\theta$. We use the kinematic equations of motion to model a deterministic state prediction. This is our motion model (or transition model). Next, we need a sensor model. There can be two kinds of sensor models: the first assumes that the sensors detect _stable_, _recognizable_ features of the environment called __landmarks__. The robot senses the location and bearing of each landmark and updates its belief according to that. We can also assume the noise in measurements to be Gaussian, to simplify things. Another kind of sensor model is used for an array of range sensors, each of which has a fixed bearing relative to the robot. These sensors provide a set of range values in each direction. This will also be corrupted by Gaussian noise, but we can assume that the errors for different beam directions are independent and identically distributed. After evidence comes in, the robot updates its belief state and reweights the particle distribution to better approximate the actual distribution. Let's have a look at how this algorithm is implemented in the module.
###Code
psource(monte_carlo_localization)
###Output
_____no_output_____
###Markdown
Our implementation of Monte Carlo Localization uses the range scan method. The `ray_cast` helper function casts rays in different directions and stores the range values. `a` stores the `v` and `w` components of the robot's velocity. `z` is a range scan. `P_motion_sample` is the motion or transition model. `P_sensor` is the range sensor noise model. `m` is the 2D map of the environment. `S` is a vector of samples of size N. We'll now define a simple 2D map to run Monte Carlo Localization on. Let's say this is the map we want.
###Code
m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])
heatmap(m.m, cmap='binary')
###Output
_____no_output_____
###Markdown
Let's define the motion model as a function `P_motion_sample`.
###Code
def P_motion_sample(kin_state, v, w):
"""Sample from possible kinematic states.
Returns from a single element distribution (no uncertainty in motion)"""
pos = kin_state[:2]
orient = kin_state[2]
# for simplicity the robot first rotates and then moves
orient = (orient + w)%4
for _ in range(orient):
v = (v[1], -v[0])
pos = vector_add(pos, v)
return pos + (orient,)
###Output
_____no_output_____
###Markdown
Define the sensor model as a function `P_sensor`.
###Code
def P_sensor(x, y):
"""Conditional probability for sensor reading"""
# Need not be exact probability. Can use a scaled value.
if x == y:
return 0.8
elif abs(x - y) <= 2:
return 0.05
else:
return 0
###Output
_____no_output_____
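###Markdown
The piecewise `P_sensor` above is a deliberately crude likelihood. The text earlier mentioned that range-sensor noise is often modelled as Gaussian; a hypothetical alternative (not used anywhere in the module) could weight a reading by how far the predicted range is from the measured one:
###Code
import math

def gaussian_sensor(x, y, sigma=1.0):
    """Unnormalized Gaussian likelihood of measuring y when the predicted range is x."""
    return math.exp(-((x - y) ** 2) / (2 * sigma ** 2))

print(gaussian_sensor(4, 4))   # 1.0   (perfect agreement)
print(gaussian_sensor(4, 6))   # ~0.14 (two cells off)
###Output
_____no_output_____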
###Markdown
Initializing variables.
###Code
a = {'v': (0, 0), 'w': 0}
z = (2, 4, 1, 6)
###Output
_____no_output_____
###Markdown
Let's run `monte_carlo_localization` with these parameters to find a sample distribution S.
###Code
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)
###Output
_____no_output_____
###Markdown
Let's plot the values in the sample distribution `S`.
###Code
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
GRID:
0 0 9 41 123 12 1 0 0 0 0 0 0 0 0 0 0
0 0 0 0 2 107 56 4 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 5 4 9 2 0 0 0 0 0 0 0 0 0 0
1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 10 260 135 5 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 5 34 50 0 0 0 0 0 0 0 0 0 0
79 4 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
26 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
0 0 0 3 2 10 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
###Markdown
The distribution is highly concentrated at `(5, 3)`, but the robot is not very confident about its position as some other cells also have high probability values. Let's look at another scenario.
###Code
a = {'v': (0, 1), 'w': 0}
z = (2, 3, 5, 7)
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
GRID:
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 999 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
###Markdown
In this case, the robot is 99.9% certain that it is at position `(6, 7)`.
DECISION THEORETIC AGENT
We now move into the domain of probabilistic decision making. To make choices between different possible plans in a certain situation in a given environment, an agent must have _preferences_ between the possible outcomes of the various plans. __Utility theory__ is used to represent and reason with preferences. The agent prefers states with a higher _utility_. While constructing multi-agent systems, one major element in the design is the mechanism the agents use for making decisions about which actions to adopt in order to achieve their goals. What is usually required is a mechanism which ensures that the actions adopted lead to benefits for both the individual agents and the community of which they are part. The utility of a state is _relative_ to an agent. Preferences, as expressed by utilities, are combined with probabilities in the general theory of rational decisions called __decision theory__. An agent is said to be _rational_ if and only if it chooses the action that yields the highest expected utility, averaged over all the possible outcomes of the action. Here we'll see how a decision-theoretic agent is implemented in the module.
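But first, a toy numeric illustration of this maximum-expected-utility principle; the actions, outcome probabilities and utilities below are made up for the example and have nothing to do with the module's API.
###Code
# Hypothetical decision: take an umbrella or not, with made-up numbers.
P_outcome = {                               # P(outcome | action)
    'take_umbrella':  {'dry': 1.0, 'wet': 0.0},
    'leave_umbrella': {'dry': 0.7, 'wet': 0.3},
}
utility = {'dry': 10, 'wet': -50}           # utility of each outcome
carry_cost = {'take_umbrella': -2, 'leave_umbrella': 0}

def expected_utility(action):
    return carry_cost[action] + sum(p * utility[o] for o, p in P_outcome[action].items())

best = max(P_outcome, key=expected_utility)
print(best, expected_utility(best))         # take_umbrella 8.0 (vs -8.0 for leaving it)
###Output
_____no_output_____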
###Code
psource(DTAgentProgram)
###Output
_____no_output_____
###Markdown
The `DTAgentProgram` function is pretty self-explanatory. It encapsulates a function `program` that takes in an observation or a `percept`, updates its `belief_state` and returns the action that maximizes the `expected_outcome_utility`.
INFORMATION GATHERING AGENT
Before we discuss what an information gathering agent is, we'll need to know what decision networks are. For an agent in an environment, a decision network represents information about the agent's current state, its possible actions, the state that will result from the agent's action, and the utility of that state. Decision networks have three primary kinds of nodes:
1. __Chance nodes__: These represent random variables, just like in Bayesian networks.
2. __Decision nodes__: These represent points where the decision-maker has a choice between different actions and tries to find the optimal decision at these nodes with regard to the cost, safety and resulting utility.
3. __Utility nodes__: These represent the agent's utility function. A description of the agent's utility as a function is associated with a utility node.
To evaluate a decision network, we do the following:
1. Initialize the evidence variables according to the current state.
2. Calculate posterior probabilities for each possible value of the decision node and calculate the utility resulting from that action.
3. Return the action with the highest utility.
Let's have a look at the implementation of the `DecisionNetwork` class.
###Code
psource(DecisionNetwork)
###Output
_____no_output_____
###Markdown
The `DecisionNetwork` class inherits from `BayesNet` and has a few extra helper methods. `best_action` returns the best action in the network. `get_utility` is an abstract method which is supposed to return the utility of a particular action and state in the network. `get_expected_utility` computes the expected utility, given an action and evidence. Before we proceed, we need to know a few more terms. Having __perfect information__ refers to a state of being fully aware of the current state, the cost functions and the outcomes of actions. This in turn allows an agent to find the exact utility value of each state. If an agent has perfect information about the environment, maximum expected utility calculations are exact and can be computed with absolute certainty. In decision theory, the __value of perfect information__ (VPI) is the price that an agent would be willing to pay in order to gain access to _perfect information_. VPI calculations are extensively used to calculate expected utilities for nodes in a decision network. For a random variable $E_j$ whose value is currently unknown, the value of discovering $E_j$, given current information $e$, must average over all possible values $e_{jk}$ that we might discover for $E_j$, using our _current_ beliefs about its value. The VPI of $E_j$ is then given by:
$$VPI_e(E_j) = \left(\sum_{k}P(E_j=e_{jk}\ |\ e) EU(\alpha_{e_{jk}}\ |\ e, E_j=e_{jk})\right) - EU(\alpha\ |\ e)$$
VPI is _non-negative_, _non-additive_ and _order-independent_. An information gathering agent is an agent with certain properties that explores decision networks as and when required, with heuristics driven by VPI calculations of nodes. A sensible agent should ask questions in a reasonable order, should avoid asking irrelevant questions, should take into account the importance of each piece of information in relation to its cost and should stop asking questions when that is appropriate. _VPI_ is used as the primary heuristic to consider all these points in an information gathering agent, as the agent ultimately wants to maximize the utility and needs to find the optimal cost and extent of finding the required information. As an overview, an information gathering agent works by repeatedly selecting the observations with the highest information value, until the cost of the next observation is greater than its expected benefit. The `InformationGatheringAgent` class is an abstract class that inherits from `Agent` and works on the principles discussed above. Let's have a look.
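But first, a quick sanity check of the VPI formula with made-up numbers for a single boolean variable (not tied to any network in the module):
###Code
# Current belief about E_j and hypothetical best expected utilities.
P_Ej = {True: 0.4, False: 0.6}        # P(E_j = e_jk | e)
EU_best_given = {True: 25, False: 10} # EU of the best action once E_j is known
EU_best_now = 10                      # EU of the best action with current information only

VPI = sum(P_Ej[v] * EU_best_given[v] for v in P_Ej) - EU_best_now
print(VPI)                            # 0.4*25 + 0.6*10 - 10 = 6.0
###Output
_____no_output_____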
###Code
psource(InformationGatheringAgent)
###Output
_____no_output_____
###Markdown
Review of probability theory using Python
Install the tabulate package (`pip install tabulate` on a Mac). We will use pandas to read a CSV file and to store data.
Documentation of pandas: https://pandas.pydata.org/pandas-docs/stable/user_guide/10min.html
###Code
import pandas as pd
import numpy as np
## Download student-mat.csv from ICON
#The csv file is downloaded from Kaggle
#https://www.kaggle.com/uciml/student-alcohol-consumption/data?select=student-mat.csv
df = pd.read_csv('student-mat.csv')
df.head(10) # Select the first 10 rows of data
###Output
_____no_output_____
###Markdown
Create a smaller data frame with only two columns.
Grade A: G3 >= 80%.
Absences: high absences, if a student missed 10 or more classes.
###Code
newlist = pd.DataFrame()
newlist['Grade'] = np.where(df['G3']*5 >= 80, 1, 0) # df short notation for data frame
newlist['Absences'] = np.where(df['absences'] >= 10, 1, 0)
newlist['count'] = 1
newlist.head(10)
###Output
_____no_output_____
###Markdown
Compute joint probabilities
###Code
Joint_table = pd.pivot_table(
newlist,
values='count',
index=['Grade'],
columns=['Absences'],
aggfunc=np.size,
fill_value=0
)
print("Joint Histogram")
print("-------------------")
print(Joint_table)
print("-------------------")
Joint_table = Joint_table.to_numpy()
Joint_Probabilities = Joint_table/len(newlist)
print("Joint Probabilities")
print("-------------------")
print(Joint_Probabilities)
print("-------------------")
###Output
Joint Histogram
-------------------
Absences 0 1
Grade
0 277 78
1 35 5
-------------------
Joint Probabilities
-------------------
[[0.70126582 0.19746835]
[0.08860759 0.01265823]]
-------------------
###Markdown
Compute marginal and conditional probabilities.
Marginal probability:
$$P(x_i) = \sum_j p(x_i,y_j)$$
Conditional probability P(X|Y=y), the probability of x if Y is fixed to a specific value:
$$P(X|Y) = p(x,y)/p(y)$$
###Code
# P(A) Marginal probability of Absences: Sum along the grades axis (rows; axis = 0)
PA = np.sum(Joint_Probabilities,axis=0)
print("Marginal probability of Absences")
print("-------------------")
print('P(A)', PA)
print("-------------------\n")
# Conditional probabilities of Grades, given A
# Broadcasting along the rows
PGgivenA = Joint_Probabilities/PA[None,:]
print("Conditional probability of Grades given Absences P(G|A)")
print("---------------------------------")
print(PGgivenA)
print("---------------------------------")
###Output
Marginal probability of Absences
-------------------
P(A) [0.78987342 0.21012658]
-------------------
Conditional probability of Grades given Absences P(G|A)
---------------------------------
[[0.88782051 0.93975904]
[0.11217949 0.06024096]]
---------------------------------
###Markdown
To do: Evaluate the probability of getting a grade A if the student has been absent ten or more times, P(Grade=A|Absences>=10).
1. Compute P(Grade=A|Absences>=10)
2. Compute P(Grade=A|Absences<10)
3. Add the two; what do you expect to get if you add the two? i.e. P(Grade=A|Absences>=10) + P(Grade=A|Absences<10)
4. What will you get if you add P(Grade=A|Absences>=10) and P(Grade<A|Absences>=10)?
###Code
# YOUR CODE HERE
print("---------------------------------")
print ('Probability of a grade of A given Absenses >=10:')
print( PGgivenA[-1][-1])
print('\n')
##Bottom Right cell probability
print("---------------------------------")
print('Probability of a grade of A given Absenses <10:')
print( PGgivenA[-1][0])
print('\n')
##Bottom Left cell probability
print("---------------------------------")
print('P(Grade=A|Absenses>=10) + P(Grade=A|Absenses<10): ')
print( PGgivenA[-1][0] + PGgivenA[-1][-1])
print('\n')
##This is the overall probability of getting an A
print("---------------------------------")
print('P(Grade=A|Absenses>=10) + P(Grade<A|Absenses>=10):')
print( PGgivenA[-1][-1] + PGgivenA[0][1])
print('\n')
##Code will print .99 repeating but is theoretically 1
###Output
---------------------------------
Probability of a grade of A given Absenses >=10:
0.060240963855421686
---------------------------------
Probability of a grade of A given Absenses <10:
0.11217948717948718
---------------------------------
P(Grade=A|Absenses>=10) + P(Grade=A|Absenses<10):
0.17242045103490888
---------------------------------
P(Grade=A|Absenses>=10) + P(Grade<A|Absenses>=10):
0.9999999999999999
###Markdown
To do: compute the marginal probability P(G) and the conditional probability P(A|G).
###Code
# YOUR CODE HERE
PG = np.sum(Joint_Probabilities,axis=-1)
print("Marginal probability of Grade A")
print("-------------------")
print('P(G)', PG)
print("-------------------\n")
PAgivenG = Joint_Probabilities/PG[:,None]
print("Conditional probability of Absences Given Grades P(A|G)")
print("---------------------------------")
print(PAgivenG)
print("---------------------------------")
###Output
Marginal probability of Grade A
-------------------
P(G) [0.89873418 0.10126582]
-------------------
Conditional probability of Absences Given Grades P(A|G)
---------------------------------
[[0.78028169 0.21971831]
[0.875 0.125 ]]
---------------------------------
###Markdown
Probability
This IPy notebook acts as supporting material for topics covered in **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning**, **Chapter 15 Probabilistic Reasoning over Time**, **Chapter 16 Making Simple Decisions** and parts of **Chapter 25 Robotics** of the book *Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in the probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
from utils import print_table
from notebook import psource, pseudocode, heatmap
###Output
_____no_output_____
###Markdown
CONTENTS
- Probability Distribution
  - Joint probability distribution
  - Inference using full joint distributions
- Bayesian Networks
  - BayesNode
  - BayesNet
  - Exact Inference in Bayesian Networks
    - Enumeration
    - Variable elimination
  - Approximate Inference in Bayesian Networks
    - Prior sample
    - Rejection sampling
    - Likelihood weighting
    - Gibbs sampling
- Hidden Markov Models
  - Inference in Hidden Markov Models
    - Forward-backward
    - Fixed lag smoothing
    - Particle filtering
- Monte Carlo Localization
- Decision Theoretic Agent
- Information Gathering Agent
PROBABILITY DISTRIBUTION
Let us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities works similarly to using a dictionary: the keys are the values of the random variable and we assign to each key its probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
psource(ProbDist)
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
###Markdown
The first parameter of the constructor **varname** has a default value of '?'. So if the name is not passed it defaults to ?. The keyword argument **freqs** can be a dictionary of values of the random variable: probability. These are then normalized such that the probability values sum up to 1 using the **normalize** method.
###Code
p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})
p.varname
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides the **prob** and **varname** the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability it is appended to this list. This is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
###Markdown
It is also possible to display approximate values, rounded to a given number of decimals, using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability Distribution
The helper function **event_values** returns a tuple of the values of the variables in the event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the values of those variables. The variables are specified with a list. The ordering of the returned tuple is the same as that of the variables. Alternatively, if the event is specified by a list or tuple of the same length as the variables, then the event tuple is returned as it is.
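For instance (a minimal illustration, assuming the `from probability import *` cell above has been run), a tuple of the right length passes straight through:
###Code
# A tuple whose length matches the variables list is returned unchanged.
print(event_values((8, 10), ['C', 'A']))   # -> (8, 10)
###Output
_____no_output_____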
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribution over a set of variables.
###Code
psource(JointProbDist)
###Output
_____no_output_____
###Markdown
A value for a Joint Distribution is an ordered tuple in which each item corresponds to the value associated with a particular variable. For a Joint Distribution of X, Y where X, Y take integer values this can be something like (18, 19). To specify a Joint Distribution we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class, **JointProbDist** also employs magic methods to assign probability to different values. The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint Distributions
In this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values. This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.
$$\textbf{P}(X | \textbf{e}) = \alpha \textbf{P}(X, \textbf{e}) = \alpha \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$
Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in the evidence or the query variable), i.e. all possible combinations of **y**. We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
###Markdown
Let us now look at the **enumerate_joint** function, which returns the sum of those entries in P consistent with e, provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation. The first parameter **variables** refers to the remaining variables. In each recursive call the function keeps one variable constant while varying the others.
###Code
psource(enumerate_joint)
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
###Output
_____no_output_____
###Markdown
You can verify the result from our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)** as we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and denominator.
###Code
ans2/ans1
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
psource(enumerate_joint_ask)
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
###Output
_____no_output_____
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation.
BAYESIAN NETWORKS
A Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements. A Bayes Network is implemented as the class **BayesNet**. It consists of a collection of nodes implemented by the class **BayesNode**. The implementation in the above mentioned classes focuses only on boolean variables. Each node is associated with a variable and it contains a **conditional probability table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents **P(X | parents)**. Let us dive into the **BayesNode** implementation.
###Code
psource(BayesNode)
###Output
_____no_output_____
###Markdown
The constructor takes in the name of **variable**, **parents** and **cpt**. Here **variable** is the name of the variable, like 'Earthquake'. **parents** should be a list or space-separated string with the variable names of the parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combinations of boolean values that the parents take. The length and order of the values in the keys should be the same as the supplied **parent** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true). The example below, where we implement the network shown in **Figure 14.3** of the book, will make this more clear. The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalent to john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can also use.
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
It is possible to use the node as a lookup function using the **p** method. The method takes in two arguments, **value** and **event**. Event must be a dict of the type {variable: values, ..}. The value corresponds to the value of the variable we are interested in (False or True). The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of the parents in the event. (The event must assign each parent a value.)
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
###Output
_____no_output_____
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
psource(BayesNet)
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds the node to the net. Its parents must already be in the net, and its variable must not. Thus add allows us to grow a **BayesNet** given its parents are already present. The **burglary** global is an instance of **BayesNet** corresponding to the above example.
    T, F = True, False
    burglary = BayesNet([
        ('Burglary', '', 0.001),
        ('Earthquake', '', 0.002),
        ('Alarm', 'Burglary Earthquake', {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
        ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
        ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
    ])
###Code
burglary
###Output
_____no_output_____
###Markdown
**BayesNet** method **variable_node** allows to reach **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
###Markdown
Exact Inference in Bayesian Networks
A Bayes Network is a more compact representation of the full joint distribution and, like full joint distributions, allows us to do inference, i.e. answer questions about probability distributions of random variables given some evidence. Exact algorithms don't scale well for larger networks. Approximate algorithms are explained in the next section.
Inference by Enumeration
We apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian Networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
psource(enumerate_all)
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of the **Equation 14.4** in the book.$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$ such that **P(X, e, y)** is written in the form of product of conditional probabilities **P(variable | parents(variable))** from the Bayesian Network.**enumeration_ask** calls **enumerate_all** on each value of query variable **X** and finally normalizes them.
###Code
psource(enumeration_ask)
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network. **enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
###Output
_____no_output_____
###Markdown
Variable Elimination
The enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we join the joint of all hidden variables, which is of exponential size in the number of hidden variables. Variable elimination employs interleaving of join and marginalization. Before we look into the implementation of Variable Elimination we must first familiarize ourselves with Factors. In general we call a multidimensional array of type P(Y1 ... Yn | X1 ... Xm) a factor, where some of the Xs and Ys may be assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and **cpt**.
Helper Functions
There are certain helper functions that help create the **cpt** for the Factor given the evidence. Let us explore them one by one.
###Code
psource(make_factor)
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, **bn** the bayes network.Here **variables** for each node refers to a list consisting of the variable itself and the parents minus any variables that are part of the evidence. This is created by finding the **node.parents** and filtering out those that are not part of the evidence.The **cpt** created is the one similar to the original **cpt** of the node with only rows that agree with the evidence.
###Code
psource(all_events)
###Output
_____no_output_____
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** which is part of the node. This works by extending evidence related to the node, thus all the output from **all_events** only includes events that support the evidence. Given **all_events** is a generator function, one such event is returned on every call. We can try this out using the example on **Page 524** of the book. We will make **f**5(A) = P(m | A).
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability for **P(MaryCalls=True | Alarm = False)**. Due to our representation, where we store probabilities only for cases where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book, where the evidence is that Alarm = True.
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**. Therefore the probabilities for True and False sum up to one. Note the difference between both cases. Again the only rows included are those consistent with the evidence.
Operations on Factors
We are interested in two kinds of operations on factors: **Pointwise Product**, which is used to create joint distributions, and **Summing Out**, which is used for marginalization.
###Code
psource(Factor.pointwise_product)
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint via combining two factors. We take the union of the **variables** of both factors and then generate the **cpt** for the new factor using the **all_events** function. Note that we have already eliminated rows that are not consistent with the evidence. The pointwise product assigns new probabilities by multiplying rows, similar to a database join.
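As a plain-Python illustration of that row-by-row multiplication (ordinary dicts here, not the `Factor` class, with made-up numbers), combining P(A) and P(B | A) gives a joint factor over both variables:
###Code
P_A = {(True,): 0.3, (False,): 0.7}
P_B_given_A = {(True, True): 0.9, (True, False): 0.1,      # keys are (A, B)
               (False, True): 0.2, (False, False): 0.8}

# one row per combination of A and B, multiplying the matching rows of each factor
joint = {(a, b): P_A[(a,)] * P_B_given_A[(a, b)]
         for a in (True, False) for b in (True, False)}
print(joint)   # {(True, True): 0.27, (True, False): 0.03, (False, True): 0.14, (False, False): 0.56}
###Output
_____no_output_____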
###Code
psource(pointwise_product)
###Output
_____no_output_____
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
###Code
psource(Factor.sum_out)
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a factor eliminating a variable by summing over its values. Again **all_events** is used to generate combinations for the rest of the variables.
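Continuing the toy dict example from above (again not the `Factor` class), summing out A from that joint factor leaves a factor over B alone:
###Code
joint = {(True, True): 0.27, (True, False): 0.03,          # keys are (A, B)
         (False, True): 0.14, (False, False): 0.56}

# add up every row that agrees on the value of B, eliminating A
P_B = {b: sum(p for (a, b2), p in joint.items() if b2 == b) for b in (True, False)}
print(P_B)   # {True: 0.41, False: 0.59}
###Output
_____no_output_____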
###Code
psource(sum_out)
###Output
_____no_output_____
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values.
Elimination Ask
The algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes in 3 arguments: **X** the query variable, **e** the evidence variable and **bn** the Bayes network. The algorithm creates factors out of Bayes Nodes in reverse order and eliminates hidden variables using **sum_out**. Finally it takes a pointwise product of all factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
psource(elimination_ask)
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
_____no_output_____
###Markdown
Elimination Ask Optimizations
`elimination_ask` has some critical points to consider and some optimizations can be performed:
- **Operations on factors**: the `sum_out` and `pointwise_product` functions used in `elimination_ask` are where the space and time complexity of the variable elimination algorithm arise (AIMA3e pg. 526).
> The only trick is to notice that any factor that does not depend on the variable to be summed out can be moved outside the summation.
- **Variable ordering**: Elimination ordering is important; every choice of ordering yields a valid algorithm, but different orderings cause different intermediate factors to be generated during the calculation (AIMA3e pg. 527). In this case the algorithm applies a reversed order. (A toy sketch of the greedy heuristic quoted below is given just before the timing cells.)
> In general, the time and space requirements of variable elimination are dominated by the size of the largest factor constructed during the operation of the algorithm. This in turn is determined by the order of elimination of variables and by the structure of the network. It turns out to be intractable to determine the optimal ordering, but several good heuristics are available. One fairly effective method is a greedy one: eliminate whichever variable minimizes the size of the next factor to be constructed.
- **Variable relevance**: Some variables could be irrelevant to resolve a query (i.e. they sum to 1). A variable elimination algorithm can therefore remove all these variables before evaluating the query (AIMA3e pg. 528).
> An optimization is to remove 'every variable that is not an ancestor of a query variable or evidence variable is irrelevant to the query'.
Runtime comparison
Let's see how the runtimes of these two algorithms compare. We expect variable elimination to outperform enumeration by a large margin as we reduce the number of repetitive calculations significantly.
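Before timing them, here is the toy sketch promised above of the greedy elimination-ordering heuristic. The factor scopes below are hypothetical (loosely modelled on the burglary network) and this is not part of the module.
###Code
def elimination_size(var, factors):
    """Number of variables in the factor that eliminating var would create."""
    touching = [f for f in factors if var in f]
    return len(set().union(*touching) - {var}) if touching else 0

factors = [{'B'}, {'E'}, {'A', 'B', 'E'}, {'J', 'A'}, {'M', 'A'}]   # factor scopes only
hidden = {'A', 'E'}                                                 # variables to eliminate
order = []
while hidden:
    var = min(hidden, key=lambda v: elimination_size(v, factors))   # greedy choice
    joined = set().union(*[f for f in factors if var in f]) - {var}
    factors = [f for f in factors if var not in f] + [joined]
    hidden.remove(var)
    order.append(var)
print(order)   # ['E', 'A']: eliminating E first creates a smaller intermediate factor
###Output
_____no_output_____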
###Code
%%timeit
enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
%%timeit
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
262 µs ± 54.7 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
###Markdown
In this test case we observe that variable elimination is slower than what we expected. It has something to do with the number of threads and how Python tries to optimize things, and it happens because the network is very small, with just 5 nodes. `elimination_ask` has some critical points, and some optimizations must be performed, as seen above. Of course, for more complicated networks, variable elimination will be significantly faster and the runtime will drop not just by a constant factor, but by a polynomial factor proportional to the number of nodes, due to the reduction in repeated calculations.
Approximate Inference in Bayesian Networks
Exact inference fails to scale for very large and complex Bayesian Networks. This section covers the implementation of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
psource(BayesNode.sample)
###Output
_____no_output_____
###Markdown
Before we consider the different algorithms in this section let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on the event's values for the parent variables. That is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it.
Prior Sampling
The idea of Prior Sampling is to sample from the Bayesian Network in a topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
###Code
psource(prior_sample)
###Output
_____no_output_____
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in topological order. The old value of the event is passed as evidence for the parent values. We will use the Bayesian Network in **Figure 14.12** to try out **prior_sample**. Traversing the graph in topological order is important. There are two possible topological orderings for this particular directed acyclic graph:
1. `Cloudy -> Sprinkler -> Rain -> Wet Grass`
2. `Cloudy -> Rain -> Sprinkler -> Wet Grass`
We can follow either of the two orderings to sample from the network. Any ordering other than these two, however, cannot be used. One way to think about this is that `Cloudy` can be seen as a precondition of both `Rain` and `Sprinkler`, and just like we have seen in planning, preconditions need to be satisfied before a certain action can be executed. We store the samples as observations. Let us find **P(Rain=True)** by taking 1000 random samples from the network.
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
0.503
###Markdown
Sampling this another time might give different results as we have no control over the distribution of the random samples
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
answer = len(rain_true) / N
print(answer)
###Output
0.519
###Markdown
To evaluate a conditional distribution, we can use a two-step filtering process. We first separate out the variables that are consistent with the evidence. Then for each value of the query variable, we can find probabilities. For example, to find **P(Cloudy=True | Rain=True)**: we have already filtered out the values consistent with our evidence in **rain_true**. Now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**.
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
0.8265895953757225
###Markdown
Rejection Sampling
Rejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. Rejection sampling is advantageous only when we know the query beforehand. While prior sampling generally works for any query, it might fail in some scenarios. Let's say we have a generic Bayesian network and we have evidence `e`, and we want to know how many times a state `A` is true, given evidence `e` is true. Normally, prior sampling can answer this question, but let's assume that the probability of evidence `e` being true in our actual probability distribution is very small. In this situation, it might be possible that sampling never encounters a data-point where `e` is true. If our sampled data has no instance of `e` being true, `P(e) = 0`, and therefore `P(A, e) / P(e) = 0/0`, which is undefined. We cannot find the required value using this sample. We can definitely increase the number of sample points, but we can never guarantee that we will encounter a case where `e` is true (assuming our actual probability distribution has at least one case where `e` is true). To guarantee this, we would have to consider every single data point, which means we lose the speed advantage that approximation provides us and we essentially have to calculate the exact inference model of the Bayesian network. Rejection sampling will be useful in this situation, as we already know the query. While sampling from the network, we will reject any sample which is inconsistent with the evidence variables of the given query (in this example, the only evidence variable is `e`). We will only consider samples that do not violate **any** of the evidence variables. In this way, we will have enough data with the required evidence to infer queries involving a subset of that evidence. The function **rejection_sampling** implements the algorithm described by **Figure 14.14**.
###Code
psource(rejection_sampling)
###Output
_____no_output_____
###Markdown
The function keeps counts of each of the possible values of the Query variable and increases the count when we see an observation consistent with the evidence. It takes in input parameters **X** - The Query Variable, **e** - evidence, **bn** - Bayes net and **N** - number of prior samples to generate.**consistent_with** is used to check consistency.
###Code
psource(consistent_with)
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
###Output
_____no_output_____
###Markdown
Likelihood WeightingRejection sampling takes a long time to run when the probability of finding consistent evidence is low. It is also slow for larger networks and more evidence variables.Rejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood Weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
psource(weighted_sample)
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from the Bayesian Network that's consistent with the evidence **e** and returns the event and its weight, the likelihood that the event accords with the evidence. It takes in two parameters, **bn** the Bayesian Network and **e** the evidence. The weight is obtained by multiplying **P(xi | parents(xi))** for each node in the evidence. We set the values of **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
psource(likelihood_weighting)
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling**, but instead of adding one for each sample we add the weight obtained from **weighted_sample**.
###Code
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Gibbs SamplingIn likelihood sampling, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian Network. This can happen because influence only propagates downwards in likelihood sampling.Gibbs Sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**
###Code
psource(gibbs_ask)
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values. Then we repeatedly select a non-evidence variable and sample it from **P(Variable | current values of all remaining variables)**. In practice, we speed this up by using **markov_blanket_sample** instead. This works because terms not involving the variable get canceled in the calculation. The arguments for **gibbs_ask** are similar to **likelihood_weighting**.
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Runtime analysisLet's take a look at how much time each algorithm takes.
###Code
%%timeit
all_observations = [prior_sample(sprinkler) for x in range(1000)]
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
len([observation for observation in rain_true if observation['Cloudy'] == True]) / len(rain_true)
%%timeit
rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
%%timeit
likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200)
%%timeit
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200)
###Output
14.4 ms ± 2.16 ms per loop (mean ± std. dev. of 7 runs, 100 loops each)
###Markdown
As expected, all algorithms have a very similar runtime. However, rejection sampling becomes a lot slower and less accurate when the probability of finding data points consistent with the required evidence is small. Likelihood weighting is the fastest of all as it doesn't involve rejecting samples, but it also has quite high variance.

HIDDEN MARKOV MODELS

Often, we need to carry out probabilistic inference on temporal data or a sequence of observations where the order of observations matters. We require a model similar to a Bayesian network, but one that grows over time to keep up with the latest evidence. If you are familiar with the `mdp` module or Markov models in general, you can probably guess that a Markov model might come close to representing our problem accurately. A Markov model is basically a chain-structured Bayesian network in which there is one state for each time step and each node has an identical probability distribution. The first node, however, has a different distribution, called the prior distribution, which models the initial state of the process. A state in a Markov model depends only on the previous state and the latest evidence, and not on the states before it.

A **Hidden Markov Model** or **HMM** is a special case of a Markov model in which the state of the process is described by a single discrete random variable. The possible values of the variable are the possible states of the world. But what if we want to model a process with two or more state variables? In that case, we can still fit the process into the HMM framework by redefining our state variables as a single "megavariable". We do this because there are standard optimized algorithms for carrying out inference on HMMs. An HMM is very similar to an MDP, but we don't have the option of taking actions as in MDPs; instead, the process carries on as new evidence appears. If an HMM is truncated at a fixed length, it becomes a Bayesian network and general BN inference can be used on it to answer queries.

Before we start, it will be helpful to understand the structure of a temporal model. We will use the example of the book with the guard and the umbrella. In this example, the state $\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) on Day $\textbf{t}$. In the sensor or observation model, the observation or evidence $\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\textbf{t}$. Based on that, the transition model is

| $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}\mid X_{t-1})$ |
| ------------- | ------------- | ---------------------------------- |
| ***${False}$*** | ***${False}$*** | 0.7 |
| ***${False}$*** | ***${True}$*** | 0.3 |
| ***${True}$*** | ***${False}$*** | 0.3 |
| ***${True}$*** | ***${True}$*** | 0.7 |

And the sensor model will be

| $X_{t}$ | $U_{t}$ | **P**$(U_{t}\mid X_{t})$ |
| :-------------: | :-------------: | :------------------------: |
| ***${False}$*** | ***${True}$*** | 0.2 |
| ***${False}$*** | ***${False}$*** | 0.8 |
| ***${True}$*** | ***${True}$*** | 0.9 |
| ***${True}$*** | ***${False}$*** | 0.1 |

HMMs are implemented in the **`HiddenMarkovModel`** class. Let's have a look.
###Code
psource(HiddenMarkovModel)
###Output
_____no_output_____
###Markdown
We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model.
###Code
hmm.sensor_dist(ev=True)
###Output
_____no_output_____
###Markdown
Now that we have defined an HMM object, our task here is to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$ given evidence **U** at each time step **t**. The basic inference tasks that must be solved are:

1. **Filtering**: Computing the posterior probability distribution over the most recent state, given all the evidence up to the current time step.
2. **Prediction**: Computing the posterior probability distribution over a future state.
3. **Smoothing**: Computing the posterior probability distribution over a past state. Smoothing provides a better estimate as it incorporates more evidence.
4. **Most likely explanation**: Finding the most likely sequence of states for a given observation.
5. **Learning**: The transition and sensor models can be learnt, if not yet known, just like in an information gathering agent.

There are three primary methods to carry out inference in Hidden Markov Models:

1. The Forward-Backward algorithm
2. Fixed lag smoothing
3. Particle filtering

Let's have a look at how we can carry out inference and answer queries based on our umbrella HMM using these algorithms.

FORWARD-BACKWARD

This is a general algorithm that works for all Markov models, not just HMMs. In the filtering task (inference) we are given the evidence **U** at each time **t** and we want to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$. We can think of it as a three-step process:

1. In every step we start with the current belief $P(X_{t}|e_{1:t})$.
2. We update it for time.
3. We update it for evidence.

The forward algorithm performs steps 2 and 3 at once. It updates, or rather reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume that it is equally likely to rain or not. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are [0.5, 0.5] by default. The observation update is calculated with the **`forward()`** function: basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**.
###Code
psource(forward)
umbrella_prior = [0.5, 0.5]
belief_day_1 = forward(hmm, umbrella_prior, ev=True)
print ('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))
###Output
The probability of raining on day 1 is 0.82
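###Markdown
As a sanity check, the same number can be reproduced with a couple of lines of matrix algebra, using the update $f_{1:t+1} \propto O_{t+1}T^{T}f_{1:t}$ and the matrices defined above. This assumes the row/column layout used by the lists above; it is only a hand check, not how `forward` is implemented.
###Code
import numpy as np

T = np.array(umbrella_transition_model)       # transition model P(X_t | X_{t-1})
O_true = np.diag(hmm.sensor_dist(ev=True))    # observation matrix for Umbrella = True
prior = np.array(umbrella_prior)

prediction = T.T @ prior                      # propagate the prior one step forward
unnormalized = O_true @ prediction            # weight by the evidence likelihood
belief = unnormalized / unnormalized.sum()    # normalize
print('P(Rain on day 1) = {:.2f}'.format(belief[0]))
###Output
_____no_output_____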
###Markdown
In **Day 2** our initial belief is the updated belief of **Day 1**.Again using the **`forward()`** function we can compute the probability of raining in **Day 2**
###Code
belief_day_2 = forward(hmm, belief_day_1, ev=True)
print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))
###Output
The probability of raining in day 2 is 0.88
###Markdown
In the smoothing part we are interested in computing the distribution over past states given evidence up to the present. Assume that we want to compute the distribution for a time **k**, with $0\leq k<t$. The computation can be divided in two parts:

1. The forward message is computed up to **k** by filtering forward from 1 to **k**.
2. The backward message is computed by a recursive process that runs from **t** down to **k**; rather than starting at time 1, the algorithm starts at time **t**.

In the umbrella example, we can compute the backward message from **Day 2** to **Day 1** by using the `backward` function. The `backward` function takes as parameters the object created by the **`HiddenMarkovModel`** class, the evidence on **Day 2** (in our case **True**), and the initial probabilities of being in a state at time t+1; since no observation is available there, it will be [1, 1]. The `backward` function returns a list with the conditional probabilities.
###Code
psource(backward)
b = [1, 1]
backward(hmm, b, ev=True)
###Output
_____no_output_____
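###Markdown
Before discussing how this compares with the book, note that the book's unnormalized backward message $\textbf{b}_{2:2} = T O_2 \textbf{1}$ can be reproduced by hand with the matrices defined above (a sanity check only; `backward` itself returns a normalized result).
###Code
import numpy as np

T = np.array(umbrella_transition_model)
O_true = np.diag(hmm.sensor_dist(ev=True))    # umbrella observed on Day 2
print(T @ O_true @ np.array([1, 1]))          # unnormalized message, roughly [0.69, 0.41]
###Output
_____no_output_____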
###Markdown
Some may notice that the result is not the same as in the book. The main reason is that the book does not apply the normalization step, whereas our implementation normalizes the result with the **`normalize()`** helper function. In order to find the smoothed estimate for raining on **Day k**, we will use the **`forward_backward()`** function. As in the example in the book, the umbrella is observed on both days and the prior distribution is [0.5, 0.5].
###Code
pseudocode('Forward-Backward')
umbrella_prior = [0.5, 0.5]
prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)
print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))
###Output
The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88
###Markdown
Since HMMs are represented as single-variable systems, we can represent the transition model and sensor model as matrices. The `forward_backward` algorithm can be easily carried out on this representation (as we have done here) with a time complexity of $O({S}^{2} t)$, where $t$ is the length of the sequence and each step multiplies a vector of size $S$ with a matrix of dimensions $S \times S$. Additionally, the forward pass stores $t$ vectors of size $S$, which makes the auxiliary space requirement equivalent to $O(St)$.

Is there any way we can improve the time or space complexity? Fortunately, the matrix representation of HMM properties allows us to do so. If $f$ and $b$ represent the forward and backward messages respectively, we can modify the smoothing algorithm by first running the standard forward pass to compute $f_{1:t}$ (forgetting all the intermediate results) and then running the backward pass for both $b$ and $f$ together, using them to compute the smoothed estimate at each step. This optimization reduces the auxiliary space requirement to constant (irrespective of the length of the sequence) provided the transition matrix is invertible and the sensor model has no zeros (which is sometimes hard to accomplish).

Let's look at another algorithm that carries out smoothing in a more optimized way.

FIXED LAG SMOOTHING

The matrix formulation allows us to optimize online smoothing with a fixed lag. Since smoothing can be done in constant space, there should exist an algorithm whose time complexity is independent of the length of the lag. For smoothing at a time slice $t - d$, where $d$ is the lag, we need to compute $\alpha \, f_{1:t-d} \times b_{t-d+1:t}$ incrementally. As we already know, the forward equation is

$$f_{1:t+1} = \alpha O_{t+1}{T}^{T}f_{1:t}$$

and the backward equation is

$$b_{k+1:t} = TO_{k+1}b_{k+2:t}$$

where $T$ and $O$ are the transition and sensor models respectively. For smoothing, the forward message is easy to compute, but there exists no simple relation between the backward message of this time step and the one at the previous time step, hence we apply the backward equation $d$ times to get

$$b_{t-d+1:t} = \left ( \prod_{i=t-d+1}^{t}{TO_i} \right )b_{t+1:t} = B_{t-d+1:t}\textbf{1}$$

where $B_{t-d+1:t}$ is the product of the sequence of $T$ and $O$ matrices. Here's how the `probability` module implements `fixed_lag_smoothing`.
###Code
psource(fixed_lag_smoothing)
###Output
_____no_output_____
###Markdown
This algorithm applies `forward` as usual and optimizes the smoothing step by using the equations above. This optimization could be achieved only because HMM properties can be represented as matrices. `vector_to_diagonal`, `matrix_multiplication` and `inverse_matrix` are matrix manipulation functions to simplify the implementation. `normalize` is used to normalize the output before returning it. Here's how we can use `fixed_lag_smoothing` for inference on our umbrella HMM.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
Given evidence T, F, T, F and T, we want to calculate the probability distribution for the fourth day with a fixed lag of 2 days. Let `e_t = False`.
###Code
e_t = F
evidence = [T, F, T, F, T]
fixed_lag_smoothing(e_t, hmm, d=2, ev=evidence, t=4)
e_t = T
evidence = [T, T, F, T, T]
fixed_lag_smoothing(e_t, hmm, d=1, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
We cannot calculate probability distributions when $t$ is less than $d$
###Code
fixed_lag_smoothing(e_t, hmm, d=5, ev=evidence, t=4)
###Output
_____no_output_____
###Markdown
As expected, the output is `None`.

PARTICLE FILTERING

The filtering problem is too expensive to solve with the previous methods for problems with large or continuous state spaces. Particle filtering is a method that can solve the same problem when the state space is much larger, where we wouldn't be able to do these computations in a reasonable amount of time as time goes by, and we want to keep track of things as they happen. The downside is that it is a sampling method and hence isn't exact, but the more samples we're willing to take, the more accurate we get. In this method, instead of keeping track of the probability distribution, we drop particles in a similar proportion at the required regions. The internal representation of this distribution is usually a list of particles with coordinates in the state space. A particle is just a new name for a sample.

Particle filtering can be divided into four steps:

1. __Initialization__: If we have some idea about the prior probability distribution, we drop the initial particles accordingly, or else we just drop them uniformly over the state space.

2. __Forward pass__: As time goes by and measurements come in, we move the selected particles into the grid squares that make the most sense in terms of representing the distribution that we are trying to track. When time goes by, we just loop through all our particles and try to simulate what could happen to each one of them by sampling its next position from the transition model. This is like prior sampling: samples' frequencies reflect the transition probabilities. If we have enough samples we are pretty close to the exact values. We work through the list of particles, one particle at a time; all we do is stochastically simulate what the outcome might be. If we had no dimension of time, and no new measurements came in, this would be exactly the same as what we did in prior sampling.

3. __Reweight__: As observations come in, we don't sample the observations; we fix them and downweight the samples based on the evidence, just like in likelihood weighting.
$$w(x) = P(e\mid x)$$
$$B(X) \propto P(e\mid X)B'(X)$$
As before, the probabilities don't sum to one, since most have been downweighted; they sum to an approximation of $P(e)$. To normalize the resulting distribution, we can divide by $P(e)$. Likelihood weighting wasn't the best thing for Bayesian networks, because we were not accounting for the incoming evidence, so we were getting samples from the prior distribution (in some sense not the right distribution) and we might end up with a lot of particles with low weights. These samples were very uninformative, and the way we fixed it then was by using __Gibbs sampling__. Theoretically, Gibbs sampling can be run on an HMM, but while we could iterate over the process as many times as we liked in a Bayesian network, we cannot do that here, since we have new incoming evidence and we also need computational cycles to propagate through time. A lot of samples have very low weight and are not representative of the _actual probability distribution_. So if we keep running likelihood weighting, we keep propagating the samples with smaller weights and carry out computations for them even though these samples have no significant contribution to the actual probability distribution. This is why we require the last step.

4. __Resample__: Rather than tracking weighted samples, we _resample_. We choose from our weighted sample distribution as many times as the number of particles we initially had, with replacement, so that we have a constant number of particles. This is equivalent to renormalizing the distribution. The samples with low weight are rarely chosen in the new distribution after resampling. This newer set of particles after resampling is in some sense more representative of the actual distribution, and so we are better allocating our computational cycles. Now the update is complete for this time step; continue with the next one.

Let's see how this is implemented in the module.
###Code
psource(particle_filtering)
###Output
_____no_output_____
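###Markdown
Before looking at the helper functions used by the implementation above, the resampling step on its own can be illustrated with a tiny standalone sketch (the particles and weights below are made up, purely for illustration): particles with larger weights are chosen more often, which is the essential behaviour of `weighted_sample_with_replacement`.
###Code
import random

def resample(particles, weights, n):
    """Draw n particles with replacement, with probability proportional to their weights."""
    return random.choices(particles, weights=weights, k=n)

particles = ['A', 'A', 'B', 'B', 'B']
weights = [0.1, 0.1, 0.9, 0.9, 0.9]           # hypothetical evidence weights
print(resample(particles, weights, 10))
###Output
_____no_output_____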
###Markdown
In the implementation above, `scalar_vector_product` and `vector_add` are helper functions for vector math, and `weighted_sample_with_replacement` resamples from a weighted sample with replacement, as the name suggests. This implementation considers two state variables with generic names 'A' and 'B'. Here's how we can use `particle_filtering` on our umbrella HMM, though it doesn't make much sense to use particle filtering on a problem with such a small state space; it is just to get familiar with the syntax.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
particle_filtering(T, 10, hmm)
###Output
_____no_output_____
###Markdown
We got 5 samples from state `A` and 5 samples from state `B`
###Code
particle_filtering([F, T, F, F, T], 10, hmm)
###Output
_____no_output_____
###Markdown
This time we got 2 samples from state `A` and 8 samples from state `B`.

Comparing runtimes for these algorithms will not be useful, as each solves the filtering task efficiently for a different scenario. `forward_backward` calculates the exact probability distribution. `fixed_lag_smoothing` calculates an approximate distribution and its runtime will depend on the value of the lag chosen. `particle_filtering` is an efficient method for approximating distributions for a very large or continuous state space.

MONTE CARLO LOCALIZATION

In the domain of robotics, particle filtering is used for _robot localization_. __Localization__ is the problem of finding out where things are; in this case, we want to find the position of a robot in a continuous state space. __Monte Carlo Localization__ is an algorithm for robots to _localize_ using a _particle filter_. Given a map of the environment, the algorithm estimates the position and orientation of a robot as it moves and senses the environment. Initially, particles are distributed uniformly over the state space, i.e. the robot has no information about where it is and assumes it is equally likely to be at any point in space. When the robot moves, it analyses the incoming evidence to shift and change the probabilities to better approximate the probability distribution of its position. The particles are then resampled based on their weights. Gradually, as more evidence comes in, the robot gets better at approximating its location, and the particles converge towards the actual position of the robot.

The pose of a robot is defined by its two Cartesian coordinates, with values $x$ and $y$, and its direction, with value $\theta$. We use the kinematic equations of motion to model a deterministic state prediction; this is our motion model (or transition model).

Next, we need a sensor model. There can be two kinds of sensor models. The first assumes that the sensors detect _stable_, _recognizable_ features of the environment called __landmarks__. The robot senses the location and bearing of each landmark and updates its belief according to that. We can also assume the noise in measurements to be Gaussian, to simplify things. Another kind of sensor model is used for an array of range sensors, each of which has a fixed bearing relative to the robot. These sensors provide a set of range values in each direction. These will also be corrupted by Gaussian noise, but we can assume that the errors for different beam directions are independent and identically distributed. After evidence comes in, the robot updates its belief state and reweights the particle distribution to better approximate the actual distribution.

Let's have a look at how this algorithm is implemented in the module.
###Code
psource(monte_carlo_localization)
###Output
_____no_output_____
###Markdown
Our implementation of Monte Carlo Localization uses the range-scan method. The `ray_cast` helper function casts rays in different directions and stores the range values. `a` stores the `v` and `w` components of the robot's velocity. `z` is a range scan. `P_motion_sample` is the motion or transition model. `P_sensor` is the range sensor noise model. `m` is the 2D map of the environment. `S` is a vector of samples of size N. We'll now define a simple 2D map to run Monte Carlo Localization on. Let's say this is the map we want:
###Code
m = MCLmap([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0]])
heatmap(m.m, cmap='binary')
###Output
_____no_output_____
###Markdown
Let's define the motion model as a function `P_motion_sample`.
###Code
def P_motion_sample(kin_state, v, w):
"""Sample from possible kinematic states.
    Returns from a single element distribution (no uncertainty in motion)"""
pos = kin_state[:2]
orient = kin_state[2]
# for simplicity the robot first rotates and then moves
orient = (orient + w)%4
for _ in range(orient):
v = (v[1], -v[0])
pos = vector_add(pos, v)
return pos + (orient,)
###Output
_____no_output_____
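###Markdown
A quick check of the motion model defined above, starting from a hypothetical pose `(4, 5, 0)` (position (4, 5), orientation 0) with `v = (0, 1)` and `w = 1`: the robot first rotates and then moves.
###Code
P_motion_sample((4, 5, 0), (0, 1), 1)
###Output
_____no_output_____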
###Markdown
Define the sensor model as a function `P_sensor`.
###Code
def P_sensor(x, y):
"""Conditional probability for sensor reading"""
# Need not be exact probability. Can use a scaled value.
if x == y:
return 0.8
elif abs(x - y) <= 2:
return 0.05
else:
return 0
###Output
_____no_output_____
###Markdown
Initializing variables.
###Code
a = {'v': (0, 0), 'w': 0}
z = (2, 4, 1, 6)
###Output
_____no_output_____
###Markdown
Let's run `monte_carlo_localization` with these parameters to find a sample distribution S.
###Code
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m)
###Output
_____no_output_____
###Markdown
Let's plot the values in the sample distribution `S`.
###Code
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
GRID:
0 0 12 0 143 14 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 17 52 201 6 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 3 5 19 9 3 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 6 166 0 21 0 0 0 0 0 0 0 0 0 0 0
0 0 0 1 11 75 0 0 0 0 0 0 0 0 0 0 0
73 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0
124 0 0 0 0 0 0 1 0 3 0 0 0 0 0 0 0
0 0 0 14 4 15 1 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
###Markdown
The distribution is highly concentrated at `(5, 3)`, but the robot is not very confident about its position as some other cells also have high probability values. Let's look at another scenario.
###Code
a = {'v': (0, 1), 'w': 0}
z = (2, 3, 5, 7)
S = monte_carlo_localization(a, z, 1000, P_motion_sample, P_sensor, m, S)
grid = [[0]*17 for _ in range(11)]
for x, y, _ in S:
if 0 <= x < 11 and 0 <= y < 17:
grid[x][y] += 1
print("GRID:")
print_table(grid)
heatmap(grid, cmap='Oranges')
###Output
GRID:
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 1000 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
###Markdown
In this case, the robot is 99.9% certain that it is at position `(6, 7)`.

DECISION THEORETIC AGENT

We now move into the domain of probabilistic decision making. To make choices between different possible plans in a certain situation in a given environment, an agent must have _preferences_ between the possible outcomes of the various plans. __Utility theory__ is used to represent and reason with preferences: the agent prefers states with a higher _utility_. While constructing multi-agent systems, one major element in the design is the mechanism the agents use for making decisions about which actions to adopt in order to achieve their goals. What is usually required is a mechanism which ensures that the actions adopted lead to benefits both for individual agents and for the community of which they are part. The utility of a state is _relative_ to an agent. Preferences, as expressed by utilities, are combined with probabilities in the general theory of rational decisions called __decision theory__. An agent is said to be _rational_ if and only if it chooses the action that yields the highest expected utility, averaged over all the possible outcomes of the action.

Here we'll see how a decision-theoretic agent is implemented in the module.
###Code
psource(DTAgentProgram)
###Output
_____no_output_____
###Markdown
The `DTAgentProgram` function is pretty self-explanatory. It encapsulates a function `program` that takes in an observation or `percept`, updates its `belief_state` and returns the action that maximizes the `expected_outcome_utility`.

INFORMATION GATHERING AGENT

Before we discuss what an information gathering agent is, we'll need to know what decision networks are. For an agent in an environment, a decision network represents information about the agent's current state, its possible actions, the state that will result from the agent's action, and the utility of that state. Decision networks have three primary kinds of nodes:

1. __Chance nodes__: These represent random variables, just like in Bayesian networks.
2. __Decision nodes__: These represent points where the decision maker has a choice between different actions and tries to find the optimal decision at these nodes with regard to the cost, safety and resulting utility.
3. __Utility nodes__: These represent the agent's utility function. A description of the agent's utility as a function is associated with a utility node.

To evaluate a decision network, we do the following:

1. Initialize the evidence variables according to the current state.
2. Calculate posterior probabilities for each possible value of the decision node and calculate the utility resulting from that action.
3. Return the action with the highest utility.

Let's have a look at the implementation of the `DecisionNetwork` class.
###Code
psource(DecisionNetwork)
###Output
_____no_output_____
###Markdown
The `DecisionNetwork` class inherits from `BayesNet` and has a few extra helper methods. `best_action` returns the best action in the network. `get_utility` is an abstract method which is supposed to return the utility of a particular action and state in the network. `get_expected_utility` computes the expected utility, given an action and evidence.

Before we proceed, we need to know a few more terms. Having __perfect information__ refers to a state of being fully aware of the current state, the cost functions and the outcomes of actions. This in turn allows an agent to find the exact utility value of each state. If an agent has perfect information about the environment, maximum expected utility calculations are exact and can be computed with absolute certainty.

In decision theory, the __value of perfect information__ (VPI) is the price that an agent would be willing to pay in order to gain access to _perfect information_. VPI calculations are extensively used to calculate expected utilities for nodes in a decision network. For a random variable $E_j$ whose value is currently unknown, the value of discovering $E_j$, given current information $e$, must average over all possible values $e_{jk}$ that we might discover for $E_j$, using our _current_ beliefs about its value. The VPI of $E_j$ is then given by:

$$VPI_e(E_j) = \left(\sum_{k}P(E_j=e_{jk}\ |\ e) EU(\alpha_{e_{jk}}\ |\ e, E_j=e_{jk})\right) - EU(\alpha\ |\ e)$$

VPI is _non-negative_, _non-additive_ and _order-independent_.

An information gathering agent is an agent with certain properties that explores decision networks as and when required, with heuristics driven by VPI calculations of nodes. A sensible agent should ask questions in a reasonable order, should avoid asking irrelevant questions, should take into account the importance of each piece of information in relation to its cost, and should stop asking questions when that is appropriate. _VPI_ is used as the primary heuristic to consider all these points in an information gathering agent, as the agent ultimately wants to maximize the utility and needs to find the optimal cost and extent of finding the required information. As an overview, an information gathering agent works by repeatedly selecting the observations with the highest information value, until the cost of the next observation is greater than its expected benefit.

The `InformationGatheringAgent` class is an abstract class that inherits from `Agent` and works on the principles discussed above. Let's have a look.
###Code
psource(InformationGatheringAgent)
###Output
_____no_output_____
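###Markdown
As a tiny numeric illustration of the maximum-expected-utility principle behind these classes (the probabilities and utilities below are made-up numbers, not taken from any network in the module): for each action we average the utility of its outcomes weighted by their probabilities, and pick the action with the highest average.
###Code
# Hypothetical decision: take or leave the umbrella, with made-up numbers.
p_rain = 0.4
utility = {('take_umbrella', 'rain'): 70, ('take_umbrella', 'no_rain'): 80,
           ('leave_umbrella', 'rain'): 0, ('leave_umbrella', 'no_rain'): 100}

def expected_utility(action):
    return p_rain * utility[(action, 'rain')] + (1 - p_rain) * utility[(action, 'no_rain')]

actions = ['take_umbrella', 'leave_umbrella']
print({a: expected_utility(a) for a in actions})
print('Best action:', max(actions, key=expected_utility))
###Output
_____no_output_____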
###Markdown
Probability This IPy notebook acts as supporting material for **Chapter 13 Quantifying Uncertainty**, **Chapter 14 Probabilistic Reasoning** and **Chapter 15 Probabilistic Reasoning over Time** of the book* Artificial Intelligence: A Modern Approach*. This notebook makes use of the implementations in probability.py module. Let us import everything from the probability module. It might be helpful to view the source of some of our implementations. Please refer to the Introductory IPy file for more details on how to do so.
###Code
from probability import *
from notebook import *
###Output
_____no_output_____
###Markdown
Probability Distribution

Let us begin by specifying discrete probability distributions. The class **ProbDist** defines a discrete probability distribution. We name our random variable and then assign probabilities to the different values of the random variable. Assigning probabilities to the values works like using a dictionary: the key is the value of the random variable and we assign to it its probability. This is possible because of the magic methods **_ _getitem_ _** and **_ _setitem_ _** which store the probabilities in the prob dict of the object. You can keep the source window open alongside while playing with the rest of the code to get a better understanding.
###Code
%psource ProbDist
p = ProbDist('Flip')
p['H'], p['T'] = 0.25, 0.75
p['T']
###Output
_____no_output_____
###Markdown
The first parameter of the constructor **varname** has a default value of '?'. So if the name is not passed it defaults to ?. The keyword argument **freqs** can be a dictionary mapping values of the random variable to probabilities. These are then normalized such that the probability values sum up to 1 using the **normalize** method.
###Code
p = ProbDist(freqs={'low': 125, 'medium': 375, 'high': 500})
p.varname
(p['low'], p['medium'], p['high'])
###Output
_____no_output_____
###Markdown
Besides the **prob** and **varname** the object also separately keeps track of all the values of the distribution in a list called **values**. Every time a new value is assigned a probability, it is appended to this list; this is done inside the **_ _setitem_ _** method.
###Code
p.values
###Output
_____no_output_____
###Markdown
The distribution by default is not normalized if values are added incrementally. We can still force normalization by invoking the **normalize** method.
###Code
p = ProbDist('Y')
p['Cat'] = 50
p['Dog'] = 114
p['Mice'] = 64
(p['Cat'], p['Dog'], p['Mice'])
p.normalize()
(p['Cat'], p['Dog'], p['Mice'])
###Output
_____no_output_____
###Markdown
It is also possible to display the approximate values up to a given number of decimals using the **show_approx** method.
###Code
p.show_approx()
###Output
_____no_output_____
###Markdown
Joint Probability Distribution

The helper function **event_values** returns a tuple of the values of the variables in an event. An event is specified by a dict where the keys are the names of variables and the corresponding values are the values of those variables; the variables of interest are specified with a list. The ordering of the returned tuple is the same as that of the variables. Alternatively, if the event is already specified as a tuple of the same length as the variables, the event tuple is returned as it is.
###Code
event = {'A': 10, 'B': 9, 'C': 8}
variables = ['C', 'A']
event_values(event, variables)
###Output
_____no_output_____
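###Markdown
As mentioned above, if the event is already given as a tuple of the same length as the variables, it is returned unchanged.
###Code
event_values((8, 10), ['C', 'A'])
###Output
_____no_output_____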
###Markdown
_A probability model is completely determined by the joint distribution for all of the random variables._ (**Section 13.3**) The probability module implements these as the class **JointProbDist** which inherits from the **ProbDist** class. This class specifies a discrete probability distribute over a set of variables.
###Code
%psource JointProbDist
###Output
_____no_output_____
###Markdown
A value for a joint distribution is an ordered tuple in which each item corresponds to the value associated with a particular variable. For a joint distribution of X, Y where X, Y take integer values this can be something like (18, 19). To specify a joint distribution we first need an ordered list of variables.
###Code
variables = ['X', 'Y']
j = JointProbDist(variables)
j
###Output
_____no_output_____
###Markdown
Like the **ProbDist** class, **JointProbDist** also employs magic methods to assign probability to different values. The probability can be assigned in either of the two formats for all possible values of the distribution. The **event_values** call inside **_ _getitem_ _** and **_ _setitem_ _** does the required processing to make this work.
###Code
j[1,1] = 0.2
j[dict(X=0, Y=1)] = 0.5
(j[1,1], j[0,1])
###Output
_____no_output_____
###Markdown
It is also possible to list all the values for a particular variable using the **values** method.
###Code
j.values('X')
###Output
_____no_output_____
###Markdown
Inference Using Full Joint DistributionsIn this section we use Full Joint Distributions to calculate the posterior distribution given some evidence. We represent evidence by using a python dictionary with variables as dict keys and dict values representing the values.This is illustrated in **Section 13.3** of the book. The functions **enumerate_joint** and **enumerate_joint_ask** implement this functionality. Under the hood they implement **Equation 13.9** from the book.$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$Here **α** is the normalizing factor. **X** is our query variable and **e** is the evidence. According to the equation we enumerate on the remaining variables **y** (not in evidence or query variable) i.e. all possible combinations of **y**We will be using the same example as the book. Let us create the full joint distribution from **Figure 13.3**.
###Code
full_joint = JointProbDist(['Cavity', 'Toothache', 'Catch'])
full_joint[dict(Cavity=True, Toothache=True, Catch=True)] = 0.108
full_joint[dict(Cavity=True, Toothache=True, Catch=False)] = 0.012
full_joint[dict(Cavity=True, Toothache=False, Catch=True)] = 0.016
full_joint[dict(Cavity=True, Toothache=False, Catch=False)] = 0.064
full_joint[dict(Cavity=False, Toothache=True, Catch=True)] = 0.072
full_joint[dict(Cavity=False, Toothache=False, Catch=True)] = 0.144
full_joint[dict(Cavity=False, Toothache=True, Catch=False)] = 0.008
full_joint[dict(Cavity=False, Toothache=False, Catch=False)] = 0.576
###Output
_____no_output_____
###Markdown
Let us now look at the **enumerate_joint** function. It returns the sum of those entries in P consistent with e, provided variables is P's remaining variables (the ones not in e). Here, P refers to the full joint distribution. The function uses a recursive call in its implementation: the first parameter **variables** refers to the remaining variables, and in each recursive call the function keeps one variable constant while varying the others.
###Code
psource(enumerate_joint)
###Output
_____no_output_____
###Markdown
Let us assume we want to find **P(Toothache=True)**. This can be obtained by marginalization (**Equation 13.6**). We can use **enumerate_joint** to solve for this by taking Toothache=True as our evidence. **enumerate_joint** will return the sum of probabilities consistent with evidence i.e. Marginal Probability.
###Code
evidence = dict(Toothache=True)
variables = ['Cavity', 'Catch'] # variables not part of evidence
ans1 = enumerate_joint(variables, evidence, full_joint)
ans1
###Output
_____no_output_____
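###Markdown
As a quick sanity check, we can sum by hand the four entries of the full joint distribution defined above that have Toothache=True.
###Code
0.108 + 0.012 + 0.072 + 0.008
###Output
_____no_output_____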
###Markdown
This matches the manual check above against our definition of the full joint distribution. We can use the same function to find more complex probabilities like **P(Cavity=True and Toothache=True)**.
###Code
evidence = dict(Cavity=True, Toothache=True)
variables = ['Catch'] # variables not part of evidence
ans2 = enumerate_joint(variables, evidence, full_joint)
ans2
###Output
_____no_output_____
###Markdown
Being able to find sum of probabilities satisfying given evidence allows us to compute conditional probabilities like **P(Cavity=True | Toothache=True)** as we can rewrite this as $$P(Cavity=True | Toothache = True) = \frac{P(Cavity=True \ and \ Toothache=True)}{P(Toothache=True)}$$We have already calculated both the numerator and denominator.
###Code
ans2/ans1
###Output
_____no_output_____
###Markdown
We might be interested in the probability distribution of a particular variable conditioned on some evidence. This can involve doing calculations like above for each possible value of the variable. This has been implemented slightly differently using normalization in the function **enumerate_joint_ask** which returns a probability distribution over the values of the variable **X**, given the {var:val} observations **e**, in the **JointProbDist P**. The implementation of this function calls **enumerate_joint** for each value of the query variable and passes **extended evidence** with the new evidence having **X = xi**. This is followed by normalization of the obtained distribution.
###Code
psource(enumerate_joint_ask)
###Output
_____no_output_____
###Markdown
Let us find **P(Cavity | Toothache=True)** using **enumerate_joint_ask**.
###Code
query_variable = 'Cavity'
evidence = dict(Toothache=True)
ans = enumerate_joint_ask(query_variable, evidence, full_joint)
(ans[True], ans[False])
###Output
_____no_output_____
###Markdown
You can verify that the first value is the same as we obtained earlier by manual calculation.

Bayesian Networks

A Bayesian network is a representation of the joint probability distribution encoding a collection of conditional independence statements. A Bayes network is implemented as the class **BayesNet**. It consists of a collection of nodes implemented by the class **BayesNode**. The implementation in the above mentioned classes focuses only on boolean variables. Each node is associated with a variable and contains a **conditional probability table (cpt)**. The **cpt** represents the probability distribution of the variable conditioned on its parents, **P(X | parents)**.

Let us dive into the **BayesNode** implementation.
###Code
psource(BayesNode)
###Output
_____no_output_____
###Markdown
The constructor takes in the name of **variable**, **parents** and **cpt**. Here **variable** is the name of the variable, like 'Earthquake'. **parents** should be a list or space-separated string with the variable names of the parents. The conditional probability table is a dict {(v1, v2, ...): p, ...}, the distribution P(X=true | parent1=v1, parent2=v2, ...) = p. Here the keys are combinations of boolean values that the parents take. The length and order of the values in the keys should be the same as in the supplied **parents** list/string. In all cases the probability of X being false is left implicit, since it follows from P(X=true). The example below, where we implement the network shown in **Figure 14.3** of the book, will make this more clear. The alarm node can be made as follows:
###Code
alarm_node = BayesNode('Alarm', ['Burglary', 'Earthquake'],
{(True, True): 0.95,(True, False): 0.94, (False, True): 0.29, (False, False): 0.001})
###Output
_____no_output_____
###Markdown
It is possible to avoid using a tuple when there is only a single parent. So an alternative format for the **cpt** is
###Code
john_node = BayesNode('JohnCalls', ['Alarm'], {True: 0.90, False: 0.05})
mary_node = BayesNode('MaryCalls', 'Alarm', {(True, ): 0.70, (False, ): 0.01}) # Using string for parents.
# Equivalent to the john_node definition.
###Output
_____no_output_____
###Markdown
The general format used for the alarm node always holds. For nodes with no parents we can also use the following form.
###Code
burglary_node = BayesNode('Burglary', '', 0.001)
earthquake_node = BayesNode('Earthquake', '', 0.002)
###Output
_____no_output_____
###Markdown
It is possible to use the node for lookup function using the **p** method. The method takes in two arguments **value** and **event**. Event must be a dict of the type {variable:values, ..} The value corresponds to the value of the variable we are interested in (False or True).The method returns the conditional probability **P(X=value | parents=parent_values)**, where parent_values are the values of parents in event. (event must assign each parent a value.)
###Code
john_node.p(False, {'Alarm': True, 'Burglary': True}) # P(JohnCalls=False | Alarm=True)
###Output
_____no_output_____
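###Markdown
Since a Bayes network represents the full joint distribution as a product of these local conditional probabilities, individual lookups can simply be multiplied together. For example, using the nodes defined above, the probability that both John and Mary call, the alarm sounds, and there is neither a burglary nor an earthquake is:
###Code
# P(j, m, a, ¬b, ¬e) = P(j|a) P(m|a) P(a|¬b,¬e) P(¬b) P(¬e)
(john_node.p(True, {'Alarm': True})
 * mary_node.p(True, {'Alarm': True})
 * alarm_node.p(True, {'Burglary': False, 'Earthquake': False})
 * burglary_node.p(False, {})
 * earthquake_node.p(False, {}))
###Output
_____no_output_____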
###Markdown
With all the information about nodes present it is possible to construct a Bayes Network using **BayesNet**. The **BayesNet** class does not take in nodes as input but instead takes a list of **node_specs**. An entry in **node_specs** is a tuple of the parameters we use to construct a **BayesNode** namely **(X, parents, cpt)**. **node_specs** must be ordered with parents before children.
###Code
psource(BayesNet)
###Output
_____no_output_____
###Markdown
The constructor of **BayesNet** takes each item in **node_specs** and adds a **BayesNode** to its **nodes** object variable by calling the **add** method. **add** in turn adds the node to the net: its parents must already be in the net, and its variable must not. Thus add allows us to grow a **BayesNet** provided the node's parents are already present.

The **burglary** global is an instance of **BayesNet** corresponding to the above example.

    T, F = True, False

    burglary = BayesNet([
        ('Burglary', '', 0.001),
        ('Earthquake', '', 0.002),
        ('Alarm', 'Burglary Earthquake',
         {(T, T): 0.95, (T, F): 0.94, (F, T): 0.29, (F, F): 0.001}),
        ('JohnCalls', 'Alarm', {T: 0.90, F: 0.05}),
        ('MaryCalls', 'Alarm', {T: 0.70, F: 0.01})
    ])
###Code
burglary
###Output
_____no_output_____
###Markdown
**BayesNet** method **variable_node** allows to reach **BayesNode** instances inside a Bayes Net. It is possible to modify the **cpt** of the nodes directly using this method.
###Code
type(burglary.variable_node('Alarm'))
burglary.variable_node('Alarm').cpt
###Output
_____no_output_____
###Markdown
Exact Inference in Bayesian Networks

A Bayes network is a more compact representation of the full joint distribution and, like full joint distributions, allows us to do inference, i.e. answer questions about the probability distributions of random variables given some evidence. Exact algorithms don't scale well for larger networks; approximate algorithms are explained in the next section.

Inference by Enumeration

We apply techniques similar to those used for **enumerate_joint_ask** and **enumerate_joint** to draw inference from Bayesian networks. **enumeration_ask** and **enumerate_all** implement the algorithm described in **Figure 14.9** of the book.
###Code
psource(enumerate_all)
###Output
_____no_output_____
###Markdown
**enumerate_all** recursively evaluates a general form of **Equation 14.4** in the book,

$$\textbf{P}(X | \textbf{e}) = α \textbf{P}(X, \textbf{e}) = α \sum_{y} \textbf{P}(X, \textbf{e}, \textbf{y})$$

such that **P(X, e, y)** is written as a product of conditional probabilities **P(variable | parents(variable))** from the Bayesian network. **enumeration_ask** calls **enumerate_all** on each value of the query variable **X** and finally normalizes the results.
###Code
psource(enumeration_ask)
###Output
_____no_output_____
###Markdown
Let us solve the problem of finding out **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using the **burglary** network.**enumeration_ask** takes three arguments **X** = variable name, **e** = Evidence (in form a dict like previously explained), **bn** = The Bayes Net to do inference on.
###Code
ans_dist = enumeration_ask('Burglary', {'JohnCalls': True, 'MaryCalls': True}, burglary)
ans_dist[True]
###Output
_____no_output_____
###Markdown
Variable Elimination

The enumeration algorithm can be improved substantially by eliminating repeated calculations. In enumeration we join the joint of all hidden variables, which is of exponential size in the number of hidden variables. Variable elimination employs interleaved join and marginalization.

Before we look into the implementation of variable elimination, we must first familiarize ourselves with factors. In general we call a multidimensional array of type P(Y1 ... Yn | X1 ... Xm) a factor, where some of the Xs and Ys may be assigned values. Factors are implemented in the probability module as the class **Factor**. They take as input **variables** and **cpt**.

Helper Functions

There are certain helper functions that help create the **cpt** for the Factor given the evidence. Let us explore them one by one.
###Code
psource( make_factor)
###Output
_____no_output_____
###Markdown
**make_factor** is used to create the **cpt** and **variables** that will be passed to the constructor of **Factor**. We use **make_factor** for each variable. It takes in the arguments **var** the particular variable, **e** the evidence we want to do inference on, **bn** the bayes network.Here **variables** for each node refers to a list consisting of the variable itself and the parents minus any variables that are part of the evidence. This is created by finding the **node.parents** and filtering out those that are not part of the evidence.The **cpt** created is the one similar to the original **cpt** of the node with only rows that agree with the evidence.
###Code
psource(all_events)
###Output
_____no_output_____
###Markdown
The **all_events** function is a recursive generator function which yields a key for the original **cpt** which is part of the node. This works by extending the evidence related to the node, so all the output from **all_events** only includes events that support the evidence. Since **all_events** is a generator function, one such event is returned on every call.

We can try this out using the example on **Page 524** of the book. We will make **f**5(A) = P(m | A).
###Code
f5 = make_factor('MaryCalls', {'JohnCalls': True, 'MaryCalls': True}, burglary)
f5
f5.cpt
f5.variables
###Output
_____no_output_____
###Markdown
Here the False key of **f5.cpt** gives the probability **P(MaryCalls=True | Alarm=False)**. Because our representation only stores probabilities for the case where the node variable is True, this is the same as the **cpt** of the BayesNode. Let us try a somewhat different example from the book, where the evidence is that Alarm = True.
###Code
new_factor = make_factor('MaryCalls', {'Alarm': True}, burglary)
new_factor.cpt
###Output
_____no_output_____
###Markdown
Here the **cpt** is for **P(MaryCalls | Alarm = True)**, therefore the probabilities for True and False sum up to one. Note the difference between the two cases. Again, the only rows included are those consistent with the evidence.

Operations on Factors

We are interested in two kinds of operations on factors: **pointwise product**, which is used to create joint distributions, and **summing out**, which is used for marginalization.
###Code
psource(Factor.pointwise_product)
###Output
_____no_output_____
###Markdown
**Factor.pointwise_product** implements a method of creating a joint by combining two factors. We take the union of the **variables** of both factors and then generate the **cpt** for the new factor using the **all_events** function. Note that we have already eliminated rows that are not consistent with the evidence. Pointwise product assigns new probabilities by multiplying rows, similar to a database join.
###Code
psource(pointwise_product)
###Output
_____no_output_____
###Markdown
**pointwise_product** extends this operation to more than two operands where it is done sequentially in pairs of two.
###Code
psource(Factor.sum_out)
###Output
_____no_output_____
###Markdown
**Factor.sum_out** makes a factor that eliminates a variable by summing over its values. Again, **all_events** is used to generate combinations for the rest of the variables.
###Code
psource(sum_out)
###Output
_____no_output_____
###Markdown
**sum_out** uses both **Factor.sum_out** and **pointwise_product** to finally eliminate a particular variable from all factors by summing over its values.

Elimination Ask

The algorithm described in **Figure 14.11** of the book is implemented by the function **elimination_ask**. We use this for inference. The key idea is that we eliminate the hidden variables by interleaving joining and marginalization. It takes three arguments: **X**, the query variable; **e**, the evidence; and **bn**, the Bayes network. The algorithm creates factors out of Bayes nodes in reverse order and eliminates hidden variables using **sum_out**. Finally, it takes a pointwise product of all factors and normalizes. Let us finally solve the problem of inferring **P(Burglary=True | JohnCalls=True, MaryCalls=True)** using variable elimination.
###Code
psource(elimination_ask)
elimination_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
_____no_output_____
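###Markdown
As a quick cross-check, enumeration and variable elimination are both exact methods, so they should agree on this query.
###Code
enumeration_ask('Burglary', dict(JohnCalls=True, MaryCalls=True), burglary).show_approx()
###Output
_____no_output_____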
###Markdown
Approximate Inference in Bayesian Networks

Exact inference fails to scale for very large and complex Bayesian networks. This section covers the implementation of randomized sampling algorithms, also called Monte Carlo algorithms.
###Code
psource(BayesNode.sample)
###Output
_____no_output_____
###Markdown
Before we consider the different algorithms in this section, let us look at the **BayesNode.sample** method. It samples from the distribution for this variable conditioned on the event's values for the parent variables; that is, it returns True/False at random according to the conditional probability given the parents. The **probability** function is a simple helper from the **utils** module which returns True with the probability passed to it.

Prior Sampling

The idea of prior sampling is to sample from the Bayesian network in topological order. We start at the top of the network and sample as per **P(Xi | parents(Xi))**, i.e. the probability distribution from which the value is sampled is conditioned on the values already assigned to the variable's parents. This can be thought of as a simulation.
###Code
psource(prior_sample)
###Output
_____no_output_____
###Markdown
The function **prior_sample** implements the algorithm described in **Figure 14.13** of the book. Nodes are sampled in topological order, and the values sampled so far are passed as evidence for the parent values. We will use the Bayesian network in **Figure 14.12** to try out **prior_sample**. We store the samples as observations. Let us find **P(Rain=True)**.
###Code
N = 1000
all_observations = [prior_sample(sprinkler) for x in range(N)]
###Output
_____no_output_____
###Markdown
Now we filter to get the observations where Rain = True
###Code
rain_true = [observation for observation in all_observations if observation['Rain'] == True]
###Output
_____no_output_____
###Markdown
Finally, we can find **P(Rain=True)**
###Code
answer = len(rain_true) / N
print(answer)
###Output
0.508
###Markdown
To evaluate a conditional distribution, we can use a two-step filtering process: we first separate out the samples that are consistent with the evidence, and then for each value of the query variable we can find the probabilities. For example, to find **P(Cloudy=True | Rain=True)**: we have already filtered out the samples consistent with our evidence in **rain_true**. Now we apply a second filtering step on **rain_true** to find **P(Rain=True and Cloudy=True)**.
###Code
rain_and_cloudy = [observation for observation in rain_true if observation['Cloudy'] == True]
answer = len(rain_and_cloudy) / len(rain_true)
print(answer)
###Output
0.7755905511811023
###Markdown
Rejection SamplingRejection Sampling is based on an idea similar to what we did just now. First, it generates samples from the prior distribution specified by the network. Then, it rejects all those that do not match the evidence. The function **rejection_sampling** implements the algorithm described by **Figure 14.14**
###Code
psource(rejection_sampling)
###Output
_____no_output_____
###Markdown
The function keeps counts of each of the possible values of the query variable and increases the count when we see an observation consistent with the evidence. It takes as input **X**, the query variable; **e**, the evidence; **bn**, the Bayes net; and **N**, the number of prior samples to generate. **consistent_with** is used to check consistency.
###Code
psource(consistent_with)
###Output
_____no_output_____
###Markdown
To answer **P(Cloudy=True | Rain=True)**
###Code
p = rejection_sampling('Cloudy', dict(Rain=True), sprinkler, 1000)
p[True]
###Output
_____no_output_____
###Markdown
Likelihood Weighting

Rejection sampling tends to reject a lot of samples if our evidence consists of a large number of variables. Likelihood weighting solves this by fixing the evidence (i.e. not sampling it) and then using weights to make sure that our overall sampling is still consistent.

The pseudocode in **Figure 14.15** is implemented as **likelihood_weighting** and **weighted_sample**.
###Code
psource(weighted_sample)
###Output
_____no_output_____
###Markdown
**weighted_sample** samples an event from the Bayesian network that is consistent with the evidence **e** and returns the event along with its weight, the likelihood that the event accords with the evidence. It takes two parameters: **bn**, the Bayesian network, and **e**, the evidence. The weight is obtained by multiplying **P(xi | parents(xi))** for each node in the evidence. We set **event = evidence** at the start of the function.
###Code
weighted_sample(sprinkler, dict(Rain=True))
psource(likelihood_weighting)
###Output
_____no_output_____
###Markdown
**likelihood_weighting** implements the algorithm to solve our inference problem. The code is similar to **rejection_sampling**, but instead of adding one for each sample we add the weight obtained from **weighted_sample**:

    likelihood_weighting('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()

Gibbs Sampling

In likelihood weighting, it is possible to obtain low weights in cases where the evidence variables reside at the bottom of the Bayesian network. This can happen because influence only propagates downwards in likelihood weighting. Gibbs sampling solves this. The implementation of **Figure 14.16** is provided in the function **gibbs_ask**.
###Code
psource(gibbs_ask)
###Output
_____no_output_____
###Markdown
In **gibbs_ask** we initialize the non-evidence variables to random values, and then repeatedly pick a non-evidence variable and sample it from **P(Variable | current values of all remaining variables)**. In practice, we speed this up by sampling from the variable's Markov blanket with **markov_blanket_sample** instead; this works because terms not involving the variable cancel in the calculation. The arguments of **gibbs_ask** are similar to those of **likelihood_weighting**.
###Code
gibbs_ask('Cloudy', dict(Rain=True), sprinkler, 200).show_approx()
###Output
_____no_output_____
###Markdown
Inference in Temporal Models

Before we start, it will be helpful to understand the structure of a temporal model. We will use the example of the book with the guard and the umbrella. In this example, the state $\textbf{X}$ is whether it is a rainy day (`X = True`) or not (`X = False`) on Day $\textbf{t}$. In the sensor or observation model, the observation or evidence $\textbf{U}$ is whether the professor holds an umbrella (`U = True`) or not (`U = False`) on **Day** $\textbf{t}$. Based on that, the transition model is

| $X_{t-1}$ | $X_{t}$ | **P**$(X_{t}\mid X_{t-1})$ |
| ------------- | ------------- | ---------------------------------- |
| ***${False}$*** | ***${False}$*** | 0.7 |
| ***${False}$*** | ***${True}$*** | 0.3 |
| ***${True}$*** | ***${False}$*** | 0.3 |
| ***${True}$*** | ***${True}$*** | 0.7 |

And the sensor model will be

| $X_{t}$ | $U_{t}$ | **P**$(U_{t}\mid X_{t})$ |
| :-------------: | :-------------: | :------------------------: |
| ***${False}$*** | ***${True}$*** | 0.2 |
| ***${False}$*** | ***${False}$*** | 0.8 |
| ***${True}$*** | ***${True}$*** | 0.9 |
| ***${True}$*** | ***${False}$*** | 0.1 |

In the filtering task we are given the evidence **U** at each time **t** and we want to compute the belief $B_{t}(x)= P(X_{t}|U_{1:t})$. We can think of it as a three-step process:

1. In every step we start with the current belief $P(X_{t}|e_{1:t})$.
2. We update it for time.
3. We update it for evidence.

The forward algorithm performs steps 2 and 3 at once. It updates, or rather reweights, the initial belief using the transition and the sensor model. Let's see the umbrella example. On **Day 0** no observation is available, and for that reason we will assume that it is equally likely to rain or not. In the **`HiddenMarkovModel`** class, the prior probabilities for **Day 0** are [0.5, 0.5] by default.
###Code
%psource HiddenMarkovModel
###Output
_____no_output_____
###Markdown
We instantiate the object **`hmm`** of the class using a list of lists for both the transition and the sensor model.
###Code
umbrella_transition_model = [[0.7, 0.3], [0.3, 0.7]]
umbrella_sensor_model = [[0.9, 0.2], [0.1, 0.8]]
hmm = HiddenMarkovModel(umbrella_transition_model, umbrella_sensor_model)
###Output
_____no_output_____
###Markdown
The **`sensor_dist()`** method returns a list with the conditional probabilities of the sensor model.
###Code
hmm.sensor_dist(ev=True)
###Output
_____no_output_____
###Markdown
The observation update is calculated with the **`forward()`** function. Basically, we update our belief using the observation model. The function returns a list with the probabilities of **raining or not** on **Day 1**.
###Code
psource(forward)
umbrella_prior = [0.5, 0.5]  # uniform prior for Day 0, as described above
belief_day_1 = forward(hmm, umbrella_prior, ev=True)
print('The probability of raining on day 1 is {:.2f}'.format(belief_day_1[0]))
###Output
The probability of raining on day 1 is 0.82
###Markdown
On **Day 2** our initial belief is the updated belief from **Day 1**. Using the **`forward()`** function again, we can compute the probability of rain on **Day 2**.
###Code
belief_day_2 = forward(hmm, belief_day_1, ev=True)
print ('The probability of raining in day 2 is {:.2f}'.format(belief_day_2[0]))
###Output
The probability of raining in day 2 is 0.88
###Markdown
In the smoothing task we are interested in computing the distribution over past states given the evidence up to the present. Assume we want to compute the distribution for time **k**, with $0\leq k<t$; the computation can be divided into two parts:

1. The forward message up to **k**, computed by filtering forward from 1 to **k**.
2. The backward message, computed by a recursive process that runs backwards from **t** to **k** (the recursion starts at time **t** rather than at time 1).

In the umbrella example, we can compute the backward message from **Day 2** to **Day 1** using the `backward` function. Its parameters are the object created by the **`HiddenMarkovModel`** class, the evidence on **Day 2** (in our case **True**), and the initial backward message at time t+1, which is [1, 1] since no observation is available there. The `backward` function returns a list with the conditional probabilities.
###Code
psource(backward)
b = [1, 1]
backward(hmm, b, ev=True)
###Output
_____no_output_____
###Markdown
Some may notice that the result is not the same as in the book; the main reason is that the book does not apply the normalization step. If we want to normalize the result, we can use the **`normalize()`** helper function. To find the smoothed estimate for rain on **Day k**, we use the **`forward_backward()`** function. As in the book's example, the umbrella is observed on both days and the prior distribution is [0.5, 0.5].
###Code
pseudocode('Forward-Backward')
umbrella_prior = [0.5, 0.5]
prob = forward_backward(hmm, ev=[T, T], prior=umbrella_prior)
print ('The probability of raining in Day 0 is {:.2f} and in Day 1 is {:.2f}'.format(prob[0][0], prob[1][0]))
###Output
The probability of raining in Day 0 is 0.65 and in Day 1 is 0.88
|
stable/_downloads/7bb2e6f1056f5cae3a98ccc12aac266f/plot_eeg_no_mri.ipynb | ###Markdown
EEG forward operator with a template MRI
========================================

This tutorial explains how to compute the forward operator from EEG data using the standard template MRI subject ``fsaverage``.

.. important:: Source reconstruction without an individual T1 MRI from the subject will be less accurate. Do not over-interpret activity locations, which can be off by multiple centimeters.

.. note:: `plot_montage` shows all the standard montages in MNE-Python.
###Code
# Authors: Alexandre Gramfort <[email protected]>
# Joan Massich <[email protected]>
#
# License: BSD Style.
import os.path as op
import mne
from mne.datasets import eegbci
from mne.datasets import fetch_fsaverage
# Download fsaverage files
fs_dir = fetch_fsaverage(verbose=True)
subjects_dir = op.dirname(fs_dir)
# The files live in:
subject = 'fsaverage'
trans = op.join(fs_dir, 'bem', 'fsaverage-trans.fif')
src = op.join(fs_dir, 'bem', 'fsaverage-ico-5-src.fif')
bem = op.join(fs_dir, 'bem', 'fsaverage-5120-5120-5120-bem-sol.fif')
###Output
_____no_output_____
###Markdown
Load the data
-------------

Here we use EEG data from the BCI dataset.
###Code
raw_fname, = eegbci.load_data(subject=1, runs=[6])
raw = mne.io.read_raw_edf(raw_fname, preload=True)
# Clean channel names to be able to use a standard 1005 montage
ch_names = [c.replace('.', '') for c in raw.ch_names]
raw.rename_channels({old: new for old, new in zip(raw.ch_names, ch_names)})
# Read and set the EEG electrode locations
montage = mne.channels.read_montage('standard_1005', ch_names=raw.ch_names,
transform=True)
raw.set_montage(montage)
raw.set_eeg_reference(projection=True) # needed for inverse modeling
# Check that the locations of EEG electrodes is correct with respect to MRI
mne.viz.plot_alignment(
raw.info, src=src, eeg=['original', 'projected'], trans=trans, dig=True)
###Output
_____no_output_____
###Markdown
Setup source space and compute forward
--------------------------------------
###Code
fwd = mne.make_forward_solution(raw.info, trans=trans, src=src,
bem=bem, eeg=True, mindist=5.0, n_jobs=1)
print(fwd)
# for illustration purposes use fwd to compute the sensitivity map
eeg_map = mne.sensitivity_map(fwd, ch_type='eeg', mode='fixed')
eeg_map.plot(time_label='EEG sensitivity', subjects_dir=subjects_dir,
clim=dict(lims=[5, 50, 100]))
###Output
_____no_output_____ |
examples/Tutorial_1-carrots_demo/carrots_demo.ipynb | ###Markdown
1.1 Introduction to PyDP

The PyDP package provides a Python API into [Google's Differential Privacy library](https://github.com/google/differential-privacy). This example uses the alpha 0.4 version of the package that has the following limitations:

* Supports Linux (Windows coming soon)
* Currently implements an algorithm to support computing private mean using a Laplace noise generation technique.
* Supports only integer and floating point values

Example using Differential Privacy library

In this directory, we give a simple example of how to use the Python Differential Privacy library.

Zoo Animals

There are around 182 animals at Farmer Alex's zoo. Every day, Alex feeds the animals as many carrots as they desire. The animals record how many carrots they have eaten per day. For this particular day, the number of carrots eaten can be seen in `animals_and_carrots.csv`.

At the end of each day, Alex often asks aggregate questions about how many carrots everyone ate. For example, he wants to know how many carrots are eaten each day, so he knows how many to order the next day. The animals are fearful that Alex will use the data against their best interest. For example, Alex could get rid of the animals who eat the most carrots!

To protect themselves, the animals decide to use the Python Differential Privacy library to aggregate their data before reporting it to Alex. This way, the animals can control the risk that Alex will identify individuals' data while maintaining an adequate level of accuracy so that Alex can continue to run the zoo effectively.
###Code
# Install the PyDP package
! pip install python-dp
import pydp as dp # by convention our package is to be imported as dp (for Differential Privacy!)
from pydp.algorithms.laplacian import BoundedSum, BoundedMean, Count, Max
import pandas as pd
import statistics # for calculating mean without applying differential privacy
###Output
_____no_output_____
###Markdown
Data

Each row in `animals_and_carrots.csv` is composed of the name of an animal, and the number of carrots it has eaten, comma-separated.
###Code
# get carrots data from our public github repo
url = 'https://raw.githubusercontent.com/OpenMined/PyDP/dev/examples/Tutorial_1-carrots_demo/animals_and_carrots.csv'
df = pd.read_csv(url,sep=",", names=["animal", "carrots_eaten"])
df.head()
###Output
_____no_output_____
###Markdown
Taking the mean of all the entries in a normal fashion without applying the DP library. This is the actual mean of all the records.

Per-animal Privacy

Notice that each animal owns at most one row in the data. This means that we provide per-animal privacy. Suppose that some animal appears multiple times in the csv file. That animal would own more than one row in the data. In this case, using this DP library would not guarantee per-animal privacy! The animals would first have to pre-process their data in a way such that each animal doesn't own more than one row; a small sketch of such pre-processing follows below.
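If an animal could appear on several rows, one way to restore one-row-per-animal ownership is to aggregate the duplicates first. The cell below is an illustrative sketch only (it is not part of the original demo, and with this dataset it leaves the data unchanged):
###Code
# Illustration only: collapse any duplicate rows so each animal owns exactly one row.
df_one_row_per_animal = df.groupby("animal", as_index=False)["carrots_eaten"].sum()
df_one_row_per_animal.head()
###Output
_____no_output_____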
###Code
# calculates mean without applying differential privacy
def mean_carrots() -> float:
return statistics.mean(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
Private Mean uses the Differential Privacy library by Google to calculate the mean. To preserve privacy, the Laplacian mechanism is used. The function takes the argument privacy_budget as input: a number between 0 and 1 denoting the privacy threshold, which measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `BoundedMean.quick_result()` takes a list of integers/floats as input and returns the mean of the list values.
###Code
# calculates mean applying differential privacy
def private_mean(privacy_budget: float) -> float:
x = BoundedMean(privacy_budget, 0, 1, 100)
return x.quick_result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of the private mean varies compared to the mean calculated using non-private statistical methods.This difference in value corresponds to the privacy that is actually preserved for individual records in it.
###Code
print("Mean: ", mean_carrots())
print("Private Mean: ", private_mean(0.8))
###Output
Mean: 53.01648351648352
Private Mean: 71.27272727272728
###Markdown
Counts number of animals who ate more than 'limit' carrots without applying the DP library. This is the actual number of such animals.
###Code
# Calculates number of animals who ate more than "limit" carrots without applying differential privacy.
def count_above(limit: int) -> int:
return df[df.carrots_eaten > limit].count()[0]
###Output
_____no_output_____
###Markdown
Private Count Above uses the Differential Privacy library by Google to calculate the number of rows with a value above the limit. To preserve privacy, the Laplacian mechanism is used. The function takes the argument privacy_budget as input: a number between 0 and 1 denoting the privacy threshold, which measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). The function also takes the limit as an argument. `Count.quick_result()` takes a list of integers/floats as input and returns the count of elements in the list.
###Code
# Calculates number of animals who ate more than "limit" carrots applying differential privacy.
def private_count_above(privacy_budget: float, limit: int) -> int:
x = Count(privacy_budget, dtype="int")
return x.quick_result(list(df[df.carrots_eaten > limit]["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of Private Count Above differs from the count calculated using ordinary statistical methods. This difference reflects the privacy that is actually preserved for the individual records.
###Code
print("Above 70:\t" + str(count_above(70)))
print("private count above:\t" + str(private_count_above(1, 70)))
###Output
Above 70: 65
private count above: 64
###Markdown
Taking Max of all the entries in a normal fashion without Applying the DP library. This is the actual maximum of carrots eaten of all the records.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, without applying differential privacy.
def max() -> int:
return df.max()[1]
###Output
_____no_output_____
###Markdown
Private Max uses the Differential Privacy library by Google to calculate the maximum of all the values. To preserve privacy, the Laplacian mechanism is used. The function takes the argument privacy_budget as input: a number between 0 and 1 denoting the privacy threshold, which measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `Max.quick_result()` takes a list of integers/floats as input and returns the maximum value in the list.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, applying differential privacy.
def private_max(privacy_budget: float) -> int:
    # 0 and 100 are the lower and upper limits for the search bound.
x = Max(privacy_budget, 0, 100, dtype="int")
return x.quick_result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of Private Max differs from the max calculated using ordinary statistical methods. This difference reflects the privacy that is actually preserved for the individual records.
###Code
print("Max:\t" + str(max()))
print("private max:\t" + str(private_max(1)))
###Output
Max: 100
private max: 78.0
###Markdown
Taking Sum of all the entries in a normal fashion without Applying the DP library. This is the actual sum of carrots eaten by all the animals.
###Code
# Function to calculate sum of carrots eaten without applying differential privacy.
def sum_carrots() -> int:
return df.sum()[1]
###Output
_____no_output_____
###Markdown
Private Sum uses the Differential Privacy library by Google to calculate the sum of all the values. To preserve privacy, the Laplacian mechanism is used. The function takes the argument privacy_budget as input: a number between 0 and 1 denoting the privacy threshold, which measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `BoundedSum.quick_result()` takes a list of integers/floats as input and returns the sum of the list values.
###Code
# Function to calculate sum of carrots eaten applying differential privacy.
def private_sum(privacy_budget: float) -> int:
x = BoundedSum(privacy_budget,0, 1,100, dtype="float")
return x.quick_result(list(df["carrots_eaten"]))
print("Sum:\t" + str(sum_carrots()))
print("Private Sum:\t" + str(private_sum(1)))
###Output
Sum: 9649
Private Sum: 9472.0
###Markdown
1.1 Introduction to PyDPThe PyDP package provides a Python API into [Google's Differential Privacy library](https://github.com/google/differential-privacy). This example uses the alpha 0.4 version of the package that has the following limitations:* Supports Linux (Windows coming soon)* Currently implements an algorithm to support computing private mean using a Laplace noise generation technique.* Supports only integer and floating point values Example using Differential Privacy libraryIn this directory, we give a simple example of how to use the Python DifferentialPrivacy library. Zoo AnimalsThere are around 182 animals at Farmer Alex's zoo. Every day, Alex feedsthe animals as many carrots as they desire. The animals record how many carrotsthey have eaten per day. For this particular day, the number of carrots eatencan be seen in `animals_and_carrots.csv`.At the end of each day, Alex often asks aggregate questions about how manycarrots everyone ate. For example, he wants to know how many carrots are eateneach day, so he knows how many to order the next day. The animals are fearfulthat Alex will use the data against their best interest. For example, Alex couldget rid of the animals who eat the most carrots!To protect themselves, the animals decide to use the Python Differential Privacylibrary to aggregate their data before reporting it to Alex. This way, theanimals can control the risk that Alex will identify individuals' data whilemaintaining an adequate level of accuracy so that Alex can continue to run thezoo effectively.
###Code
# Install the PyDP package
! pip install python-dp
import pydp as dp # by convention our package is to be imported as dp (for Differential Privacy!)
from pydp.algorithms.laplacian import BoundedSum, BoundedMean, Count, Max
import pandas as pd
import statistics # for calculating mean without applying differential privacy
###Output
_____no_output_____
###Markdown
DataEach row in `animals_and_carrots.csv` is composed of the name of an animal, andthe number of carrots it has eaten, comma-separated.
###Code
# get carrots data from our public github repo
url = "https://raw.githubusercontent.com/OpenMined/PyDP/dev/examples/Tutorial_1-carrots_demo/animals_and_carrots.csv"
df = pd.read_csv(url, sep=",", names=["animal", "carrots_eaten"])
df.head()
###Output
_____no_output_____
###Markdown
Taking the mean of all the entries in a normal fashion without applying the DP library. This is the actual mean of all the records. Per-animal PrivacyNotice that each animal owns at most one row in the data. This means that weprovide per-animal privacy. Suppose that some animal appears multiple times inthe csv file. That animal would own more than one row in the data. In this case,using this DP library would not guarantee per-animal privacy! The animals wouldfirst have to pre-process their data in a way such that each animal doesn't ownmore than one row.
###Code
# calculates mean without applying differential privacy
def mean_carrots() -> float:
return statistics.mean(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
Private Mean uses Differential Privacy Library by Google to calculate the Mean. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`BoundedMean.quick_result()` takes a List of integer/ float as an input and returns the mean of the list values.
###Code
# calculates mean applying differential privacy
def private_mean(privacy_budget: float) -> float:
x = BoundedMean(privacy_budget, 0, 1, 100)
return x.quick_result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of the private mean varies compared to the mean calculated using non-private statistical methods.This difference in value corresponds to the privacy that is actually preserved for individual records in it.
###Code
print("Mean: ", mean_carrots())
print("Private Mean: ", private_mean(0.8))
###Output
_____no_output_____
###Markdown
Counts number of animals who ate more than 'limit' carrots without applying the DP library. This is the actual number of such animals.
###Code
# Calculates number of animals who ate more than "limit" carrots without applying differential privacy.
def count_above(limit: int) -> int:
return df[df.carrots_eaten > limit].count()[0]
###Output
_____no_output_____
###Markdown
Private Count Above uses Differential Privacy Library by Google to calculate the number of rows with value above limit. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).The function also takes the limit as an argument.`Count.quick_result()` takes a List of integer/ float as an input and returns the count of elements in the list.
###Code
# Calculates number of animals who ate more than "limit" carrots applying differential privacy.
def private_count_above(privacy_budget: float, limit: int) -> int:
x = Count(privacy_budget, dtype="int")
return x.quick_result(list(df[df.carrots_eaten > limit]["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of Private Count Above varies compared to the Count calculated using normal Statistical methods.This difference in values refers to that privacy is actually preserved for individual records in it.
###Code
print("Above 70:\t" + str(count_above(70)))
print("private count above:\t" + str(private_count_above(1, 70)))
###Output
_____no_output_____
###Markdown
Taking Max of all the entries in a normal fashion without Applying the DP library. This is the actual maximum of carrots eaten of all the records.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, without applying differential privacy.
def max() -> int:
return df.max()[1]
###Output
_____no_output_____
###Markdown
Private Max uses Differential Privacy Library by Google to calculate the maximum out of all the values. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`Max.quick_result()` takes a List of integer/ float as an input and returns the list max value.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, applying differential privacy.
def private_max(privacy_budget: float) -> int:
    # 0 and 100 are the lower and upper limits for the search bound.
x = Max(epsilon = privacy_budget, lower_bound = 0, upper_bound = 100, dtype="int")
return x.quick_result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of Private Max varies compared to the Max calculated using normal Statistical methods.This difference in values refers to that privacy is actually preserved for individual records in it.
###Code
print("Max:\t" + str(max()))
print("private max:\t" + str(private_max(1)))
###Output
_____no_output_____
###Markdown
Taking Sum of all the entries in a normal fashion without Applying the DP library. This is the actual sum of carrots eaten by all the animals.
###Code
# Function to calculate sum of carrots eaten without applying differential privacy.
def sum_carrots() -> int:
return df.sum()[1]
###Output
_____no_output_____
###Markdown
Private Sum uses Differential Privacy Library by Google to calculate the sum of all the values. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`BoundedSum.quick_result()` takes a List of integer/ float as an input and returns the list sum.
###Code
# Function to calculate sum of carrots eaten applying differential privacy.
def private_sum(privacy_budget: float) -> int:
x = BoundedSum(epsilon = privacy_budget, delta = 0, lower_bound= 1, upper_bound = 100, dtype="float")
return x.quick_result(list(df["carrots_eaten"]))
print("Sum:\t" + str(sum_carrots()))
print("Private Sum:\t" + str(private_sum(1)))
###Output
_____no_output_____
###Markdown
1.1 Introduction to PyDPThe PyDP package provides a Python API into [Google's Differential Privacy library](https://github.com/google/differential-privacy). This example uses the alpha 0.4 version of the package that has the following limitations:* Supports Linux (Windows coming soon)* Currently implements an algorithm to support computing private mean using a Laplace noise generation technique.* Supports only integer and floating point values Example using Differential Privacy libraryIn this directory, we give a simple example of how to use the Python DifferentialPrivacy library. Zoo AnimalsThere are around 182 animals at Farmer Alex's zoo. Every day, Alex feedsthe animals as many carrots as they desire. The animals record how many carrotsthey have eaten per day. For this particular day, the number of carrots eatencan be seen in `animals_and_carrots.csv`.At the end of each day, Alex often asks aggregate questions about how manycarrots everyone ate. For example, he wants to know how many carrots are eateneach day, so he knows how many to order the next day. The animals are fearfulthat Alex will use the data against their best interest. For example, Alex couldget rid of the animals who eat the most carrots!To protect themselves, the animals decide to use the Python Differential Privacylibrary to aggregate their data before reporting it to Alex. This way, theanimals can control the risk that Alex will identify individuals' data whilemaintaining an adequate level of accuracy so that Alex can continue to run thezoo effectively.
###Code
# Install the PyDP package
! pip install python-dp
import pydp as dp # by convention our package is to be imported as dp (for Differential Privacy!)
from pydp.algorithms.laplacian import BoundedSum, BoundedMean, Count, Max
import pandas as pd
import statistics # for calculating mean without applying differential privacy
###Output
_____no_output_____
###Markdown
DataEach row in `animals_and_carrots.csv` is composed of the name of an animal, andthe number of carrots it has eaten, comma-separated.
###Code
# get carrots data from our public github repo
url = 'https://raw.githubusercontent.com/OpenMined/PyDP/dev/examples/Tutorial_1-carrots_demo/animals_and_carrots.csv'
df = pd.read_csv(url,sep=",", names=["animal", "carrots_eaten"])
df.head()
###Output
_____no_output_____
###Markdown
Taking the mean of all the entries in a normal fashion without applying the DP library. This is the actual mean of all the records. Per-animal PrivacyNotice that each animal owns at most one row in the data. This means that weprovide per-animal privacy. Suppose that some animal appears multiple times inthe csv file. That animal would own more than one row in the data. In this case,using this DP library would not guarantee per-animal privacy! The animals wouldfirst have to pre-process their data in a way such that each animal doesn't ownmore than one row.
###Code
# calculates mean without applying differential privacy
def mean_carrots() -> float:
return statistics.mean(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
Private Mean uses Differential Privacy Library by Google to calculate the Mean. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`BoundedMean.quick_result()` takes a List of integer/ float as an input and returns the mean of the list values.
###Code
# calculates mean applying differential privacy
def private_mean(privacy_budget: float) -> float:
x = BoundedMean(privacy_budget, 1, 100)
return x.quick_result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of the private mean varies compared to the mean calculated using non-private statistical methods.This difference in value corresponds to the privacy that is actually preserved for individual records in it.
###Code
print("Mean: ", mean_carrots())
print("Private Mean: ", private_mean(0.8))
###Output
Mean: 53.01648351648352
Private Mean: 71.27272727272728
###Markdown
Counts number of animals who ate more than 'limit' carrots without applying the DP library. This is the actual number of such animals.
###Code
# Calculates number of animals who ate more than "limit" carrots without applying differential privacy.
def count_above(limit: int) -> int:
return df[df.carrots_eaten > limit].count()[0]
###Output
_____no_output_____
###Markdown
Private Count Above uses Differential Privacy Library by Google to calculate the number of rows with value above limit. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).The function also takes the limit as an argument.`Count.quick_result()` takes a List of integer/ float as an input and returns the count of elements in the list.
###Code
# Calculates number of animals who ate more than "limit" carrots applying differential privacy.
def private_count_above(privacy_budget: float, limit: int) -> int:
x = Count(privacy_budget, dtype="int")
return x.quick_result(list(df[df.carrots_eaten > limit]["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of Private Count Above varies compared to the Count calculated using normal Statistical methods.This difference in values refers to that privacy is actually preserved for individual records in it.
###Code
print("Above 70:\t" + str(count_above(70)))
print("private count above:\t" + str(private_count_above(1, 70)))
###Output
Above 70: 65
private count above: 64
###Markdown
Taking Max of all the entries in a normal fashion without Applying the DP library. This is the actual maximum of carrots eaten of all the records.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, without applying differential privacy.
def max() -> int:
return df.max()[1]
###Output
_____no_output_____
###Markdown
Private Max uses Differential Privacy Library by Google to calculate the maximum out of all the values. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`Max.quick_result()` takes a List of integer/ float as an input and returns the list max value.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, applying differential privacy.
def private_max(privacy_budget: float) -> int:
    # 0 and 100 are the lower and upper limits for the search bound.
x = Max(privacy_budget, 0, 100, dtype="int")
return x.quick_result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of Private Max varies compared to the Max calculated using normal Statistical methods.This difference in values refers to that privacy is actually preserved for individual records in it.
###Code
print("Max:\t" + str(max()))
print("private max:\t" + str(private_max(1)))
###Output
Max: 100
private max: 78.0
###Markdown
Taking Sum of all the entries in a normal fashion without Applying the DP library. This is the actual sum of carrots eaten by all the animals.
###Code
# Function to calculate sum of carrots eaten without applying differential privacy.
def sum_carrots() -> int:
return df.sum()[1]
###Output
_____no_output_____
###Markdown
Private Sum uses Differential Privacy Library by Google to calculate the sum of all the values. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`BoundedSum.quick_result()` takes a List of integer/ float as an input and returns the list sum.
###Code
# Function to calculate sum of carrots eaten applying differential privacy.
def private_sum(privacy_budget: float) -> int:
x = BoundedSum(privacy_budget,1,100, dtype="float")
return x.quick_result(list(df["carrots_eaten"]))
print("Sum:\t" + str(sum_carrots()))
print("Private Sum:\t" + str(private_sum(1)))
###Output
Sum: 9649
Private Sum: 9472.0
###Markdown
1.1 Introduction to PyDPThe PyDP package provides a Python API into [Google's Differential Privacy library](https://github.com/google/differential-privacy). This example uses the alpha 0.4 version of the package that has the following limitations:* Supports Linux (Windows coming soon)* Currently implements an algorithm to support computing private mean using a Laplace noise generation technique.* Supports only integer and floating point values Example using Differential Privacy libraryIn this directory, we give a simple example of how to use the Python DifferentialPrivacy library. Zoo AnimalsThere are around 182 animals at Farmer Alex's zoo. Every day, Alex feedsthe animals as many carrots as they desire. The animals record how many carrotsthey have eaten per day. For this particular day, the number of carrots eatencan be seen in `animals_and_carrots.csv`.At the end of each day, Alex often asks aggregate questions about how manycarrots everyone ate. For example, he wants to know how many carrots are eateneach day, so he knows how many to order the next day. The animals are fearfulthat Alex will use the data against their best interest. For example, Alex couldget rid of the animals who eat the most carrots!To protect themselves, the animals decide to use the Python Differential Privacylibrary to aggregate their data before reporting it to Alex. This way, theanimals can control the risk that Alex will identify individuals' data whilemaintaining an adequate level of accuracy so that Alex can continue to run thezoo effectively.
###Code
# Install the PyDP package
! pip install python-dp
import pydp as dp # by convention our package is to be imported as dp (for Differential Privacy!)
from pydp.algorithms.laplacian import BoundedSum, BoundedMean, Count, Max
import pandas as pd
import statistics # for calculating mean without applying differential privacy
###Output
_____no_output_____
###Markdown
DataEach row in `animals_and_carrots.csv` is composed of the name of an animal, andthe number of carrots it has eaten, comma-separated.
###Code
# get carrots data from our public github repo
url = "https://raw.githubusercontent.com/OpenMined/PyDP/dev/examples/Tutorial_1-carrots_demo/animals_and_carrots.csv"
df = pd.read_csv(url, sep=",", names=["animal", "carrots_eaten"])
df.head()
###Output
_____no_output_____
###Markdown
Taking the mean of all the entries in a normal fashion without applying the DP library. This is the actual mean of all the records. Per-animal PrivacyNotice that each animal owns at most one row in the data. This means that weprovide per-animal privacy. Suppose that some animal appears multiple times inthe csv file. That animal would own more than one row in the data. In this case,using this DP library would not guarantee per-animal privacy! The animals wouldfirst have to pre-process their data in a way such that each animal doesn't ownmore than one row.
###Code
# calculates mean without applying differential privacy
def mean_carrots() -> float:
return statistics.mean(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
Private Mean uses Differential Privacy Library by Google to calculate the Mean. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`BoundedMean.quick_result()` takes a List of integer/ float as an input and returns the mean of the list values.
###Code
# calculates mean applying differential privacy
def private_mean(privacy_budget: float) -> float:
x = BoundedMean(privacy_budget, 0, 1, 100)
return x.quick_result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of the private mean varies compared to the mean calculated using non-private statistical methods.This difference in value corresponds to the privacy that is actually preserved for individual records in it.
###Code
print("Mean: ", mean_carrots())
print("Private Mean: ", private_mean(0.8))
###Output
Mean: 53.01648351648352
Private Mean: 71.27272727272728
###Markdown
Counts number of animals who ate more than 'limit' carrots without applying the DP library. This is the actual number of such animals.
###Code
# Calculates number of animals who ate more than "limit" carrots without applying differential privacy.
def count_above(limit: int) -> int:
return df[df.carrots_eaten > limit].count()[0]
###Output
_____no_output_____
###Markdown
Private Count Above uses Differential Privacy Library by Google to calculate the number of rows with value above limit. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).The function also takes the limit as an argument.`Count.quick_result()` takes a List of integer/ float as an input and returns the count of elements in the list.
###Code
# Calculates number of animals who ate more than "limit" carrots applying differential privacy.
def private_count_above(privacy_budget: float, limit: int) -> int:
x = Count(privacy_budget, dtype="int")
return x.quick_result(list(df[df.carrots_eaten > limit]["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of Private Count Above varies compared to the Count calculated using normal Statistical methods.This difference in values refers to that privacy is actually preserved for individual records in it.
###Code
print("Above 70:\t" + str(count_above(70)))
print("private count above:\t" + str(private_count_above(1, 70)))
###Output
Above 70: 65
private count above: 64
###Markdown
Taking Max of all the entries in a normal fashion without Applying the DP library. This is the actual maximum of carrots eaten of all the records.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, without applying differential privacy.
def max() -> int:
return df.max()[1]
###Output
_____no_output_____
###Markdown
Private Max uses Differential Privacy Library by Google to calculate the maximum out of all the values. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`Max.quick_result()` takes a List of integer/ float as an input and returns the list max value.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, applying differential privacy.
def private_max(privacy_budget: float) -> int:
    # 0 and 100 are the lower and upper limits for the search bound.
x = Max(privacy_budget, 0, 100, dtype="int")
return x.quick_result(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
As you can see, the value of Private Max varies compared to the Max calculated using normal Statistical methods.This difference in values refers to that privacy is actually preserved for individual records in it.
###Code
print("Max:\t" + str(max()))
print("private max:\t" + str(private_max(1)))
###Output
Max: 100
private max: 78.0
###Markdown
Taking Sum of all the entries in a normal fashion without Applying the DP library. This is the actual sum of carrots eaten by all the animals.
###Code
# Function to calculate sum of carrots eaten without applying differential privacy.
def sum_carrots() -> int:
return df.sum()[1]
###Output
_____no_output_____
###Markdown
Private Sum uses Differential Privacy Library by Google to calculate the sum of all the values. To preserve privacy, Laplacian mechanism is used.The function takes the argument privacy_budget as input.It is a number between 0 and 1, denoting privacy thresholdIt measures the acceptable loss of privacy (with 0 meaning no loss is acceptable).`BoundedSum.quick_result()` takes a List of integer/ float as an input and returns the list sum.
###Code
# Function to calculate sum of carrots eaten applying differential privacy.
def private_sum(privacy_budget: float) -> int:
x = BoundedSum(privacy_budget, 0, 1, 100, dtype="float")
return x.quick_result(list(df["carrots_eaten"]))
print("Sum:\t" + str(sum_carrots()))
print("Private Sum:\t" + str(private_sum(1)))
###Output
Sum: 9649
Private Sum: 9472.0
###Markdown
1.1 Introduction to PyDP

The PyDP package provides a Python API into [Google's Differential Privacy library](https://github.com/google/differential-privacy). This example uses the alpha 1.0.2 version of the package that has the following limitations:

* Supports Linux (Windows coming soon)
* Currently implements an algorithm to support computing private mean using a Laplace noise generation technique.
* Supports only integer and floating point values

Example using Differential Privacy library

In this directory, we give a simple example of how to use the Python Differential Privacy library.

Zoo Animals

There are around 182 animals at Farmer Alex's zoo. Every day, Alex feeds the animals as many carrots as they desire. The animals record how many carrots they have eaten per day. For this particular day, the number of carrots eaten can be seen in `animals_and_carrots.csv`.

At the end of each day, Alex often asks aggregate questions about how many carrots everyone ate. For example, he wants to know how many carrots are eaten each day, so he knows how many to order the next day. The animals are fearful that Alex will use the data against their best interest. For example, Alex could get rid of the animals who eat the most carrots!

To protect themselves, the animals decide to use the Python Differential Privacy library to aggregate their data before reporting it to Alex. This way, the animals can control the risk that Alex will identify individuals' data while maintaining an adequate level of accuracy so that Alex can continue to run the zoo effectively.
###Code
# Install the PyDP package
! pip install python-dp
import pydp as dp # by convention our package is to be imported as dp (for Differential Privacy!)
from pydp.algorithms.laplacian import BoundedSum, BoundedMean, BoundedStandardDeviation,BoundedVariance, Count, Max, Min, Median, Percentile
import pandas as pd
import statistics # for calculating mean without applying differential privacy
import numpy as np # for calculating percentile
print(dp.__version__)
###Output
1.0.2
###Markdown
Data

Each row in `animals_and_carrots.csv` is composed of the name of an animal, and the number of carrots it has eaten, comma-separated.
###Code
# get carrots data from our public github repo
url = 'https://raw.githubusercontent.com/OpenMined/PyDP/dev/examples/Tutorial_1-carrots_demo/animals_and_carrots.csv'
df = pd.read_csv(url,sep=",", names=["animal", "carrots_eaten"])
df.head()
###Output
_____no_output_____
###Markdown
Taking the mean of all the entries in a normal fashion without applying the DP library. This is the actual mean of all the records.

Per-animal Privacy

Notice that each animal owns at most one row in the data. This means that we provide per-animal privacy. Suppose that some animal appears multiple times in the csv file. That animal would own more than one row in the data. In this case, using this DP library would not guarantee per-animal privacy! The animals would first have to pre-process their data in a way such that each animal doesn't own more than one row.

Mean
###Code
# calculates mean without applying differential privacy
def mean_carrots() -> float:
return statistics.mean(list(df["carrots_eaten"]))
###Output
_____no_output_____
###Markdown
Private Mean uses the Differential Privacy library by Google to calculate the mean. To preserve privacy, the Laplacian mechanism is used. The function takes the argument privacy_budget as input: a number between 0 and 1 denoting the privacy threshold, which measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `BoundedMean.quick_result()` takes a list of integers/floats as input and returns the mean of the list values.
###Code
# calculates mean applying differential privacy
def private_mean(privacy_budget: float) -> float:
x = BoundedMean(privacy_budget, 1, 100)
return x.quick_result(list(df["carrots_eaten"]))
print("Mean: ", mean_carrots())
print("Private Mean: ", private_mean(0.8))
###Output
Mean: 53.01648351648352
Private Mean: 52.41612370476593
###Markdown
As you can see, the value of the private mean differs from the mean calculated using non-private statistical methods. This difference corresponds to the privacy that is actually preserved for the individual records.

Count

Counts the number of animals who ate more than 'limit' carrots without applying the DP library. This is the actual number of such animals.
###Code
# Calculates number of animals who ate more than "limit" carrots without applying differential privacy.
def count_above(limit: int) -> int:
return df[df.carrots_eaten > limit].count()[0]
###Output
_____no_output_____
###Markdown
Private Count Above uses the Differential Privacy library by Google to calculate the number of rows with a value above the limit. To preserve privacy, the Laplacian mechanism is used. The function takes the argument privacy_budget as input: a number between 0 and 1 denoting the privacy threshold, which measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). The function also takes the limit as an argument. `Count.quick_result()` takes a list of integers/floats as input and returns the count of elements in the list.
###Code
# Calculates number of animals who ate more than "limit" carrots applying differential privacy.
def private_count_above(privacy_budget: float, limit: int) -> int:
x = Count(privacy_budget, dtype="int")
return x.quick_result(list(df[df.carrots_eaten > limit]["carrots_eaten"]))
print("Above 70:\t" + str(count_above(70)))
print("private count above:\t" + str(private_count_above(1, 70)))
###Output
Above 70: 65
private count above: 63
###Markdown
As you can see, the value of Private Count Above differs from the count calculated using ordinary statistical methods. This difference reflects the privacy that is actually preserved for the individual records.

Max and Min

Taking the max of all the entries in a normal fashion without applying the DP library. This is the actual maximum number of carrots eaten across all the records.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, without applying differential privacy.
def maxi() -> int:  # named maxi because max is the name of a built-in function
    return df.max()[1]

# Function to return the minimum of the number of carrots eaten by any one animal, without applying differential privacy.
def mini() -> int:  # named mini because min is the name of a built-in function
    return df.min()[1]
###Output
_____no_output_____
###Markdown
Private Max and Private Min use the Differential Privacy library by Google to calculate the maximum and minimum of all the values. To preserve privacy, the Laplacian mechanism is used. The functions take the argument privacy_budget as input: a number between 0 and 1 denoting the privacy threshold, which measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `Max.quick_result()` takes a list of integers/floats as input and returns the maximum value in the list; `Min.quick_result()` returns the minimum value.
###Code
# Function to return the maximum of the number of carrots eaten by any one animal, applying differential privacy.
def private_max(privacy_budget: float) -> int:
    # 0 and 100 are the lower and upper limits for the search bound.
    x = Max(privacy_budget, 0, 100, dtype="int")
    return x.quick_result(list(df["carrots_eaten"]))

# Function to return the minimum of the number of carrots eaten by any one animal, applying differential privacy.
def private_min(privacy_budget: float) -> int:
    # 0 and 100 are the lower and upper limits for the search bound.
    x = Min(privacy_budget, 0, 100, dtype="int")
return x.quick_result(list(df["carrots_eaten"]))
print("Max:\t" + str(maxi()))
print("private max:\t" + str(private_max(1)))
print("Min:\t" + str(mini()))
print("private min:\t" + str(private_min(1)))
###Output
Min: 0
private min: 17
###Markdown
As you can see, the value of Private Max differs from the max calculated using ordinary statistical methods. This difference reflects the privacy that is actually preserved for the individual records.

Bounded Functions

Bounded functions take two additional parameters, an upper **(U)** and a lower **(L)** limit, used to clamp (i.e., bound) each input. The `BoundedSum()` function, for example, provides a differentially private sum clamped between the upper and lower values. Bounds can be set manually or inferred privately. Here, we set the lower and upper bounds to the minimum and maximum actual values in the carrots_eaten column; a small sketch of what clamping means follows the bounds cell below.
###Code
# Defining the bounds
L= 0 # Minimum value in the table
U = 100 #Maximum value in the table
###Output
_____no_output_____
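###Markdown
What clamping to [L, U] means in practice is sketched below. This is an illustration only (it is not a PyDP call), and the `values` list is made up for the example; bounding each contribution bounds the sensitivity of the query, which in turn bounds the noise that must be added.
###Code
# Illustration only: clamp each (hypothetical) contribution into [L, U] before aggregating.
values = [150, 42, -7, 99]                      # made-up raw contributions
clamped = [min(max(v, L), U) for v in values]   # -> [100, 42, 0, 99]
clamped
###Output
_____no_output_____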
###Markdown
Sum

Taking the sum of all the entries in a normal fashion without applying the DP library. This is the actual sum of carrots eaten by all the animals.
###Code
# Function to calculate sum of carrots eaten without applying differential privacy.
def sum_carrots() -> int:
return df.sum()[1]
###Output
_____no_output_____
###Markdown
Private Sum uses the Differential Privacy library by Google to calculate the sum of all the values. To preserve privacy, the Laplacian mechanism is used. The function takes the argument privacy_budget as input: a number between 0 and 1 denoting the privacy threshold, which measures the acceptable loss of privacy (with 0 meaning no loss is acceptable). `BoundedSum.quick_result()` takes a list of integers/floats as input and returns the sum of the list values.
###Code
# Function to calculate sum of carrots eaten applying differential privacy.
def private_sum(privacy_budget: float) -> int:
x = BoundedSum(privacy_budget,1,100, dtype="float")
return x.quick_result(list(df["carrots_eaten"]))
print("Sum:\t" + str(sum_carrots()))
print("Private Sum:\t" + str(private_sum(1)))
###Output
Sum: 9649
Private Sum: 9600.234303019475
###Markdown
As shown in the output, the sum calculated with the differentially private bounded function above differs from the value calculated using ordinary statistical methods. This difference reflects the privacy that is actually preserved for the individual records.

Standard Deviation and Variance

We now calculate the standard deviation and the variance, both statistically and privately.
###Code
# Function to calculate bounded standard deviation of carrots eaten without applying differential privacy.
def standarddeviation_carrots() -> float:
return statistics.stdev(list(df["carrots_eaten"]))
# Function to calculate bounded standard deviation of carrots eaten applying differential privacy.
def private_stddeviation(privacy_budget: float) -> int:
x = BoundedStandardDeviation(epsilon=privacy_budget,lower_bound=L, upper_bound=U, l0_sensitivity=1,
linf_sensitivity=100, dtype="float")
return x.quick_result(list(df["carrots_eaten"]))
print("Standard Deviation:\t" + str(standarddeviation_carrots()))
print("Private Standard Deviation:\t" + str(private_stddeviation(1)))
# Function to calculate bounded variance of carrots eaten without applying differential privacy.
def variance_carrots() -> float:
return statistics.pvariance(list(df["carrots_eaten"]))
# Function to calculate bounded variance of carrots eaten applying differential privacy.
def private_variance(privacy_budget: float) -> float:
x = BoundedVariance(epsilon=privacy_budget,lower_bound=L, upper_bound=U, l0_sensitivity=1,linf_sensitivity=100, dtype="float")
return x.quick_result(list(df["carrots_eaten"]))
print("Variance:\t" + str(variance_carrots()))
print("Private Variance:\t" + str(private_variance(1)))
###Output
Variance: 857.2030249969811
Private Variance: 2500.0
###Markdown
As shown in the output, the standard deviation and variance calculated with the differentially private bounded functions above differ from the values calculated using ordinary statistical methods. This difference reflects the privacy that is actually preserved for the individual records.

Percentile

Here we try to find the value of the observation at the 95th percentile, so we set the additional parameter `percentile` to 95.
###Code
percentile=95
# Function to calculate percentile of carrots eaten without applying differential privacy.
def percentile_carrots(percentile : int) -> float:
if percentile in range(0,101):
return np.percentile(list(df["carrots_eaten"]),percentile)
else:
raise Exception("Sorry, no numbers below zero or above 100")
# Function to calculate percentile of carrots eaten applying differential privacy.
def private_percentile(privacy_budget: float, percentile : int) -> float:
x = Percentile(epsilon=privacy_budget,lower_bound=L, upper_bound=U,percentile=percentile/100, dtype="float")
#print(percentile/100)
return x.quick_result(list(df["carrots_eaten"]))
print("Percentile:\t" + str(percentile_carrots(percentile)))
print("Private Percentile:\t" + str(private_percentile(1,percentile)))
###Output
Percentile: 96.0
Private Percentile: 95.08959730260416
|
Classification/Linear Models/LogisticRegression_RobustScaler.ipynb | ###Markdown
Logistic Regression with RobustScaler

This code template is for classification tasks using Logistic Regression together with the feature-rescaling technique RobustScaler in a pipeline.

Required Packages
###Code
!pip install imblearn
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from imblearn.over_sampling import RandomOverSampler
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder,RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Initialization

Filepath of the CSV file
###Code
#filepath
file_path= ""
###Output
_____no_output_____
###Markdown
List of features which are required for model training .
###Code
#x_values
features=[]
###Output
_____no_output_____
###Markdown
Target feature for prediction.
###Code
#y_value
target=''
###Output
_____no_output_____
###Markdown
Data Fetching

Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.
###Code
df=pd.read_csv(file_path)
df.head()
###Output
_____no_output_____
###Markdown
Feature Selection

Feature selection is the process of reducing the number of input variables when developing a predictive model. It is used to reduce the computational cost of modelling and, in some cases, to improve the performance of the model. We will assign all the required input features to X and the target/outcome to Y.
###Code
X = df[features]
Y = df[target]
###Output
_____no_output_____
###Markdown
Data Preprocessing

Since the majority of the machine learning models in the sklearn library don't handle string categorical data or null values, we have to explicitly remove or replace them. The snippet below contains functions that fill any null values and encode string-class columns as integer classes.
###Code
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
###Output
_____no_output_____
###Markdown
Correlation Map

In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
###Code
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
###Output
_____no_output_____
###Markdown
Distribution Of Target Variable
###Code
plt.figure(figsize = (10,6))
se.countplot(Y)
###Output
_____no_output_____
###Markdown
Data Splitting

The train-test split is a procedure for evaluating the performance of an algorithm. It involves taking a dataset and dividing it into two subsets: the first subset is used to fit/train the model, and the second is used for prediction. The main motive is to estimate the performance of the model on new data.
###Code
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
###Output
_____no_output_____
###Markdown
Handling Target Imbalance

The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn perform poorly on, the minority class, although it is typically performance on the minority class that matters most. One approach to addressing imbalanced datasets is to oversample the minority class; the simplest approach involves duplicating examples in the minority class. We will perform oversampling using the imblearn library.
###Code
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
###Output
_____no_output_____
###Markdown
Data RescalingIt scales features using statistics that are robust to outliers. This method removes the median and scales the data in the range between 1st quartile and 3rd quartile. i.e., in between 25th quantile and 75th quantile range. This range is also called an Interquartile range.[More on RobustScaler module and parameters](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html) ModelLogistic regression is a statistical model that in its basic form uses a logistic function to model a binary dependent variable, although many more complex extensions exist. In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model (a form of binary regression). This can be extended to model several classes of events. Model Tuning Parameters 1. penalty : {‘l1’, ‘l2’, ‘elasticnet’, ‘none’}, default=’l2’> Used to specify the norm used in the penalization. The ‘newton-cg’, ‘sag’ and ‘lbfgs’ solvers support only l2 penalties. ‘elasticnet’ is only supported by the ‘saga’ solver. If ‘none’ (not supported by the liblinear solver), no regularization is applied. 2. C : float, default=1.0> Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization. 3. tol : float, default=1e-4> Tolerance for stopping criteria. 4. solver : {‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’}, default=’lbfgs’> Algorithm to use in the optimization problem. For small datasets, ‘liblinear’ is a good choice, whereas ‘sag’ and ‘saga’ are faster for large ones. For multiclass problems, only ‘newton-cg’, ‘sag’, ‘saga’ and ‘lbfgs’ handle multinomial loss; ‘liblinear’ is limited to one-versus-rest schemes.* ‘newton-cg’, ‘lbfgs’, ‘sag’ and ‘saga’ handle L2 or no penalty.* ‘liblinear’ and ‘saga’ also handle L1 penalty.* ‘saga’ also supports ‘elasticnet’ penalty.* ‘liblinear’ does not support setting penalty='none'. 5. random_state : int, RandomState instance, default=None> Used when solver == ‘sag’, ‘saga’ or ‘liblinear’ to shuffle the data. 6. max_iter : int, default=100> Maximum number of iterations taken for the solvers to converge. 7. multi_class : {‘auto’, ‘ovr’, ‘multinomial’}, default=’auto’> If the option chosen is ‘ovr’, then a binary problem is fit for each label. For ‘multinomial’ the loss minimised is the multinomial loss fit across the entire probability distribution, even when the data is binary. ‘multinomial’ is unavailable when solver=’liblinear’. ‘auto’ selects ‘ovr’ if the data is binary, or if solver=’liblinear’, and otherwise selects ‘multinomial’. 8. verbose : int, default=0> For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. 9. n_jobs : int, default=None> Number of CPU cores used when parallelizing over classes if multi_class=’ovr’”. This parameter is ignored when the solver is set to ‘liblinear’ regardless of whether ‘multi_class’ is specified or not. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors
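The build cell below keeps the default hyperparameters. Purely as an illustration of the parameters listed above (the values are assumptions, not tuned for this dataset), they could be set explicitly like this:
```python
# Sketch: the same RobustScaler pipeline with explicit LogisticRegression
# hyperparameters; the chosen values are illustrative only.
tuned_model = make_pipeline(
    RobustScaler(),
    LogisticRegression(penalty='l2', C=0.5, solver='lbfgs',
                       max_iter=500, random_state=123))
tuned_model.fit(x_train, y_train)
```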
###Code
# Build Model here
model = make_pipeline(RobustScaler(),LogisticRegression())
model.fit(x_train, y_train)
###Output
_____no_output_____
###Markdown
Model Accuracy: The score() method returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since it requires every label set to be predicted correctly for each sample.
###Code
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
###Output
Accuracy score 97.50 %
###Markdown
Confusion MatrixA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
###Code
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
###Output
_____no_output_____
###Markdown
Classification Report: A classification report measures the quality of predictions from a classification algorithm: how many predictions are correct and how many are wrong.* **where**: - Precision:- accuracy of positive predictions. - Recall:- fraction of positives that were correctly identified. - f1-score:- harmonic mean of precision and recall. - support:- the number of actual occurrences of the class in the specified dataset.
###Code
print(classification_report(y_test,model.predict(x_test)))
###Output
precision recall f1-score support
0 0.97 0.98 0.98 102
1 0.98 0.97 0.97 98
accuracy 0.97 200
macro avg 0.98 0.97 0.97 200
weighted avg 0.98 0.97 0.97 200
###Markdown
Logistic Regression with RobustScaler This code template is for classification tasks using Logistic Regression with the RobustScaler feature-rescaling technique in a pipeline. Required Packages
###Code
!pip install imblearn
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from imblearn.over_sampling import RandomOverSampler
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder,RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Initialization: Filepath of the CSV file
###Code
#filepath
file_path= ""
###Output
_____no_output_____
###Markdown
List of the features required for model training.
###Code
#x_values
features=[]
###Output
_____no_output_____
###Markdown
Target feature for prediction.
###Code
#y_value
target=''
###Output
_____no_output_____
###Markdown
Data Fetching: Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We use pandas to read the CSV file from its storage path, and the head function to display the first few rows.
###Code
df=pd.read_csv(file_path)
df.head()
###Output
_____no_output_____
###Markdown
Feature Selection: Feature selection is the process of reducing the number of input variables when developing a predictive model. It lowers the computational cost of modelling and, in some cases, improves the performance of the model. We assign the required input features to X and the target/outcome to Y.
###Code
X = df[features]
Y = df[target]
###Output
_____no_output_____
###Markdown
Data Preprocessing: Since most machine learning models in the scikit-learn library do not handle string categorical data or null values, we have to explicitly remove or replace null values. The snippet below defines functions that fill any null values and convert string classes in the dataset by encoding them as integer classes.
###Code
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
###Output
_____no_output_____
###Markdown
Correlation Map: To check the correlation between the features, we plot a correlation matrix. It is an effective way to summarize a large amount of data when the goal is to spot patterns.
###Code
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
###Output
_____no_output_____
###Markdown
Distribution Of Target Variable
###Code
plt.figure(figsize = (10,6))
se.countplot(Y)
###Output
_____no_output_____
###Markdown
Data Splitting: The train-test split is a procedure for evaluating the performance of an algorithm. It involves dividing the dataset into two subsets: the first is used to fit/train the model, and the second is held out for prediction. The goal is to estimate the performance of the model on new data.
###Code
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
###Output
_____no_output_____
###Markdown
Handling Target Imbalance: The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn perform poorly on, the minority class, even though performance on the minority class is often what matters most. One approach to addressing imbalanced datasets is to oversample the minority class; the simplest way is to duplicate examples from the minority class. We perform oversampling using the imblearn library.
###Code
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
###Output
_____no_output_____
###Markdown
Data RescalingIt scales features using statistics that are robust to outliers. This method removes the median and scales the data in the range between 1st quartile and 3rd quartile. i.e., in between 25th quantile and 75th quantile range. This range is also called an Interquartile range.[More on RobustScaler module and parameters](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html) ModelLogistic regression is a statistical model that in its basic form uses a logistic function to model a binary dependent variable, although many more complex extensions exist. In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model (a form of binary regression). This can be extended to model several classes of events. Model Tuning Parameters 1. penalty : {‘l1’, ‘l2’, ‘elasticnet’, ‘none’}, default=’l2’> Used to specify the norm used in the penalization. The ‘newton-cg’, ‘sag’ and ‘lbfgs’ solvers support only l2 penalties. ‘elasticnet’ is only supported by the ‘saga’ solver. If ‘none’ (not supported by the liblinear solver), no regularization is applied. 2. C : float, default=1.0> Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization. 3. tol : float, default=1e-4> Tolerance for stopping criteria. 4. solver : {‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’}, default=’lbfgs’> Algorithm to use in the optimization problem. For small datasets, ‘liblinear’ is a good choice, whereas ‘sag’ and ‘saga’ are faster for large ones. For multiclass problems, only ‘newton-cg’, ‘sag’, ‘saga’ and ‘lbfgs’ handle multinomial loss; ‘liblinear’ is limited to one-versus-rest schemes.* ‘newton-cg’, ‘lbfgs’, ‘sag’ and ‘saga’ handle L2 or no penalty.* ‘liblinear’ and ‘saga’ also handle L1 penalty.* ‘saga’ also supports ‘elasticnet’ penalty.* ‘liblinear’ does not support setting penalty='none'. 5. random_state : int, RandomState instance, default=None> Used when solver == ‘sag’, ‘saga’ or ‘liblinear’ to shuffle the data. 6. max_iter : int, default=100> Maximum number of iterations taken for the solvers to converge. 7. multi_class : {‘auto’, ‘ovr’, ‘multinomial’}, default=’auto’> If the option chosen is ‘ovr’, then a binary problem is fit for each label. For ‘multinomial’ the loss minimised is the multinomial loss fit across the entire probability distribution, even when the data is binary. ‘multinomial’ is unavailable when solver=’liblinear’. ‘auto’ selects ‘ovr’ if the data is binary, or if solver=’liblinear’, and otherwise selects ‘multinomial’. 8. verbose : int, default=0> For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. 9. n_jobs : int, default=None> Number of CPU cores used when parallelizing over classes if multi_class=’ovr’”. This parameter is ignored when the solver is set to ‘liblinear’ regardless of whether ‘multi_class’ is specified or not. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors
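Instead of fixing C by hand, the regularization strength can be searched with cross-validation. A minimal sketch using GridSearchCV on the same pipeline (the grid values are assumptions, not recommendations for this dataset):
```python
from sklearn.model_selection import GridSearchCV

# make_pipeline names the estimator step 'logisticregression', which is how
# its parameters are addressed in the parameter grid.
param_grid = {'logisticregression__C': [0.01, 0.1, 1.0, 10.0]}
search = GridSearchCV(make_pipeline(RobustScaler(), LogisticRegression()),
                      param_grid, cv=5)
search.fit(x_train, y_train)
print(search.best_params_)
```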
###Code
# Build Model here
model = make_pipeline(RobustScaler(),LogisticRegression())
model.fit(x_train, y_train)
###Output
_____no_output_____
###Markdown
Model Accuracy: The score() method returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since it requires every label set to be predicted correctly for each sample.
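Equivalently, the same number can be computed from explicit predictions; a small sketch using accuracy_score:
```python
from sklearn.metrics import accuracy_score

# Should match model.score(x_test, y_test): mean accuracy on the test set.
print(accuracy_score(y_test, model.predict(x_test)))
```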
###Code
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
###Output
Accuracy score 97.50 %
###Markdown
Confusion MatrixA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
###Code
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
###Output
_____no_output_____
###Markdown
Classification Report: A classification report measures the quality of predictions from a classification algorithm: how many predictions are correct and how many are wrong.* **where**: - Precision:- accuracy of positive predictions. - Recall:- fraction of positives that were correctly identified. - f1-score:- harmonic mean of precision and recall. - support:- the number of actual occurrences of the class in the specified dataset.
###Code
print(classification_report(y_test,model.predict(x_test)))
###Output
precision recall f1-score support
0 0.97 0.98 0.98 102
1 0.98 0.97 0.97 98
accuracy 0.97 200
macro avg 0.98 0.97 0.97 200
weighted avg 0.98 0.97 0.97 200
###Markdown
Logistic Regression with RobustScaler Required Packages
###Code
!pip install imblearn
import warnings
import numpy as np
import pandas as pd
import seaborn as se
import matplotlib.pyplot as plt
from imblearn.over_sampling import RandomOverSampler
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder,RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Initialization: Filepath of the CSV file
###Code
#filepath
file_path= ""
###Output
_____no_output_____
###Markdown
List of the features required for model training.
###Code
#x_values
features=[]
###Output
_____no_output_____
###Markdown
Target feature for prediction.
###Code
#y_value
target=''
###Output
_____no_output_____
###Markdown
Data Fetching: Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools. We use pandas to read the CSV file from its storage path, and the head function to display the first few rows.
###Code
df=pd.read_csv(file_path)
df.head()
###Output
_____no_output_____
###Markdown
Feature Selection: Feature selection is the process of reducing the number of input variables when developing a predictive model. It lowers the computational cost of modelling and, in some cases, improves the performance of the model. We assign the required input features to X and the target/outcome to Y.
###Code
X = df[features]
Y = df[target]
###Output
_____no_output_____
###Markdown
Data Preprocessing: Since most machine learning models in the scikit-learn library do not handle string categorical data or null values, we have to explicitly remove or replace null values. The snippet below defines functions that fill any null values and convert string classes in the dataset by encoding them as integer classes.
###Code
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
###Output
_____no_output_____
###Markdown
Correlation Map: To check the correlation between the features, we plot a correlation matrix. It is an effective way to summarize a large amount of data when the goal is to spot patterns.
###Code
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
###Output
_____no_output_____
###Markdown
Distribution Of Target Variable
###Code
plt.figure(figsize = (10,6))
se.countplot(Y)
###Output
_____no_output_____
###Markdown
Data Splitting: The train-test split is a procedure for evaluating the performance of an algorithm. It involves dividing the dataset into two subsets: the first is used to fit/train the model, and the second is held out for prediction. The goal is to estimate the performance of the model on new data.
###Code
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
###Output
_____no_output_____
###Markdown
Handling Target Imbalance: The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn perform poorly on, the minority class, even though performance on the minority class is often what matters most. One approach to addressing imbalanced datasets is to oversample the minority class; the simplest way is to duplicate examples from the minority class. We perform oversampling using the imblearn library.
###Code
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
###Output
_____no_output_____
###Markdown
Data RescalingIt scales features using statistics that are robust to outliers. This method removes the median and scales the data in the range between 1st quartile and 3rd quartile. i.e., in between 25th quantile and 75th quantile range. This range is also called an Interquartile range.[More on RobustScaler module and parameters](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html) ModelLogistic regression is a statistical model that in its basic form uses a logistic function to model a binary dependent variable, although many more complex extensions exist. In regression analysis, logistic regression (or logit regression) is estimating the parameters of a logistic model (a form of binary regression). This can be extended to model several classes of events. Model Tuning Parameters 1. penalty : {‘l1’, ‘l2’, ‘elasticnet’, ‘none’}, default=’l2’> Used to specify the norm used in the penalization. The ‘newton-cg’, ‘sag’ and ‘lbfgs’ solvers support only l2 penalties. ‘elasticnet’ is only supported by the ‘saga’ solver. If ‘none’ (not supported by the liblinear solver), no regularization is applied. 2. C : float, default=1.0> Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values specify stronger regularization. 3. tol : float, default=1e-4> Tolerance for stopping criteria. 4. solver : {‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’}, default=’lbfgs’> Algorithm to use in the optimization problem. For small datasets, ‘liblinear’ is a good choice, whereas ‘sag’ and ‘saga’ are faster for large ones. For multiclass problems, only ‘newton-cg’, ‘sag’, ‘saga’ and ‘lbfgs’ handle multinomial loss; ‘liblinear’ is limited to one-versus-rest schemes.* ‘newton-cg’, ‘lbfgs’, ‘sag’ and ‘saga’ handle L2 or no penalty.* ‘liblinear’ and ‘saga’ also handle L1 penalty.* ‘saga’ also supports ‘elasticnet’ penalty.* ‘liblinear’ does not support setting penalty='none'. 5. random_state : int, RandomState instance, default=None> Used when solver == ‘sag’, ‘saga’ or ‘liblinear’ to shuffle the data. 6. max_iter : int, default=100> Maximum number of iterations taken for the solvers to converge. 7. multi_class : {‘auto’, ‘ovr’, ‘multinomial’}, default=’auto’> If the option chosen is ‘ovr’, then a binary problem is fit for each label. For ‘multinomial’ the loss minimised is the multinomial loss fit across the entire probability distribution, even when the data is binary. ‘multinomial’ is unavailable when solver=’liblinear’. ‘auto’ selects ‘ovr’ if the data is binary, or if solver=’liblinear’, and otherwise selects ‘multinomial’. 8. verbose : int, default=0> For the liblinear and lbfgs solvers set verbose to any positive number for verbosity. 9. n_jobs : int, default=None> Number of CPU cores used when parallelizing over classes if multi_class=’ovr’”. This parameter is ignored when the solver is set to ‘liblinear’ regardless of whether ‘multi_class’ is specified or not. None means 1 unless in a joblib.parallel_backend context. -1 means using all processors
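RobustScaler defaults to the (25.0, 75.0) interquartile range; if the data has heavy tails, a wider range can be used. A hedged sketch (the (10, 90) range is an assumption, not a recommendation for this dataset):
```python
# Sketch: same pipeline, but scaling over a wider quantile range than the
# default interquartile range.
alt_model = make_pipeline(RobustScaler(quantile_range=(10.0, 90.0)),
                          LogisticRegression())
alt_model.fit(x_train, y_train)
```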
###Code
# Build Model here
model = make_pipeline(RobustScaler(),LogisticRegression())
model.fit(x_train, y_train)
###Output
_____no_output_____
###Markdown
Model Accuracy: The score() method returns the mean accuracy on the given test data and labels. In multi-label classification, this is the subset accuracy, which is a harsh metric since it requires every label set to be predicted correctly for each sample.
###Code
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
###Output
Accuracy score 97.50 %
###Markdown
Confusion MatrixA confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
###Code
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
###Output
_____no_output_____
###Markdown
Classification Report: A classification report measures the quality of predictions from a classification algorithm: how many predictions are correct and how many are wrong.* **where**: - Precision:- accuracy of positive predictions. - Recall:- fraction of positives that were correctly identified. - f1-score:- harmonic mean of precision and recall. - support:- the number of actual occurrences of the class in the specified dataset.
###Code
print(classification_report(y_test,model.predict(x_test)))
###Output
precision recall f1-score support
0 0.97 0.98 0.98 102
1 0.98 0.97 0.97 98
accuracy 0.97 200
macro avg 0.98 0.97 0.97 200
weighted avg 0.98 0.97 0.97 200
|
_posts/ithome/2020-12th-ironman/16.決策樹(迴歸器)/.ipynb_checkpoints/決策樹(Classfication-iris)-checkpoint.ipynb | ###Markdown
1) Load the dataset
###Code
import io, requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

url = 'https://github.com/1010code/iris-dnn-tensorflow/raw/master/data/Iris.csv'
s=requests.get(url).content
df_data=pd.read_csv(io.StringIO(s.decode('utf-8')))
df_data = df_data.drop(labels=['Id'],axis=1) # drop the Id column
df_data
###Output
_____no_output_____
###Markdown
2) Manual encoding of nominal variables - data preprocessing. Depending on the characteristics of the features, you can choose either manual or automatic encoding. When is encoding needed? For deep learning, a neural network can only handle numeric data, so all non-numeric features must be converted, e.g.:| Iris-setosa | Iris-versicolor | Iris-virginica ||:---:|:---:|:---:|| 1 | 2 | 3 |
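For the automatic-encoding option mentioned above, scikit-learn's LabelEncoder can derive the integer mapping by itself. A minimal sketch, assuming df_data has been loaded as in the previous cell:
```python
from sklearn.preprocessing import LabelEncoder

# Automatic alternative to the hand-written label_map below:
# LabelEncoder assigns 0..n_classes-1 in sorted label order.
le = LabelEncoder()
encoded = le.fit_transform(df_data['Species'])
print(le.classes_)  # the learned label order
```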
###Code
label_map = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
# store the labels encoded via label_map in df_data['Class']
df_data['Class'] = df_data['Species'].map(label_map)
df_data
###Output
_____no_output_____
###Markdown
3) Check for missing values. Use the functions provided by numpy to check whether any NA values exist; if so, they can be removed with dropna(). This is appropriate when only a few values are missing. If a large portion of the data is missing, or the dataset itself is small, it is better to impute the missing values, for example by predicting them with a machine learning method.```python train=train.dropna() # remove missing values```
###Code
X = df_data.drop(labels=['Species','Class'],axis=1).values # drop Species/Class (text labels are not used as training features)
# checked missing data
print("checked missing data(NAN mount):",len(np.where(np.isnan(X))[0]))
###Output
checked missing data(NAN mount): 0
###Markdown
4) Split into training and test sets
###Code
from sklearn.model_selection import train_test_split
X=df_data.drop(labels=['Class','Species'],axis=1).values
y=df_data['Class'].values
X_train , X_test , y_train , y_test = train_test_split(X,y , test_size=.3 , random_state=42)
print('Training data shape:',X_train.shape)
print('Testing data shape:',X_test.shape)
###Output
Training data shape: (105, 4)
Testing data shape: (45, 4)
###Markdown
Decision boundary plotting function
###Code
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, test_idx = None, resolution=0.02):
# setup marker generator and color map
markers = ('s','x','o','^','v')
colors = ('red','blue','lightgreen','gray','cyan')
cmap = ListedColormap(colors[:len(np.unique(y))])
# plot the decision surface
x1_min, x1_max = X[:,0].min() - 1, X[:,0].max() + 1
x2_min, x2_max = X[:,1].min() - 1, X[:,1].max() + 1
xx1, xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),
np.arange(x2_min,x2_max,resolution))
Z = classifier.predict(np.array([xx1.ravel(),xx2.ravel()]).T)
Z = Z.reshape(xx1.shape)
plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
plt.xlim(xx1.min(),xx1.max())
plt.ylim(xx2.min(),xx2.max())
for idx, cl in enumerate(np.unique(y)):
plt.scatter(x=X[y==cl,0], y=X[y==cl,1],
alpha=0.8, c=[cmap(idx)], marker=markers[idx],label=cl)
if test_idx:
X_test, y_test = X[test_idx,:], y[test_idx]
plt.scatter(X_test[:, 0], X_test[:,1], c='',
alpha=1.0, linewidth=1, marker='o',
s=55, label='test set')
###Output
_____no_output_____
###Markdown
PCA dimensionality reduction: reduce the original four iris features to two dimensions for easier visualization.
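To see how much information the two components keep, PCA exposes the explained variance ratio. A small sketch, meant to be run after the next cell has fitted pca:
```python
# Fraction of the original variance captured by each retained component.
print(pca.explained_variance_ratio_)
print(pca.explained_variance_ratio_.sum())
```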
###Code
from sklearn.decomposition import PCA
pca = PCA(n_components=2, iterated_power=1)
train_reduced = pca.fit_transform(X_train)
test_reduced = pca.transform(X_test)
###Output
_____no_output_____
###Markdown
Decision tree model: a decision tree derives many tree-structured rules from the data and uses the learned rules to predict new samples. The tree is built automatically from the training data.Parameters:- criterion: impurity measure, gini/entropy. Defaults to gini.- max_depth: maximum depth of the tree.- random_state: random seed, ensures the training result is the same every run.Attributes:- feature_importances_: the importance of each feature in the model.Methods:- fit: fit the model on X and y.- predict: predict and return the predicted class.- score: fraction of correct predictions.- predict_proba: predicted probability of each class.- get_depth: get the depth of the tree.
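The attributes and methods listed above can be inspected directly once the model is fitted. A small sketch, meant to be run after the training cell below:
```python
# Class probabilities for a few test points, the fitted tree's depth,
# and the per-feature importances.
print(decisionTreeModel.predict_proba(test_reduced[:5]))
print(decisionTreeModel.get_depth())
print(decisionTreeModel.feature_importances_)
```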
###Code
from sklearn.tree import DecisionTreeClassifier
decisionTreeModel = DecisionTreeClassifier(criterion = 'entropy', max_depth=6, random_state=42)
decisionTreeModel.fit(train_reduced, y_train)
plt.tight_layout()
plot_decision_regions(train_reduced, y_train, decisionTreeModel)
print('train set accuracy: ',decisionTreeModel.score(train_reduced, y_train))
plt.tight_layout()
plot_decision_regions(test_reduced, y_test, decisionTreeModel)
print('test set accuracy: ',decisionTreeModel.score(test_reduced, y_test))
###Output
test set accuracy: 0.9777777777777777
###Markdown
Visualizing the decision tree: Graphviz is a package for visualizing decision trees; see the Graphviz [website](https://www2.graphviz.org/) for an introduction. The example below again uses the iris dataset, this time training on all four features.`pip install graphviz`
###Code
import graphviz
from sklearn.tree import export_graphviz
#create model
decisionTreeModel = DecisionTreeClassifier(criterion = 'gini', max_depth=3 ,random_state=42)
decisionTreeModel.fit(X_train, y_train)
print('train set accuracy: ',decisionTreeModel.score(X_train, y_train))
print('test set accuracy: ',decisionTreeModel.score(X_test, y_test))
print('Feature importances: ',decisionTreeModel.feature_importances_)
dot_data = export_graphviz(decisionTreeModel, out_file=None,
feature_names=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm'],
class_names=['setosa', 'versicolor', 'virginica'],
filled=True, rounded=True,
special_characters=True)
graph = graphviz.Source(dot_data)
graph
###Output
_____no_output_____ |
docs/notebooks/robustdg_getting_started.ipynb | ###Markdown
Getting started with RobustDG: Generalization and Privacy Attacks on the Rotated MNIST dataset Domain Generalization (DG) is the task of learning a predictive model that can generalize to different data distributions. Intuitively, models trained by simply aggregating data from different domains may overfit to the domains observed during training. Many DG methods have been proposed to improve the generalization of models to OOD data. Here we present a simple application of the RobustDG library to build a model on a modified MNIST dataset and then evaluate its out-of-distribution accuracy and robustness to privacy attacks. Dataset: Rotated MNIST Rotated MNIST consists of several data domains, each corresponding to a specific rotation. It provides a very easy way to generate out-of-distribution (OOD) data samples. For example, the model is shown data with rotations between 15 and 75 degrees during training, while at test time it has to classify digits rotated by 90 degrees. Hence, different rotations/domains lead to a difference between the train and test distributions. Training ML models that can generalize to new domains Below we provide the commands to train the different methods; we use the pre-trained models for this notebook. You may run these commands to first train the models. Baseline: Empirical risk minimization We first train a model using ERM, which simply pools data from the different domains and builds a single model. python train.py --dataset rot_mnist --method_name erm_match --match_case 0.01 --penalty_ws 0.0 MatchDG: Domain generalization via causal matching The MatchDG model regularizes the ERM training objective by matching data samples across domains that were generated from the same base object. More details are in the [Arxiv paper](https://arxiv.org/abs/2006.07500). Train the MatchDG model on Rotated MNIST by executing the following commands. MatchDG operates in two phases: in the first phase it learns a matching function, and in the second phase it learns a classifier regularized by the matching function learnt in the first phase. Phase 1: Learning Match Function python train.py --dataset rot_mnist --method_name matchdg_ctr --match_case 0.01 --match_flag 1 --epochs 30 --batch_size 64 --pos_metric cos Phase 2: Learning Classifier regularised on the Match Function python train.py --dataset rot_mnist --method_name matchdg_erm --penalty_ws 0.1 --match_case -1 --ctr_match_case 0.01 --ctr_match_flag 1 --ctr_match_interrupt 5 Evaluating the trained model After training the models, we can evaluate them on various test metrics such as test accuracy on the unseen domain, match function metrics, etc. Out-of-distribution accuracy We evaluate both the ERM and MatchDG methods on OOD accuracy ERM OOD accuracy
###Code
%%bash
cd ../../
python test.py --test_metric acc --dataset rot_mnist --method_name erm_match --match_case 0.01 --penalty_ws 0.0
###Output
_____no_output_____
###Markdown
MatchDG OOD accuracy
###Code
%%bash
cd ../../
python test.py --test_metric acc --dataset rot_mnist --method_name matchdg_erm --penalty_ws 0.1 --match_case -1 --ctr_match_case 0.01 --ctr_match_flag 1 --ctr_match_interrupt 5
###Output
_____no_output_____
###Markdown
The results indicate that MatchDG (97.4) outperforms ERM (95.6) on OOD accuracy by approximately 2 percent. T-SNE Plots In addition to OOD accuracy, we provide metrics to evaluate the representations learnt by the methods above, namely T-SNE plots and match function metrics (check the match_eval.py module under the evaluations directory). Here, we evaluate the representations learnt with contrastive learning (Phase 1) using T-SNE plots
###Code
%%bash
cd ../..
python test.py --test_metric t_sne --dataset rot_mnist --method_name matchdg_ctr --match_case 0.01 --match_flag 1 --epochs 30 --batch_size 64 --pos_metric cos
###Output
_____no_output_____
###Markdown
The above command stores the TSNE embeddings in json format. Now we generate the TSNE plots using the saved files
###Code
import json
import numpy as np
import matplotlib.pyplot as plt
save_path= "../../results/rot_mnist/matchdg_ctr/logit_match/train_['15', '30', '45', '60', '75']_test_['0', '90']/Model_0.01_5_1_0_resnet18_label.json"
with open(save_path) as f:
data = json.load(f)
for key in data.keys():
arr= np.array(data[key])
plt.plot( arr[:, 0], arr[:, 1], '.', label=key )
plt.title('TSNE plot of representations: Legend denotes class labels')
plt.legend()
plt.savefig('images/t_sne.png', dpi=100)
###Output
_____no_output_____
###Markdown
Robustness to membership inference privacy attack We also test the models against membership inference attacks (MIA). MIA exploits the generalization gap of ML models: models that overfit leak information about the dataset they were trained on. ERM MIA accuracy
###Code
%%bash
cd ../..
python test.py --test_metric mia --dataset rot_mnist --method_name erm_match --match_case 0.01 --penalty_ws 0.0
###Output
_____no_output_____
###Markdown
MatchDG MIA accuracy
###Code
%%bash
cd ../..
python test.py --test_metric mia --dataset rot_mnist --method_name matchdg_erm --penalty_ws 0.1 --match_case -1 --ctr_match_case 0.01 --ctr_match_flag 1 --ctr_match_interrupt 5
###Output
_____no_output_____
###Markdown
Getting started with RobustDG: Generalization and Privacy Attacks on the Rotated MNIST dataset Domain Generalization (DG) is the task of learning a predictive model that can generalize to different data distributions. Intuitively, models trained by simply aggregating data from different domains may overfit to the domains observed during training. Many DG methods have been proposed to improve the generalization of models to OOD data. Here we present a simple application of the RobustDG library to build a model on a modified MNIST dataset and then evaluate its out-of-distribution accuracy and robustness to privacy attacks. Dataset: Rotated MNIST Rotated MNIST consists of several data domains, each corresponding to a specific rotation. It provides a very easy way to generate out-of-distribution (OOD) data samples. For example, the model is shown data with rotations between 15 and 75 degrees during training, while at test time it has to classify digits rotated by 90 degrees. Hence, different rotations/domains lead to a difference between the train and test distributions. Training ML models that can generalize to new domains Below we provide the commands to train the different methods; we use the pre-trained models for this notebook. You may run these commands to first train the models. Prepare Data for Rot MNIST & Fashion MNIST From the directory `data`, run the following command: python data_gen.py resnet18 Baseline: Empirical risk minimization We first train a model using ERM, which simply pools data from the different domains and builds a single model. python train.py --dataset rot_mnist --method_name erm_match --match_case 0.01 --penalty_ws 0.0 --epochs 25 MatchDG: Domain generalization via causal matching The MatchDG model regularizes the ERM training objective by matching data samples across domains that were generated from the same base object. More details are in the [Arxiv paper](https://arxiv.org/abs/2006.07500). Train the MatchDG model on Rotated MNIST by executing the following commands. MatchDG operates in two phases: in the first phase it learns a matching function, and in the second phase it learns a classifier regularized by the matching function learnt in the first phase. Phase 1: Learning Match Function python train.py --dataset rot_mnist --method_name matchdg_ctr --match_case 0.01 --match_flag 1 --epochs 100 --batch_size 256 --pos_metric cos Phase 2: Learning Classifier regularised on the Match Function python train.py --dataset rot_mnist --method_name matchdg_erm --match_case -1 --penalty_ws 0.1 --epochs 25 --ctr_match_case 0.01 --ctr_match_flag 1 --ctr_match_interrupt 5 --ctr_model_name resnet18 Evaluating the trained model After training the models, we can evaluate them on various test metrics such as test accuracy on the unseen domain, match function metrics, etc. Out-of-distribution accuracy We evaluate both the ERM and MatchDG methods on OOD accuracy ERM OOD accuracy
###Code
%%bash
cd ../..
python test.py --test_metric acc --dataset rot_mnist --method_name erm_match --match_case 0.01 --penalty_ws 0.0
###Output
_____no_output_____
###Markdown
MatchDG OOD accuracy
###Code
%%bash
cd ../..
python test.py --test_metric acc --dataset rot_mnist --method_name matchdg_erm --penalty_ws 0.1 --match_case -1 --ctr_match_case 0.01 --ctr_match_flag 1 --ctr_match_interrupt 5 --ctr_model_name resnet18
###Output
_____no_output_____
###Markdown
The results indicate that MatchDG (96.1) outperforms ERM (93.9) on OOD accuracy by approximately 2 percent. T-SNE Plots In addition to OOD accuracy, we provide metrics to evaluate the representations learnt by the methods above, namely T-SNE plots and match function metrics (check the match_eval.py module under the evaluations directory). Here, we evaluate the representations learnt with contrastive learning (Phase 1) using T-SNE plots
###Code
%%bash
cd ../..
python test.py --test_metric t_sne --dataset rot_mnist --method_name matchdg_ctr --match_case 0.01 --match_flag 1 --pos_metric cos
###Output
_____no_output_____
###Markdown
The above command stores the TSNE embeddings in json format. Now we generate the TSNE plots using the saved files
###Code
import json
import numpy as np
import matplotlib.pyplot as plt
save_path= "../../results/rot_mnist/matchdg_ctr/logit_match/train_['15', '30', '45', '60', '75']/Model_0.01_5_1_0_resnet18_label.json"
with open(save_path) as f:
data = json.load(f)
for key in data.keys():
arr= np.array(data[key])
plt.plot( arr[:, 0], arr[:, 1], '.', label=key )
plt.title('TSNE plot of representations: Legend denotes class labels')
plt.legend()
plt.savefig('images/t_sne.png', dpi=100)
###Output
_____no_output_____
###Markdown
Robustness to membership inference privacy attack We also test the models against membership inference attacks (MIA). MIA exploits the generalization gap of ML models: models that overfit leak information about the dataset they were trained on. ERM MIA accuracy
###Code
%%bash
cd ../..
python test.py --test_metric mia --mia_logit 1 --mia_sample_size 2000 --batch_size 64 --dataset rot_mnist --method_name erm_match --match_case 0.01 --penalty_ws 0.0
###Output
_____no_output_____
###Markdown
MatchDG MIA accuracy
###Code
%%bash
cd ../..
python test.py --test_metric mia --mia_logit 1 --mia_sample_size 2000 --batch_size 64 --dataset rot_mnist --method_name matchdg_erm --penalty_ws 0.1 --match_case -1 --ctr_match_case 0.01 --ctr_match_flag 1 --ctr_match_interrupt 5
###Output
_____no_output_____ |
notebooks/xx_plantilla.ipynb | ###Markdown
Content under **Content license** and code under **Code license**. © **Author(s)** 2019-2020. This material is part of the course **Course name** in the **Program name** program at **University**. Title Overall description of the lesson and of the notebook's content.**After completing this notebook you should be able to:*** Objective 1.* Objective 2.* Objective 3. Section title Section content. You can use an [HTML div tag](https://www.w3schools.com/Tags/tag_div.asp) and assign it the class "alert alert-warning" to get a highlighted note, as follows. ```html<div class="alert alert-warning">Content.</div>``` This is the result. There are [several options for this tag](https://www.w3schools.com/bootstrap/bootstrap_alerts.asp). Its common use on the web is shown below:```html<div class="alert alert-success">Success! Indicates a successful or positive action.</div><div class="alert alert-info">Info! Indicates a neutral change or action.</div><div class="alert alert-warning">Notice! Indicates a notice that may require attention.</div><div class="alert alert-danger">Danger! Indicates a dangerous or potentially negative action.</div>```This is the result: Success! Indicates a successful or positive action. Info! Indicates a neutral change or action. Notice! Indicates a notice that may require attention. Danger! Indicates a dangerous or potentially negative action.
###Code
# Code
1 + 1
###Output
_____no_output_____
###Markdown
Class activity Description of the class activity. Glossary of terms - **Term 1:** Definition 1.- **Term 2:** Definition 2. References 1. Bibliographic reference 1.2. Bibliographic reference 2. Notebook format The following cell changes the notebook's format.
###Code
from IPython.core.display import HTML
def css_styling():
styles = open('./nb_style.css', 'r').read()
return HTML(styles)
css_styling()
###Output
_____no_output_____ |
03_slope_model_3_2.ipynb | ###Markdown
Slope in the $k$-$l$ plot for the expected degree $l$ in model 3-2 $$E(p(r)) = -r^{2} + 2r$$
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
r = np.linspace(0., 1., 100)
plt.plot(r, -r**2+2*r)
plt.xlabel(r'$r$')
plt.ylabel(r'$E(p(x,r))$')
plt.show()
###Output
_____no_output_____ |
DL_Final_Project.ipynb | ###Markdown
###Code
#@title RUN Pre-Processing?
run_preprocessing = False #@param {type:"boolean"}
###Output
_____no_output_____
###Markdown
Init Notebook
###Code
!git clone https://github.com/omier/music-genre-classifier.git
!pip3 install pytorch_lightning efficientnet_pytorch
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.nn.functional as F
import torch.optim as optim
import math
from pytorch_lightning import metrics
import plotly.express as px
import pandas as pd
import numpy as np
import pprint
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
###Output
_____no_output_____
###Markdown
Pre-Processing
###Code
import librosa
from librosa import display
import matplotlib.pyplot as plt
import glob
import os
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
n_fft = 2048
hop_length = 512
n_mels = 288
song_length = 30
song_mini_batch_length = 3
def preprocess(filename, out):
for offset in range(0, song_length, song_mini_batch_length):
y, sr = librosa.load(filename, duration=song_mini_batch_length, sr=None, offset=offset)
song, _ = librosa.effects.trim(y)
S = librosa.feature.melspectrogram(song, sr=sr, n_fft=n_fft, hop_length=hop_length, n_mels=n_mels)
S_DB = librosa.power_to_db(S, ref=np.max)
fig = plt.Figure()
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.set_axis_off()
librosa.display.specshow(S_DB, ax=ax, y_axis='log', x_axis='time')
fig.savefig(f'{out}_{offset}.png', transparent=True)
data_path = 'music-genre-classifier/Data/'
output_directory = 'melspectograms/'
genres = glob.glob(f'{data_path}genres_original/*')
def ensure_dir(dir):
if not os.path.exists(dir):
os.mkdir(dir)
if run_preprocessing:
ensure_dir(f'{data_path}{output_directory}')
for g in genres:
waves = glob.glob(f'{g}/*')
genre = g.split('/')[-1]
for w in waves:
filename = '.'.join(w.split('/')[-1].split('.')[:-1])
ensure_dir(f'{data_path}{output_directory}{genre}')
preprocess(w, f'{data_path}{output_directory}{genre}/{filename}')
###Output
_____no_output_____
###Markdown
Load Data
###Code
img_data = 'music-genre-classifier/Data/melspectograms/'
dataset = torchvision.datasets.ImageFolder(
root=img_data,
transform=torchvision.transforms.ToTensor(),
)
len(dataset)
NUM_CLASSES = len(dataset.classes)
# 60% train, 20% validate, 20% test
trainset_size=math.ceil(len(dataset)*0.6)
valset_size=math.ceil(len(dataset)*0.2)
testset_size=len(dataset) - trainset_size - valset_size
trainset, valset, testset = torch.utils.data.random_split(dataset, [trainset_size, valset_size, testset_size])
trainloader = torch.utils.data.DataLoader(trainset, batch_size=16,
shuffle=True)
valloader = torch.utils.data.DataLoader(valset, batch_size=16,
shuffle=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=16,
shuffle=False)
data_loaders = {'train': trainloader, 'validation': valloader, 'test': testloader}
###Output
_____no_output_____
###Markdown
Helpers
###Code
def train(model, n_epochs, criterion, trainloader):
optimizer = optim.Adam(model.parameters(), lr=0.0001)
history = []
for e in range(1, n_epochs + 1):
for counter, data in enumerate(trainloader):
inputs, labels = data
predicted_labels = model(inputs.to(device=device))
optimizer.zero_grad()
loss = criterion(predicted_labels, labels.to(device=device))
loss.backward()
optimizer.step()
current_metrics = evaluate(model, criterion)
print(f'Epoch {e}\\{n_epochs} Metrics')
pprint.pprint(current_metrics, indent=4)
history.append(current_metrics)
return history
def evaluate(model, criterion, sets=['train', 'validation']):
with torch.no_grad():
sets_metrics = dict()
for set_name, dataloader in data_loaders.items():
if set_name in sets:
recall = metrics.Recall(num_classes=NUM_CLASSES, average='macro').to(device=device)
precision = metrics.Precision(num_classes=NUM_CLASSES, average='macro').to(device=device)
accuracy = metrics.Accuracy().to(device=device)
loss = 0
for inputs, labels in dataloader:
predicted_labels = model(inputs.to(device=device))
labels = labels.to(device=device)
loss += criterion(predicted_labels, labels.to(device=device)).item()
recall.update(predicted_labels, labels)
precision.update(predicted_labels, labels)
accuracy.update(predicted_labels, labels)
sets_metrics[set_name] = { 'recall': recall.compute().item(),
'precision': precision.compute().item(),
'accuracy': accuracy.compute().item(),
'loss': loss / len(dataloader.dataset)}
return sets_metrics
def plot(history):
metrics_map = dict()
for e_sets in history:
for set_name, set_metrics in e_sets.items():
for metric_name, metric_value in set_metrics.items():
if metric_name not in metrics_map:
metrics_map[metric_name] = dict()
if set_name not in metrics_map[metric_name]:
metrics_map[metric_name][set_name] = []
metrics_map[metric_name][set_name].append(metric_value)
for metric_name, sets in metrics_map.items():
df = None
for set_name, set_metrics in sets.items():
size = len(set_metrics)
if df is None:
df = pd.DataFrame({"epoch": np.linspace(1, size, size),
metric_name: set_metrics,
"set": [set_name] * size})
else:
df = df.append(pd.DataFrame({"epoch": np.linspace(1, size, size),
metric_name: set_metrics,
"set": [set_name] * size}), ignore_index=True)
fig = px.line(df, x="epoch", y=metric_name, line_group="set", title=f"epoch {metric_name} per dataset", color="set", hover_name="set")
fig.show()
###Output
_____no_output_____
###Markdown
CNN 3 Conv 3 Linear
convolution layer 1 (convolution -> relu -> max pool 2X2)
convolution layer 2 (convolution -> relu -> max pool 2X2)
convolution layer 3 (convolution -> relu -> max pool 2X2)
3 fully connected linear layers with relu activation function
###Code
class CNNGTZAN(nn.Module):
def __init__(self):
super(CNNGTZAN, self).__init__()
self.conv1 = nn.Conv2d(3, 16, 3)
self.conv2 = nn.Conv2d(16, 32, 3)
self.conv3 = nn.Conv2d(32, 64, 3)
# 288, 432 ->(3X3) 286, 430 ->(max pool 2X2) 143, 215
# 143, 215 ->(3X3) 141, 213 ->(max pool 2X2) 70, 106
# 70, 106 ->(3X3) 68, 104 ->(max pool 2X2) 34, 52
self.fc1 = nn.Linear(64 * 34 * 52, 128)
self.fc2 = nn.Linear(128, 64)
self.fc3 = nn.Linear(64, 10)
def forward(self, x):
# convolution layer 1 (convolution -> relu -> max pool 2X2)
x = F.max_pool2d(F.relu(self.conv1(x)), 2)
# convolution layer 2 (convolution -> relu -> max pool 2X2)
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
# convolution layer 3 (convolution -> relu -> max pool 2X2)
x = F.max_pool2d(F.relu(self.conv3(x)), 2)
# flatten x to (batch_size, 64 * 34 * 52) matrix - per instance flatten
x = torch.flatten(x, start_dim=1)
# fully connected linear layers with relu activation function
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
# last fc linear layer
x = self.fc3(x)
return x
baseline_cnn = CNNGTZAN().to(device=device)
%%time
baseline_cnn_history = train(baseline_cnn, 15, nn.CrossEntropyLoss(), trainloader)
plot(baseline_cnn_history)
###Output
_____no_output_____
###Markdown
4L-2D CNN
###Code
class BigCNN(nn.Module):
def __init__(self):
super(BigCNN, self).__init__()
# 4 layers of convolution and max pooling
self._extractor = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(kernel_size=4),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.MaxPool2d(kernel_size=8),
)
# some linear layers for classification
self._classifier = nn.Sequential(nn.Dropout(0.2),
nn.Linear(in_features=3*2*256, out_features=512),
nn.ReLU(),
nn.Linear(in_features=512, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=NUM_CLASSES))
def forward(self, x):
# torch.Size([16, 3, 288, 432])
x = self._extractor(x)
# BATCH_SIZE, CHANNELS, FREQUENCY, TIME
# torch.Size([16, 256, 2, 3])
x = x.view(x.size(0), -1)
# BATCH_SIZE, 256 * 2 * 3
# torch.Size([16, 1536])
score = self._classifier(x)
# torch.Size([16, 10])
return score
big_cnn = BigCNN().to(device=device)
%%time
big_cnn_history = train(big_cnn, 15, nn.CrossEntropyLoss(), trainloader)
plot(big_cnn_history)
###Output
_____no_output_____
###Markdown
4L-2D CNN + GRU (fresh cnn with GRU)
###Code
class CNNGRU(nn.Module):
def __init__(self):
super(CNNGRU, self).__init__()
# 4 layers of convolution and max pooling
self._extractor = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.MaxPool2d(kernel_size=4),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.MaxPool2d(kernel_size=8),
)
# bidirectional GRU model with 3 hidden layers
self._rnnModule = nn.GRU(512, 512, bidirectional=True, num_layers=3)
# some linear layers for classification
self._classifier = nn.Sequential(nn.Dropout(0.2),
nn.Linear(in_features=3*2*512, out_features=512),
nn.ReLU(),
nn.Linear(in_features=512, out_features=256),
nn.ReLU(),
nn.Linear(in_features=256, out_features=NUM_CLASSES))
def forward(self, x):
# torch.Size([16, 3, 288, 432])
x = self._extractor(x)
# BATCH_SIZE, CHANNELS, FREQUENCY, TIME
# torch.Size([16, 256, 2, 3])
x = x.permute(0, 3, 1, 2)
# BATCH_SIZE, TIME, CHANNELS, FREQUENCY
# torch.Size([16, 3, 256, 2])
x = x.view(x.size(0), x.size(1), -1)
# BATCH_SIZE, TIME, CHANNELS*FREQUENCY
# torch.Size([16, 3, 512])
x, hn = self._rnnModule(x)
# BATCH_SIZE, TIME, 512 * 2
# torch.Size([16, 3, 1024])
x = x.view(x.size(0), -1)
# BATCH_SIZE, 512 * 2 * 3
# torch.Size([16, 3072])
score = self._classifier(x)
# torch.Size([16, 10])
return score
big_cnn_gru = CNNGRU().to(device=device)
%%time
big_cnn_gru_history = train(big_cnn_gru, 15, nn.CrossEntropyLoss(), trainloader)
plot(big_cnn_gru_history)
###Output
_____no_output_____
###Markdown
EfficientNet-b0 Transfer Learning
EfficientNet layers
Adaptive Average Pool 2d
Optional GRU layers defined by a parameter
Linear layer
Softmax
###Code
from efficientnet_pytorch import EfficientNet
model = EfficientNet.from_pretrained('efficientnet-b0', num_classes=NUM_CLASSES, advprop=True)
class MyEfficientNet(nn.Module):
def __init__(self, efficientNetModel, use_GRU=False):
super(MyEfficientNet, self).__init__()
self.efficientNetModel = efficientNetModel
# output size: torch.Size([batch_size, 1280, 9, 14])
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=1280, out_channels=256, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU()
)
self.pool = nn.AdaptiveAvgPool2d(2)
self._rnnModule = nn.GRU(512, 512, bidirectional=True, num_layers=3)
self.use_GRU = use_GRU
if self.use_GRU:
lin_size = 512 * 2 * 2
else:
lin_size = 1280 * 2 * 2
self.lin = nn.Linear(lin_size, NUM_CLASSES)
def forward(self, x):
x = self.efficientNetModel.extract_features(x)
x = self.pool(x)
if self.use_GRU:
x = self.conv1(x)
# 16, 256, 2, 2
x = x.permute(0, 3, 1, 2)
# BATCH_SIZE, TIME, CHANNELS, FREQUENCY
# torch.Size([16, 2, 256, 2])
x = x.view(x.size(0), x.size(1), -1)
# BATCH_SIZE, TIME, CHANNELS*FREQUENCY
# torch.Size([16, 2, 512])
x, hn = self._rnnModule(x)
# BATCH_SIZE, TIME, 512 * 2
# torch.Size([16, 2, 1024])
x = x.view(x.size(0), -1)
# BATCH_SIZE, 512 * 2 * 2
# torch.Size([16, 2048])
x = self.lin(x)
x = nn.Softmax()(x)
return x
gtzan_EfficientNet = MyEfficientNet(model).to(device=device)
%%time
history_EfficientNet = train(gtzan_EfficientNet, 15, nn.CrossEntropyLoss(), trainloader)
plot(history_EfficientNet)
gtzan_EfficientNet_with_GRU = MyEfficientNet(model, use_GRU=True).to(device=device)
%%time
history_EfficientNet_with_GRU = train(gtzan_EfficientNet_with_GRU, 15, nn.CrossEntropyLoss(), trainloader)
plot(history_EfficientNet_with_GRU)
###Output
_____no_output_____
###Markdown
VGG11 Transfer Learning
###Code
vgg11 = torchvision.models.vgg11(pretrained=True)
vgg11
class MyVGG11(nn.Module):
def __init__(self, model):
super(MyVGG11, self).__init__()
self.features = model.features
self.avgpool = nn.AvgPool2d(2)
self.classifier = nn.Sequential(
nn.Linear(4*6*512, 8192),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(8192, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, NUM_CLASSES),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
myVGG11Model = MyVGG11(vgg11).to(device=device)
%%time
myvgg11history = train(myVGG11Model, 15, nn.CrossEntropyLoss(), trainloader)
plot(myvgg11history)
###Output
_____no_output_____
###Markdown
Models Compare (test set)
###Code
models = {
'baseline CNN': baseline_cnn,
'4 layers CNN': big_cnn,
'4 layers CNN with GRU': big_cnn_gru,
'EfficientNet-b0': gtzan_EfficientNet,
'EfficientNet-b0 with GRU': gtzan_EfficientNet_with_GRU,
'VGG11': myVGG11Model,
}
for model_name, model in models.items():
print(model_name)
print(evaluate(model, nn.CrossEntropyLoss(), sets=['test']))
###Output
baseline CNN
{'test': {'recall': 0.6674073338508606, 'precision': 0.6767995357513428, 'accuracy': 0.6666666865348816, 'loss': 0.06085472447318477}}
4 layers CNN
{'test': {'recall': 0.8373075723648071, 'precision': 0.851702868938446, 'accuracy': 0.8358358144760132, 'loss': 0.03195561381155724}}
4 layers CNN with GRU
{'test': {'recall': 0.8603062033653259, 'precision': 0.8676202893257141, 'accuracy': 0.8593593835830688, 'loss': 0.03222034716171337}}
EfficientNet-b0
|